Witam!
Mam problem z połączeniem rozpoznawania mowy i rozpoznawania twarzy w jednej aplikacji.
Rozpoznawanie mowy działa dopóki nie włączę rozpoznawania twarzy (button1_Click). Wydaje mi się, że problem może być w:
sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
i
Application.Idle += new EventHandler(FrameGrabber);
Mogę prosić o pomoc w rozwiązaniu problemu?
Poniżej kod:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.CvEnum;
using System.IO;
using System.Diagnostics;
using Microsoft.Speech.Recognition;
using Microsoft.Speech.Synthesis;
namespace MultiFaceRec
{
public partial class FrmPrincipal : Form
{
// Declaration of all variables, vectors and Haar cascades used by the form.
Image<Bgr, Byte> currentFrame;          // last frame grabbed from the webcam
Capture grabber;                        // Emgu.CV capture device (opened in button1_Click)
HaarCascade face;                       // frontal-face detector
HaarCascade eye;                        // NOTE(review): never loaded in visible code — confirm it is used elsewhere
MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
Image<Gray, byte> result, TrainedFace = null;
Image<Gray, byte> gray = null;
List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
List<string> labels = new List<string>();
List<string> NamePersons = new List<string>();
int ContTrain, NumLabels, t;
string name, names = null;
SpeechSynthesizer synth = new SpeechSynthesizer();
// BUG FIX: the recognition engine must be a field, not a constructor local.
// As a local, nothing references it after the constructor returns, so the GC
// is free to collect it — recognition then silently stops, typically as soon
// as the app starts doing allocation-heavy work (e.g. the FrameGrabber idle
// loop started by button1_Click). Keeping it in a field pins its lifetime to
// the form's.
SpeechRecognitionEngine sre;

/// <summary>
/// Initializes the form: loads the face Haar cascade, configures Polish
/// text-to-speech and continuous speech recognition, and loads any
/// previously trained face images and their labels from disk.
/// </summary>
public FrmPrincipal()
{
    InitializeComponent();

    // Load the Haar cascade for face detection.
    face = new HaarCascade("haarcascade_frontalface_default.xml");

    // Configure Polish TTS output.
    synth.SelectVoice("Microsoft Server Speech Text to Speech Voice (pl-PL, Paulina)");
    synth.SetOutputToDefaultAudioDevice();

    // Configure continuous speech recognition for pl-PL with a small
    // fixed-phrase grammar.
    System.Globalization.CultureInfo ci = new System.Globalization.CultureInfo("pl-PL");
    sre = new SpeechRecognitionEngine(ci);
    sre.SetInputToDefaultAudioDevice();
    sre.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
    Choices numbers = new Choices();
    numbers.Add(new string[] { "Pierwszy test aplikacji.", "dwa", "szczyt", "jeden"});
    GrammarBuilder gb = new GrammarBuilder();
    gb.Append(numbers);
    Grammar g = new Grammar(gb);
    sre.LoadGrammar(g);
    // Multiple = keep recognizing until RecognizeAsyncStop/Cancel is called.
    sre.RecognizeAsync(RecognizeMode.Multiple);

    try
    {
        // Load previously trained faces and the label for each image.
        // TrainedLabels.txt format: "<count>%<label1>%<label2>%..."
        string Labelsinfo = File.ReadAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt");
        string[] Labels = Labelsinfo.Split('%');
        NumLabels = Convert.ToInt16(Labels[0]);
        ContTrain = NumLabels;
        string LoadFaces;
        for (int tf = 1; tf < NumLabels + 1; tf++)
        {
            LoadFaces = "face" + tf + ".bmp";
            trainingImages.Add(new Image<Gray, byte>(Application.StartupPath + "/TrainedFaces/" + LoadFaces));
            labels.Add(Labels[tf]);
        }
    }
    catch (Exception e)
    {
        // Best-effort: an empty/missing database is expected on first run,
        // so inform the user instead of crashing.
        //MessageBox.Show(e.ToString());
        MessageBox.Show("Nothing in binary database, please add at least a face(Simply train the prototype with the Add Face Button).", "Triained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
    }
}
/// <summary>
/// Handles a recognized phrase: shows it in <c>label6</c> and echoes it
/// back through the synthesizer when confidence is high enough.
/// </summary>
/// <param name="sender">The recognition engine raising the event.</param>
/// <param name="e">Recognition result (text + confidence).</param>
private void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    // Microsoft.Speech raises this event on a background thread, so WinForms
    // controls must not be touched directly here (the original code both
    // wrote and read label6 cross-thread, which throws under the debugger
    // and misbehaves in release). Capture the text once and marshal the UI
    // update onto the UI thread.
    if (e.Result.Confidence > 0.82)
    {
        string recognized = e.Result.Text;
        BeginInvoke((MethodInvoker)delegate
        {
            label6.Text = recognized;
        });
        // Speak asynchronously: a blocking Speak() here would stall the
        // recognizer's callback thread and delay further recognitions.
        synth.SpeakAsync("Powiedziałeś: ");
        synth.SpeakAsync(recognized);
    }
}
/// <summary>
/// Starts face recognition: opens the default capture device, primes it
/// with a first frame, and hooks frame processing into the message loop.
/// </summary>
/// <param name="sender">The button that was clicked.</param>
/// <param name="e">Unused event data.</param>
private void button1_Click(object sender, EventArgs e)
{
    // Open the webcam and pull one frame so the device is warmed up.
    grabber = new Capture();
    grabber.QueryFrame();

    // Process frames whenever the UI message loop goes idle; disable the
    // button so the capture cannot be started twice.
    Application.Idle += FrameGrabber;
    button1.Enabled = false;
}