How to enter different values in multiple textboxes through voice in C#

I need to write an application that uses a speech recognition engine.
How can I enter different values into multiple textboxes through voice in C#?
I can enter a value into a single textbox, but not into a second one. I have the following code for entering a value into a single textbox.
private SpeechRecognitionEngine rec;
private void voice()
{
rec = new SpeechRecognitionEngine();
rec.SetInputToDefaultAudioDevice();
Choices choice = new Choices("apple","Orange","Onion");
GrammarBuilder gr = new GrammarBuilder(choice);
Grammar grammar = new Grammar(gr);
rec.LoadGrammar(grammar);
rec.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(rec_SpeechRecognized);
rec.RecognizeAsync(RecognizeMode.Multiple);
}
void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
foreach (RecognizedWordUnit word in e.Result.Words)
{
textBox1.Text = word.Text;
}
}

I would do something like this (a very simple example):
private SpeechRecognitionEngine rec;
private void voice()
{
rec = new SpeechRecognitionEngine();
rec.SetInputToDefaultAudioDevice();
Choices choice = new Choices("apple","Orange","Onion", "next");
GrammarBuilder gr = new GrammarBuilder(choice);
Grammar grammar = new Grammar(gr);
rec.LoadGrammar(grammar);
rec.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(rec_SpeechRecognized);
rec.RecognizeAsync(RecognizeMode.Multiple);
}
private TextBox currentInput;
void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (currentInput == null) currentInput = textBox1;
foreach (RecognizedWordUnit word in e.Result.Words)
{
if (word.Text == "next") { currentInput = textBox2; }
else { currentInput.Text = word.Text; }
}
}
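If there are more than two textboxes, the same idea generalizes: keep the inputs in a list and let "next" advance an index. This is only a minimal sketch; textBox1 through textBox3 are assumed control names on the form, and the handler replaces the one shown above.

private List<TextBox> inputs;
private int currentIndex;

private void InitInputs()
{
    // Order in which the "next" command moves through the textboxes.
    inputs = new List<TextBox> { textBox1, textBox2, textBox3 };
    currentIndex = 0;
}

void rec_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    foreach (RecognizedWordUnit word in e.Result.Words)
    {
        if (word.Text == "next")
        {
            // Move to the next textbox, wrapping around at the end.
            currentIndex = (currentIndex + 1) % inputs.Count;
        }
        else
        {
            inputs[currentIndex].Text = word.Text;
        }
    }
}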

Related

Array method that takes user input?

I am currently working on a project of mine that I call an "Automated speech detector". Basically this program sits in the system tray most of the time, just listening for user input.
I have now come to the conclusion that I will not be able to fill the "command" array with all the commands people want, so I have decided to integrate an "AddCommand" feature, where the user can enter a desired command themselves and the program will later do whatever I decide it should do. However, I really need help with this.
How can I make a string array method that takes one argument, where the argument is the user's input string "command", and adds that input to the string array? Is this possible?
This is my current code for the "default" commands I have set:
Choices commands = new Choices();
commands.Add(new string[] { "dollar", "euro", "hotmail", "notepad", "outlook", "onedrive", "discord" });
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
So it should work something like this, only the other array (like commands2) will be able to take one argument and insert it into the array. The code below is the whole project, if it is necessary to look at.
public partial class Form1 : Form
{
public SpeechRecognitionEngine recEngine;
public static bool keyHold = false;
NotifyIcon IconPicture;
Icon ActiveIcon;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
#region Icon and windows system tray dropdown text & click events
//Creating icon and setting it to default.
ActiveIcon = new Icon("speak_lzW_icon.ico");
IconPicture = new NotifyIcon();
IconPicture.Icon = ActiveIcon;
//iconPicture.Visible = true;
//Creating menu item for window in system tray.
MenuItem ProgNameMenuItem = new MenuItem("Voice detection by: Lmannen");
MenuItem QuitMenuItem = new MenuItem("Quit");
ContextMenu contextMenu = new ContextMenu();
contextMenu.MenuItems.Add(ProgNameMenuItem);
contextMenu.MenuItems.Add(QuitMenuItem);
//Adding the icon to the system tray window.
IconPicture.ContextMenu = contextMenu;
//System tray click event handlers
QuitMenuItem.Click += QuitMenuItem_Click;
IconPicture.MouseDoubleClick += IconPicture_MouseDoubleClick1;
#endregion
#region SpeechRecognition commands & event handlers
recEngine = new SpeechRecognitionEngine();
recEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recEngine_SpeechRecognized);
recEngine.AudioStateChanged += new EventHandler<AudioStateChangedEventArgs>(recEngine_AudioStateChange);
Choices commands = new Choices();
commands.Add(new string[] { "dollar", "euro", "hotmail", "notepad", "outlook", "onedrive", "discord" });
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
recEngine.SetInputToDefaultAudioDevice();
recEngine.LoadGrammarAsync(grammar);
recEngine.RequestRecognizerUpdate();
recEngine.RecognizeAsync(RecognizeMode.Multiple);
#endregion
}
internal void recEngine_AudioStateChange(object sender, AudioStateChangedEventArgs e)
{
InputStatusLbl.Text = string.Format("{0}", e.AudioState);
}
internal static void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
switch(e.Result.Text)
{
case "notepad":
System.Diagnostics.Process.Start("notepad.exe");
break;
case "hotmail":
System.Diagnostics.Process.Start("https://outlook.live.com/owa/");
break;
case "outlook":
System.Diagnostics.Process.Start("https://outlook.live.com/owa/");
break;
case "ondrive":
System.Diagnostics.Process.Start("https://onedrive.live.com/");
break;
case "discord":
string name = Environment.UserName;
string path = string.Format(@"C:\Users\{0}\AppData\Local\Discord\app-0.0.300\Discord.exe", name);
System.Diagnostics.Process.Start(path);
break;
}
}
private void Form1_Resize(object sender, EventArgs e)
{
if(WindowState == FormWindowState.Minimized)
{
ShowInTaskbar = false;
ShowIcon = false;
IconPicture.Visible = true;
}
}
private void IconPicture_MouseDoubleClick1(object sender, MouseEventArgs e)
{
ShowInTaskbar = true;
IconPicture.Visible = false;
ShowIcon = true;
WindowState = FormWindowState.Normal;
}
private void QuitMenuItem_Click(object sender, EventArgs e)
{
IconPicture.Dispose();
this.Close();
}
private void addToolStripMenuItem_Click(object sender, EventArgs e)
{
string input = Microsoft.VisualBasic.Interaction.InputBox("Add a voice-command by text", "Command");
MessageBox.Show(input + " is now added to the command list");
}
}
}
Having some background on your task, I believe you need a Dictionary. It will be a public variable at the form level: the key will be the command and the value will be the path to execute. In the form load, you'll initialize it with your 5 values BEFORE assigning your events.
public Dictionary<string, string> Commands = new Dictionary<string, string>();
So in the form load (you'll need 5 of these):
Commands.Add("notepad", "notepad.exe");
Commands.Add("hotmail", "https://outlook.live.com/owa/");
Instead of a case statement, you will search the dictionary, and if the key exists, you will start the value. Assuming the dictionary is called Commands, it would be:
string command = "";
if (Commands.TryGetValue(e.Result.Text, out command))
System.Diagnostics.Process.Start(command);
The add command will pass in the command name and the application path and add them to the dictionary:
Commands.Add(commandName, pathToCommand);
Note that when you do this, you should ALSO save the commands to a file in the user's local application data area that can be read back on form load, so they are retained, but that's out of scope.
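Putting the pieces together, the recognized handler and a hypothetical AddCommand helper could look roughly like this. Note that adding a word to the dictionary alone is not enough: the recognizer's grammar also has to be updated, which on a running engine may need to wait for a recognizer update. This is only a sketch; recEngine and Commands are the form-level fields from above, the handler is no longer static so it can use the instance field, and Commands.Keys.ToArray() needs using System.Linq.

internal void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string command;
    if (Commands.TryGetValue(e.Result.Text, out command))
    {
        System.Diagnostics.Process.Start(command);
    }
}

// Hypothetical helper: store the new command and teach the recognizer the new word.
public void AddCommand(string commandName, string pathToCommand)
{
    Commands[commandName] = pathToCommand;

    // Rebuild the grammar so the new word can actually be recognized.
    Grammar updated = new Grammar(new GrammarBuilder(new Choices(Commands.Keys.ToArray())));

    // On a running engine, grammar changes may need to wait for RecognizerUpdateReached.
    recEngine.RequestRecognizerUpdate();
    recEngine.UnloadAllGrammars();
    recEngine.LoadGrammarAsync(updated);
}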

Conversation with C# Speech Library

I have the following code using the Speech Recognition library:
var listen = new SpeechRecognitionEngine();
var reader = new Choices(File.ReadLines(@"C:\words.txt").ToArray()); // ToArray needs using System.Linq
listen.LoadGrammar(new Grammar(new GrammarBuilder(reader)));
listen.SpeechRecognized += listen_SpeechRecognized;
listen.SpeechRecognitionRejected += listen_SpeechRecognitionRejected;
listen.SetInputToDefaultAudioDevice();
listen.RecognizeAsync(RecognizeMode.Multiple);
And I have an event listener like this...
static void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
var talk = new SpeechSynthesizer();
if (e.Result.Text == "Search Stock Symbol")
{
talk.Speak("What symbol?");
//Do I have to create another event listener?
//a Listener .. symbol = a.Result.Text
//talk.Speak(GetQuote(symbol))
}
}
Would I have to create an event listener for every portion of the "conversation"? Is there a better way if that is the case?
Example Conversation:
Me: Search Stock Symbol
Computer: What Symbol?
Me: AAPL
Computer: Apple is trading at ....
Nope, just the one; then vary what you do depending on what text was received. In some code beforehand:
List<string> stockSymbols = new List<string>();
stockSymbols.Add("AAPL");
Then
string lastSpeechInput;
void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
var talk = new SpeechSynthesizer();
switch (e.Result.Text) {
case "Search Stock Symbol":
talk.Speak("What symbol?");
break;
default:
break;
}
if (stockSymbols.Contains(e.Result.Text) && lastSpeechInput == "Search Stock Symbol") {
talk.Speak(getStockPrice(e.Result.Text));
}
lastSpeechInput = e.Result.Text;
}
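If the conversation grows beyond two steps, a small state field can make the flow easier to follow than remembering the previous utterance. A sketch under that idea; GetQuote stands in for the asker's stock lookup method.

enum ConversationState { Idle, WaitingForSymbol }
ConversationState state = ConversationState.Idle;

void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var talk = new SpeechSynthesizer();
    switch (state)
    {
        case ConversationState.Idle:
            if (e.Result.Text == "Search Stock Symbol")
            {
                talk.Speak("What symbol?");
                state = ConversationState.WaitingForSymbol;
            }
            break;

        case ConversationState.WaitingForSymbol:
            // Whatever is recognized here is treated as the symbol.
            talk.Speak(GetQuote(e.Result.Text));
            state = ConversationState.Idle;
            break;
    }
}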

Parse first word of the command in recognized speech

I am trying to take speech input, convert it to a string, and show it in the RichTextBox control. I have read about speech synthesis and voice recognition in several articles and learned how to receive commands via speech; however, I want to write into the RichTextBox control after my command "Write" is recognized. Is that possible?
Here is the code, if it helps to understand what I am trying to achieve and what I have done so far.
Object declarations:
PromptBuilder pb = new PromptBuilder();
SpeechSynthesizer ss = new SpeechSynthesizer(); // synthesizer used by SpeakAsync in the handler below
SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
Choices clist = new Choices();
The code for enabling voice input:
private void btnEnableVoice_Click(object sender, EventArgs e)
{
btnEnableVoice.Enabled = false;
btnDisableVoice.Enabled = true;
/////////////Adding commands in a list of type Choices///////////////////////
clist.Add(new string[] { "Is it working", "Write" });
Grammar gr = new Grammar(new GrammarBuilder(clist));
try
{
recognizer.RequestRecognizerUpdate(); ///////starting engine
recognizer.LoadGrammar(gr);
recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.SetInputToDefaultAudioDevice();
recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}
}
void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
switch (e.Result.Text.ToString())
{
case "Is it working":
ss.SpeakAsync("Yes its working");
break;
case "Write":
richTextBox1.Text += ""; //Speech to text input here
break;
}
}
First you need to construct the grammar in a way that allows dictation; see http://msdn.microsoft.com/en-us/library/ms576565(v=vs.110).aspx for reference:
Choices clist = new Choices();
clist.Add(new string[] { "Is it working", "Write" });
GrammarBuilder bl = new GrammarBuilder(clist);
bl.AppendDictation();
Grammar gr = new Grammar(bl);
To parse the result you need something like:
void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
string result = e.Result.Text;
if (result.StartsWith("Write")) {
richTextBox1.Text += result.Substring("Write ".Length); // skip the "Write " prefix
} else if (result.StartsWith("Is it working")) {
ss.SpeakAsync("Yes it's working");
}
}
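If you only want free dictation after "Write" (and not after the other command), one option, still with the same classes, is to append dictation to a GrammarBuilder for that command only. A sketch under that assumption, replacing the grammar construction above:

// "Write" followed by free dictation, so only that command accepts arbitrary text.
GrammarBuilder writeCommand = new GrammarBuilder("Write");
writeCommand.AppendDictation();

Choices commands = new Choices();
commands.Add(writeCommand);                        // "Write <free text>"
commands.Add(new GrammarBuilder("Is it working")); // fixed phrase only

recognizer.LoadGrammar(new Grammar(new GrammarBuilder(commands)));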

C# Speech Recognition Command Confirmation?

So I've made the application recognize what I say. But how do I make the application confirm a request when I command it to carry out a task?
As of now I have this code:
public partial class Form1 : Form
{
SpeechSynthesizer synth = new SpeechSynthesizer();
SpeechRecognitionEngine sRecognize = new SpeechRecognitionEngine();
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
Choices sList = new Choices();
sList.Add(new String[] { "Exit"});
Grammar gr = new Grammar(new GrammarBuilder(sList));
sRecognize.RequestRecognizerUpdate();
sRecognize.LoadGrammar(gr);
sRecognize.SpeechRecognized += sRecognize_SpeechRecognized;
sRecognize.SetInputToDefaultAudioDevice();
sRecognize.RecognizeAsync(RecognizeMode.Multiple);
sRecognize.SpeechRecognitionRejected += sRecognize_SpeechRecognitionRejected;
}
private void sRecognize_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "Exit")
{
Application.Exit();
}
}
}
So my question, as an example:
I say: "Exit"
The application confirms with:
"Are you sure you want to exit?"
And depending on my answer, the application responds,
"Yes" being a confirmation and "No" being a cancellation of the request. What changes do I have to make?
Something like this?
private void sRecognize_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "Exit")
{
Choices sList = new Choices();
sList.Add(new String[] { "Yes"});
Grammar gr = new Grammar(new GrammarBuilder(sList));
sRecognize.RequestRecognizerUpdate();
sRecognize.LoadGrammar(gr);
sRecognize.SpeechRecognized += delegate(object s, SpeechRecognizedEventArgs args)
{
Application.Exit();
};
sRecognize.SetInputToDefaultAudioDevice();
sRecognize.RecognizeAsync(RecognizeMode.Multiple);
}
}
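An alternative that avoids reloading grammars and restarting recognition (calling RecognizeAsync on an engine that is already recognizing can throw) is to keep a single handler with a confirmation flag. This is only a sketch: it assumes "Yes" and "No" were added to the original Choices alongside "Exit", and it reuses the existing synth field to speak the question.

private bool awaitingExitConfirmation = false;

private void sRecognize_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    if (awaitingExitConfirmation)
    {
        if (e.Result.Text == "Yes")
        {
            Application.Exit();
        }
        else if (e.Result.Text == "No")
        {
            awaitingExitConfirmation = false; // cancel the request
        }
    }
    else if (e.Result.Text == "Exit")
    {
        awaitingExitConfirmation = true;
        synth.Speak("Are you sure you want to exit?");
    }
}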

Commands using speech recognition

I created a C# project using speech recognition. I have a form with a next and a last button. What I am trying to do is: when I say "next", it should take me to the next file, and when I say "back", it should go to the previous file. But when I debug the project it only shows me what I said instead of doing it. Does anyone know how I can fix it?
This is the code I made:
private void Form1_Load(object sender, EventArgs e)
{
SpeechRecognizer recognizer = new SpeechRecognizer();
Choices command = new Choices();
command.Add(new string[] { "next", "last", "first" });
GrammarBuilder gb = new GrammarBuilder();
gb.Append(command);
Grammar g = new Grammar(gb);
recognizer.LoadGrammar(g);
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(sre_SpeechRecognized);
}
void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
MessageBox.Show("Speech recognized: " + e.Result.Text);
}
}
A couple of years ago I did a case study on this subject. If you compare my code with yours you should spot the difference. The code below changes a light bulb's status.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Text;
using System.Windows.Forms;
using System.Speech.Recognition;
using System.Threading;
namespace SesTanima
{
public partial class Form1 : Form
{
private SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
LoadGrammars();
StartRecognition();
}
private void LoadGrammars()
{
Choices choices = new Choices( new string[] {"Lights on", "Exit", "Zoom out", "Zoom in", "Reset", "Lights off" } );
GrammarBuilder grammarBuilder = new GrammarBuilder(choices);
Grammar grammar = new Grammar(grammarBuilder);
recognizer.LoadGrammar(grammar);
}
private void StartRecognition()
{
recognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(recognizer_SpeechDetected);
recognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(recognizer_SpeechRecognitionRejected);
recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.RecognizeCompleted += new EventHandler<RecognizeCompletedEventArgs>(recognizer_RecognizeCompleted);
Thread t1 = new Thread(delegate()
{
recognizer.SetInputToDefaultAudioDevice();
recognizer.RecognizeAsync(RecognizeMode.Single);
});
t1.Start();
}
private void recognizer_SpeechDetected(object sender, SpeechDetectedEventArgs e)
{
textBox1.Text = "Recognizing voice command...";
}
private void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text == "Lights on")
{
pictureBox1.Image = Properties.Resources.lightsOn;
}
else if (e.Result.Text == "Lights off")
{
pictureBox1.Image = Properties.Resources.lightsOff;
}
else if ( e.Result.Text == "Exit" )
{
recognizer.Dispose();
Application.Exit();
}
else if ( e.Result.Text == "Zoom out" )
{
pictureBox1.Size = new System.Drawing.Size( 135, 107 );
}
else if ( e.Result.Text == "Zoom in" )
{
pictureBox1.Size = new System.Drawing.Size( 538, 426 );
}
else if ( e.Result.Text == "Reset" )
{
pictureBox1.Size = new System.Drawing.Size( 269, 213 );
}
textBox1.Text = e.Result.Text;
}
private void recognizer_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
{
textBox1.Text = "Failure.";
}
private void recognizer_RecognizeCompleted(object sender, RecognizeCompletedEventArgs e)
{
recognizer.RecognizeAsync();
}
}
}
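Applied back to the original question: keep the recognizer in a field so it stays alive for the lifetime of the form, and make the handler perform the navigation instead of only showing the recognized text. A rough sketch, where btnNext and btnLast are assumed to be the form's existing buttons:

// Keep the recognizer as a field so it is not collected after Form1_Load.
private SpeechRecognizer recognizer;

private void Form1_Load(object sender, EventArgs e)
{
    recognizer = new SpeechRecognizer();
    Choices command = new Choices(new string[] { "next", "last", "first" });
    recognizer.LoadGrammar(new Grammar(new GrammarBuilder(command)));
    recognizer.SpeechRecognized += sre_SpeechRecognized;
}

void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    switch (e.Result.Text)
    {
        case "next":
            btnNext.PerformClick(); // run the same code as clicking the button
            break;
        case "last":
            btnLast.PerformClick();
            break;
    }
}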
