I have the following code using the Speech Recognition library:
var listen = new SpeechRecognitionEngine();
var reader = new Choices(File.ReadAllLines(@"C:\words.txt"));
listen.LoadGrammar(new Grammar(new GrammarBuilder(reader)));
listen.SpeechRecognized += listen_SpeechRecognized;
listen.SpeechRecognitionRejected += listen_SpeechRecognitionRejected;
listen.SetInputToDefaultAudioDevice();
listen.RecognizeAsync(RecognizeMode.Multiple);
And I have an event listener like this...
static void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var talk = new SpeechSynthesizer();
    if (e.Result.Text == "Search Stock Symbol")
    {
        talk.Speak("What symbol?");
        //Do I have to create another event listener?
        //a Listener .. symbol = a.Result.Text
        //talk.Speak(GetQuote(symbol))
    }
}
Would I have to create an event listener for every portion of the "conversation"? Is there a better way if that is the case?
Example Conversation:
Me: Search Stock Symbol
Computer: What Symbol?
Me: AAPL
Computer: Apple is trading at ....
Nope, just the one; then vary what you do depending on what text was received. Somewhere earlier in your code:
List<string> stockSymbols = new List<string>();
stockSymbols.Add("AAPL");
Then
static string lastSpeechInput;

static void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var talk = new SpeechSynthesizer();
    switch (e.Result.Text) {
        case "Search Stock Symbol":
            talk.Speak("What symbol?");
            break;
        default:
            break;
    }
    if (stockSymbols.Contains(e.Result.Text) && lastSpeechInput == "Search Stock Symbol") {
        talk.Speak(getStockPrice(e.Result.Text));
    }
    lastSpeechInput = e.Result.Text;
}
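If the conversation grows past a couple of steps, tracking the last utterance as a raw string gets fragile. A minimal sketch of the same idea with an explicit state enum (getStockPrice is still a hypothetical helper, as above):
enum ConversationState { Idle, AwaitingSymbol }

static ConversationState state = ConversationState.Idle;

static void listen_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    var talk = new SpeechSynthesizer();
    switch (state)
    {
        case ConversationState.Idle:
            if (e.Result.Text == "Search Stock Symbol")
            {
                talk.Speak("What symbol?");
                state = ConversationState.AwaitingSymbol; // the next utterance is treated as a symbol
            }
            break;
        case ConversationState.AwaitingSymbol:
            if (stockSymbols.Contains(e.Result.Text)) // assumes stockSymbols above is declared static as well
                talk.Speak(getStockPrice(e.Result.Text)); // hypothetical helper, same as above
            state = ConversationState.Idle;
            break;
    }
}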
I am currently working on a project of mine that I call "Automated speech detector". Basically, this program sits in the system tray most of the time, just listening for user input.
I have now come to the conclusion that I will not be able to fill the "command" array with all the commands people want, so I have decided to integrate an "AddCommand" user input, where the user can enter a desired command themselves and the program will later do whatever I decide it to do. However, I really need help with this.
How can I make a string array method that takes one argument, where the argument is the user's input string "command", and adds that input to the string array? Is this possible?
This is my current code for the "default" commands I have set:
Choices commands = new Choices();
commands.Add(new string[] { "dollar", "euro", "hotmail", "notepad", "outlook", "onedrive", "discord" });
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
So it will work something like this, only the other array (like commands2) will be able to take one argument and insert it into the array. The code below is the whole project, if it's necessary to look at it.
public partial class Form1 : Form
{
public SpeechRecognitionEngine recEngine;
public static bool keyHold = false;
NotifyIcon IconPicture;
Icon ActiveIcon;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
#region Icon and windows system tray dropdown text & click events
//Creating icon and setting it to default.
ActiveIcon = new Icon("speak_lzW_icon.ico");
IconPicture = new NotifyIcon();
IconPicture.Icon = ActiveIcon;
//iconPicture.Visible = true;
//Creating menu item for window in system tray.
MenuItem ProgNameMenuItem = new MenuItem("Voice detection by: Lmannen");
MenuItem QuitMenuItem = new MenuItem("Quit");
ContextMenu contextMenu = new ContextMenu();
contextMenu.MenuItems.Add(ProgNameMenuItem);
contextMenu.MenuItems.Add(QuitMenuItem);
//Adding the icon to the system tray window.
IconPicture.ContextMenu = contextMenu;
//System tray click event handlers
QuitMenuItem.Click += QuitMenuItem_Click;
IconPicture.MouseDoubleClick += IconPicture_MouseDoubleClick1;
#endregion
#region SpeechRecognition commands & event handlers
recEngine = new SpeechRecognitionEngine();
recEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recEngine_SpeechRecognized);
recEngine.AudioStateChanged += new EventHandler<AudioStateChangedEventArgs>(recEngine_AudioStateChange);
Choices commands = new Choices();
commands.Add(new string[] { "dollar", "euro", "hotmail", "notepad", "outlook", "onedrive", "discord" });
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
recEngine.SetInputToDefaultAudioDevice();
recEngine.LoadGrammarAsync(grammar);
recEngine.RequestRecognizerUpdate();
recEngine.RecognizeAsync(RecognizeMode.Multiple);
#endregion
}
internal void recEngine_AudioStateChange(object sender, AudioStateChangedEventArgs e)
{
InputStatusLbl.Text = string.Format("{0}", e.AudioState);
}
internal static void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
switch(e.Result.Text)
{
case "notepad":
System.Diagnostics.Process.Start("notepad.exe");
break;
case "hotmail":
System.Diagnostics.Process.Start("https://outlook.live.com/owa/");
break;
case "outlook":
System.Diagnostics.Process.Start("https://outlook.live.com/owa/");
break;
case "ondrive":
System.Diagnostics.Process.Start("https://onedrive.live.com/");
break;
case "discord":
string name = Environment.UserName;
string path = string.Format(@"C:\Users\{0}\AppData\Local\Discord\app-0.0.300\Discord.exe", name);
System.Diagnostics.Process.Start(path);
break;
}
}
private void Form1_Resize(object sender, EventArgs e)
{
if(WindowState == FormWindowState.Minimized)
{
ShowInTaskbar = false;
ShowIcon = false;
IconPicture.Visible = true;
}
}
private void IconPicture_MouseDoubleClick1(object sender, MouseEventArgs e)
{
ShowInTaskbar = true;
IconPicture.Visible = false;
ShowIcon = true;
WindowState = FormWindowState.Normal;
}
private void QuitMenuItem_Click(object sender, EventArgs e)
{
IconPicture.Dispose();
this.Close();
}
private void addToolStripMenuItem_Click(object sender, EventArgs e)
{
string input = Microsoft.VisualBasic.Interaction.InputBox("Add a voice-command by text", "Command");
MessageBox.Show(input + " is now added to the command list");
}
}
}
Having some background on your task, I believe you need a Dictionary. It will be a public variable at the form level; the key will be the command and the value will be the path of execution. In the form, you'll initialize it with your default commands BEFORE assigning your events.
public Dictionary<string, string> Commands = new Dictionary<string, string>();
So in the form load (you'll need one of these per command):
Commands.Add("notepad", "notepad.exe");
Commands.Add("hotmail", "https://outlook.live.com/owa/");
Instead of a case statement, you will search the dictionary and, if the key exists, start the value. Assuming you have the dictionary called Commands, it would be:
string command = "";
if ( Commands.TryGetValue(e.Result.Text, out command))
System.Diagnostics.Process.Start(command)
The add command will pass in the command name and the application path, and add them to the dictionary:
Commands.Add(commandName, pathToCommand);
Note that when you do this, you should ALSO save the dictionary to a file in the user's local application data area and load it back on form load, so it's retained, but that's out of scope.
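A rough sketch of how the add flow could hang together, reusing the question's own addToolStripMenuItem_Click. RebuildGrammar is a hypothetical helper, and the grammar rebuild is the important assumption: the engine only recognizes words that are in a loaded grammar, so the new command has to end up there too.
private void addToolStripMenuItem_Click(object sender, EventArgs e)
{
    string input = Microsoft.VisualBasic.Interaction.InputBox("Add a voice-command by text", "Command");
    string path = Microsoft.VisualBasic.Interaction.InputBox("What should this command start?", "Path");

    if (!string.IsNullOrWhiteSpace(input) && !Commands.ContainsKey(input))
    {
        Commands.Add(input, path);   // remember the new command
        RebuildGrammar();            // make the engine actually listen for it
        MessageBox.Show(input + " is now added to the command list");
    }
}

private void RebuildGrammar()
{
    recEngine.UnloadAllGrammars();
    Choices commands = new Choices(Commands.Keys.ToArray()); // ToArray needs using System.Linq
    GrammarBuilder gBuilder = new GrammarBuilder();
    gBuilder.Append(commands);
    recEngine.LoadGrammarAsync(new Grammar(gBuilder));
}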
I am attempting to use a different input device with the Speech Recognition Engine. I have attempted to use NAudio to achieve this, using WaveIn and the DataAvailable event; however, I can't quite work out how to convert the buffer from the event into a stream usable with the speech recognition engine's SetInputToAudioStream(). My current code looks like:
using System.Speech.Recognition;
using NAudio.Wave;
private SpeechRecognitionEngine sre;
private WaveInEvent wi;
private Stream st;
static void main(string[] args) {
Choices words = new Choices(new string[] { "word", "test" });
Grammar g = new Grammar(words);
wi = new WaveInEvent();
wi.DeviceNumber = 0; // Default device
wi.DataAvailable += Wi_DataAvailable;
wi.StartRecording();
st = new MemoryStream();
sre = new SpeechRecognitionEngine();
sre.LoadGrammar(g);
//sre.SetInputToDefaultAudioDevice();
sre.SetInputToAudioStream(st, /* SpeechAudioFormatInfo */);
sre.SpeechRecognized += Sre_SpeechRecognized;
sre.RecognizeAsync(RecognizeMode.Multiple);
}
private void Wi_DataAvailable(object sender, WaveInEventArgs e) {
// Convert e.Buffer to Stream st
}
private void Sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e) {
Console.WriteLine(e.Result.Text);
}
Could anybody help me with this? Thanks.
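One approach that is often suggested (a sketch under assumptions, not a verified solution): feed the engine a Stream whose Read blocks until NAudio has delivered enough bytes, and write each DataAvailable buffer into it. A plain MemoryStream tends not to work, because the recognizer hits end-of-stream. SpeechStreamer is a made-up name here, and the 16 kHz, 16-bit, mono format is an assumption that has to match on both the WaveIn side and the SpeechAudioFormatInfo side.
using System.Collections.Generic;
using System.IO;
using System.Speech.AudioFormat;
using System.Threading;

// Sketch of a blocking stream: Write is called from the NAudio callback,
// Read blocks the recognizer until enough audio has arrived.
class SpeechStreamer : Stream
{
    private readonly object _sync = new object();
    private readonly Queue<byte> _queue = new Queue<byte>();

    public override bool CanRead { get { return true; } }
    public override bool CanWrite { get { return true; } }
    public override bool CanSeek { get { return false; } }
    public override long Length { get { return -1; } }
    public override long Position { get { return 0; } set { } }
    public override void Flush() { }
    public override long Seek(long offset, SeekOrigin origin) { return 0; }
    public override void SetLength(long value) { }

    public override void Write(byte[] buffer, int offset, int count)
    {
        lock (_sync)
        {
            for (int i = 0; i < count; i++) _queue.Enqueue(buffer[offset + i]);
            Monitor.Pulse(_sync); // wake up a waiting Read
        }
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        lock (_sync)
        {
            while (_queue.Count < count) Monitor.Wait(_sync); // block until enough audio arrives
            for (int i = 0; i < count; i++) buffer[offset + i] = _queue.Dequeue();
            return count;
        }
    }
}

// Inside main(), replacing the MemoryStream and the SetInputToAudioStream call:
wi.WaveFormat = new WaveFormat(16000, 16, 1);          // capture format must match the line below
st = new SpeechStreamer();
sre.SetInputToAudioStream(st,
    new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));

private void Wi_DataAvailable(object sender, WaveInEventArgs e) {
    st.Write(e.Buffer, 0, e.BytesRecorded);            // push each captured buffer into the stream
}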
I'm trying to write code that will open Notepad++, write to a file, and close it afterward. My code is included below. I'm totally new to C#. Is there any library or way to do this?
// Button Reference
private void button1_Click(object sender, EventArgs e)
{
if (button1.Text.Equals("Enable Voice Control"))
{
button1.Text = "Stop Voice Control";
recEngine.RecognizeAsync(RecognizeMode.Multiple);
}
else
{
button1.Text = "Enable Voice Control";
recEngine.RecognizeAsyncStop();
}
}
public void Form1_Load(object sender, EventArgs e)
{
Choices commands = new Choices();
commands.Add(myCommands);
GrammarBuilder gBuilder = new GrammarBuilder();
gBuilder.Append(commands);
Grammar grammar = new Grammar(gBuilder);
recEngine.LoadGrammarAsync(grammar);
recEngine.SetInputToDefaultAudioDevice();
recEngine.SpeechRecognized += recEngine_SpeechRecognized;
}
void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Process cmd = new Process();
cmd.StartInfo.FileName = @"notepad++.exe";
//cmd.StartInfo.Arguments = @"\Write.txt";
cmd.Start();
cmd.CloseMainWindow();
cmd.WaitForExit();
cmd.Refresh();
//if (cmd.StandardError != null)
//Console.WriteLine(cmd.StandardError.ReadToEnd());
var result = e.Result;
var i = 0;
foreach (var command in myCommands)
{
if (command.StartsWith("close"))
{
this.Close();
//cmd.StartInfo.FileName = #"notepad++";
cmd.Kill();
}
if (command.StartsWith("--") || command == string.Empty) continue; // Skip commentBlocks and skipEmptylines
var parts = command.Split(new char[] { '|' }); // Split the lines
i++;
if (command.Equals(result.Text))
{
Console.WriteLine("Command is {0}: {1}", i, command);
break;
}
}
}
private void richTextBox1_TextChanged(object sender, EventArgs e)
{
}
You don't need to open Notepad++ and write to the file through it. You can do it programmatically:
// Create a file to write to.
string createText = "Hello and Welcome" + Environment.NewLine;
File.WriteAllText(path, createText);
// Open the file to read from.
string readText = File.ReadAllText(path);
You can use the code below to close Notepad:
Process[] processes = Process.GetProcessesByName("notepad");
foreach (var process in processes)
{
process.Kill();
}
It will close all instances of Notepad, so be sure the only Notepad instances running are yours (opened by your speech recognition program).
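A safer variant (just a sketch; it assumes you keep the Process object returned when you launch Notepad) closes only the instance your own program started:
Process notepad = Process.Start("notepad.exe"); // keep this reference when launching

// ... later, when the "close" command is recognized:
if (notepad != null && !notepad.HasExited)
{
    if (!notepad.CloseMainWindow()) // ask it to close politely first
        notepad.Kill();             // force it only if there was no main window to ask
}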
If you actually do not want to open Notepad and your requirement is just to capture text and write it to a file programmatically, then use the code below:
// Create a file to write to.
string createText = "Hello World"; // Replace with voice captured text
File.WriteAllText(path, createText);
// Open the file to read from.
string readText = File.ReadAllText(path);
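If each recognized phrase should be appended instead of overwriting the file, a small variation (the path here is just an example location):
void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string path = @"C:\Temp\Write.txt"; // example path, use whatever file you like
    File.AppendAllText(path, e.Result.Text + Environment.NewLine); // one recognized phrase per line
}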
I am trying to take speech input, convert it to a string, and show it in the RichTextBox control. I have read about speech synthesis and voice recognition in several articles, where I learned how to get commands via speech, but I want to write to the RichTextBox control after my command "Write" is recognized. Is that possible?
Here is the code, if it helps to understand what I am trying to achieve and what I have done so far.
Object declarations:
PromptBuilder pb = new PromptBuilder();
SpeechSynthesizer ss = new SpeechSynthesizer(); // used by SpeakAsync in the handler below
SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
Choices clist = new Choices();
The code for enabling voice input:
private void btnEnableVoice_Click(object sender, EventArgs e)
{
btnEnableVoice.Enabled = false;
btnDisableVoice.Enabled = true;
/////////////Adding commands in a list of type Choices///////////////////////
clist.Add(new string[] { "Is it working", "Write" });
Grammar gr = new Grammar(new GrammarBuilder(clist));
try
{
recognizer.RequestRecognizerUpdate(); ///////starting engine
recognizer.LoadGrammar(gr);
recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
recognizer.SetInputToDefaultAudioDevice();
recognizer.RecognizeAsync(RecognizeMode.Multiple);
}
catch (Exception ex)
{
MessageBox.Show(ex.Message);
}
}
void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
switch (e.Result.Text.ToString())
{
case "Is it working":
ss.SpeakAsync("Yes its working");
break;
case "Write":
richTextBox1.Text += ""; //Speech to text input here
break;
}
}
First you need to construct the grammar in a proper way to allow dictation; see the reference http://msdn.microsoft.com/en-us/library/ms576565(v=vs.110).aspx:
Choices clist = new Choices();
clist.Add(new string[] { "Is it working", "Write" });
GrammarBuilder bl = new GrammarBuilder(clist);
bl.AppendDictation();
Grammar gr = new Grammar(bl);
To parse the result you need something like:
void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string result = e.Result.Text;
    if (result.StartsWith("Write")) {
        richTextBox1.Text += result.Substring(6); // skip "Write " (6 characters including the trailing space)
    } else if (result.StartsWith("Is it working")) {
        ss.SpeakAsync("Yes its working");
    }
}
I need to write an application which uses a speech recognition engine.
How can I enter different values into multiple textboxes through voice in C#?
I can enter a value into a single textbox but not into a second textbox. I have the following code for entering a value into a single textbox:
private SpeechRecognitionEngine rec;
private void voice()
{
rec = new SpeechRecognitionEngine();
rec.SetInputToDefaultAudioDevice();
Choices choice = new Choices("apple","Orange","Onion");
GrammarBuilder gr = new GrammarBuilder(choice);
Grammar grammar = new Grammar(gr);
rec.LoadGrammar(grammar);
rec.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(rec_SpeechRecogonized);
rec.RecognizeAsync(RecognizeMode.Multiple);
}
void rec_SpeechRecogonized(object sender, SpeechRecognizedEventArgs e)
{
foreach (RecognizedWordUnit word in e.Result.Words)
{
textBox1.Text = word.Text;
}
}
I would do something like this (very simple example):
private SpeechRecognitionEngine rec;
private void voice()
{
rec = new SpeechRecognitionEngine();
rec.SetInputToDefaultAudioDevice();
Choices choice = new Choices("apple","Orange","Onion", "next");
GrammarBuilder gr = new GrammarBuilder(choice);
Grammar grammar = new Grammar(gr);
rec.LoadGrammar(grammar);
rec.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(rec_SpeechRecogonized);
rec.RecognizeAsync(RecognizeMode.Multiple);
}
private TextBox currentInput;
void rec_SpeechRecogonized(object sender, SpeechRecognizedEventArgs e)
{
if (currentInput == null) currentInput = textBox1;
foreach (RecognizedWordUnit word in e.Result.Words)
{
if (word.Text = "next") { currentInput = textBox2; }
else { currentInput.Text = word.Text; }
}
}