I'm developing a Skype plugin that should play an audio file during a call. The audio player uses the NAudio library:
public class AudioPlayback : IDisposable, IPlayer
{
WaveStream _outStream;
IWavePlayer _player;
IWaveIn _recorder;
public event Action<byte[], int> DataAvailable;
public AudioPlayback()
{
_recorder = new WaveInEvent();
_recorder.WaveFormat = new WaveFormat(16000, 16, 1);
_recorder.DataAvailable += OnRecorderDataAvailable;
}
private void OnRecorderDataAvailable(object sender, WaveInEventArgs e)
{
if (DataAvailable != null)
{
DataAvailable(e.Buffer, e.BytesRecorded);
}
}
public void LoadFile(string fileName)
{
_outStream = new Mp3FileReader(fileName);
if (_outStream.WaveFormat.Encoding != WaveFormatEncoding.Pcm)
{
_outStream = WaveFormatConversionStream.CreatePcmStream(_outStream);
}
}
private void CreatePlayer()
{
if (_player == null)
{
var waveOut = new WaveOut();
waveOut.DesiredLatency = 200;
waveOut.NumberOfBuffers = 2;
waveOut.DeviceNumber = 0;
_player = waveOut;
}
}
public void Play()
{
CreatePlayer();
if (_player.PlaybackState != PlaybackState.Playing)
{
if (_player.PlaybackState == PlaybackState.Stopped)
_recorder.StartRecording();
_player.Init(_outStream);
_player.Play();
}
}
public void Dispose()
{
// Assumed minimal implementation: the original post omits Dispose,
// but the class declares IDisposable and holds three disposable members.
if (_recorder != null) _recorder.Dispose();
if (_player != null) _player.Dispose();
if (_outStream != null) _outStream.Dispose();
}
}
In the Skype plugin class I create a NetworkStream and a TcpListener, use the event from the player to get the buffer data, and write it to the network stream:
WriteToStream(buffer, 0, num);
When the call starts, I change the input for Skype:
call.InputDevice[TCallIoDeviceType.callIoDeviceTypeSoundcard] = "";
call.InputDevice[TCallIoDeviceType.callIoDeviceTypePort] = _inputPort.ToString();
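Roughly, the wiring looks like this (a minimal sketch; the class and member names are illustrative, not the exact plugin code):

using System;
using System.Net;
using System.Net.Sockets;

// Hypothetical helper: accepts Skype's connection on a local port and
// relays the player's captured PCM buffers into the NetworkStream.
class AudioStreamServer
{
    private TcpListener _listener;
    private NetworkStream _stream;

    // Start listening; the returned port is what gets passed to
    // call.InputDevice[TCallIoDeviceType.callIoDeviceTypePort].
    public int Start()
    {
        _listener = new TcpListener(IPAddress.Loopback, 0); // 0 = pick any free port
        _listener.Start();
        _listener.BeginAcceptTcpClient(OnAccept, null);
        return ((IPEndPoint)_listener.LocalEndpoint).Port;
    }

    private void OnAccept(IAsyncResult ar)
    {
        var client = _listener.EndAcceptTcpClient(ar);
        _stream = client.GetStream();
    }

    // Subscribed to AudioPlayback.DataAvailable.
    public void WriteToStream(byte[] buffer, int count)
    {
        if (_stream != null)
            _stream.Write(buffer, 0, count);
    }
}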
I was fighting with this for several hours. I finally got the sound on the other Skype end, but only after turning on Stereo Mixer (Recording devices).
The question: is this the right way? I don't really like playing the sound and capturing it again, but there is one positive thing: I capture with exactly the parameters appropriate for Skype.
I am using VideoLan.VLC to get 20 seconds of an audio stream every 30 seconds.
I have a loop, something like this:
private LibVLC libvlc = new LibVLC();
private MediaPlayer mediaPlayer = null;
private Media Media = null;
private string AudioSampleFileName
{
    get { return "audio_" + DateTime.Now.ToString("yyyyMMdd_HHmmss") + ".ts"; }
}
public void Start(){
mediaPlayer = new MediaPlayer(libvlc);
while(...){
Get_20_seconds_audio_sample();
Wait_30_Seconds();
}
}
public void Get_sample(Uri playPathUri, string FileName)
{
var currentDirectory = Path.GetDirectoryName(Assembly.GetEntryAssembly().Location);
var destination = Path.Combine(currentDirectory, FileName);
var mediaOptions = new string[]
{
":sout=#file{dst=" + destination + ",channels=1,samplerate=16000}",
":sout-keep"
};
if (Media == null)
{
Media = new Media(libvlc, playPathUri, mediaOptions);
mediaPlayer.Media = Media;
}
else {
Media.AddOption(":sout=#file{dst=" + destination + ",channels=1,samplerate=16000}");
}
mediaPlayer.Play();
}
public void Get_20_seconds_audio_sample(){
Get_sample(RadioURI,AudioSampleFileName);
Wait_20_seconds();
Stop();
}
public void Stop()
{
mediaPlayer.Stop();
}
The problem is that the radio stream usually starts with a commercial that lasts about 25 seconds, and every sample plays the commercial. It seems that Stop() closes the stream until Play() is called again, which restarts it from the beginning. I tried to pause the audio but, well, it makes little sense to pause and play.
I can accept getting the commercial in the first sample only, but after that I want the regular radio audio. Is there a way not to close the stream every time? I am not tied to the VideoLan dll, so I can start from scratch if you have a better way to do it.
Use audio callbacks and save the audio stream yourself. Full sample:
using System;
using System.Runtime.InteropServices;
using LibVLCSharp.Shared;
using NAudio.Wave;

class Program
{
// This sample shows you how you can use SetAudioFormatCallback and SetAudioCallbacks. It does two things:
// 1) Play the sound from the specified video using NAudio
// 2) Extract the sound into a file using NAudio
static void Main(string[] args)
{
Core.Initialize();
using var libVLC = new LibVLC(enableDebugLogs: true);
using var media = new Media(libVLC,
new Uri("http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4"),
":no-video");
using var mediaPlayer = new MediaPlayer(media);
using var outputDevice = new WaveOutEvent();
var waveFormat = new WaveFormat(8000, 16, 1);
var writer = new WaveFileWriter("sound.wav", waveFormat);
var waveProvider = new BufferedWaveProvider(waveFormat);
outputDevice.Init(waveProvider);
mediaPlayer.SetAudioFormatCallback(AudioSetup, AudioCleanup);
mediaPlayer.SetAudioCallbacks(PlayAudio, PauseAudio, ResumeAudio, FlushAudio, DrainAudio);
mediaPlayer.Play();
mediaPlayer.Time = 20_000; // Seek the video 20 seconds
outputDevice.Play();
Console.WriteLine("Press 'q' to quit. Press any other key to pause/play.");
while (true)
{
if (Console.ReadKey().KeyChar == 'q')
break;
if (mediaPlayer.IsPlaying)
mediaPlayer.Pause();
else
mediaPlayer.Play();
}
void PlayAudio(IntPtr data, IntPtr samples, uint count, long pts)
{
int bytes = (int)count * 2; // (16 bit, 1 channel)
var buffer = new byte[bytes];
Marshal.Copy(samples, buffer, 0, bytes);
waveProvider.AddSamples(buffer, 0, bytes);
writer.Write(buffer, 0, bytes);
}
int AudioSetup(ref IntPtr opaque, ref IntPtr format, ref uint rate, ref uint channels)
{
channels = (uint)waveFormat.Channels;
rate = (uint)waveFormat.SampleRate;
return 0;
}
void DrainAudio(IntPtr data)
{
writer.Flush();
}
void FlushAudio(IntPtr data, long pts)
{
writer.Flush();
waveProvider.ClearBuffer();
}
void ResumeAudio(IntPtr data, long pts)
{
outputDevice.Play();
}
void PauseAudio(IntPtr data, long pts)
{
outputDevice.Pause();
}
void AudioCleanup(IntPtr opaque) { }
}
}
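Applied to the radio-sampling question above, the key point is that the stream never has to be stopped: leave the MediaPlayer running and gate what the writer saves. A rough sketch of that idea, replacing the PlayAudio callback and the WaveFileWriter setup from the sample (the timer, flag, and file naming are my own assumptions):

var capturing = false;
WaveFileWriter sampleWriter = null;
var gate = new object();

var timer = new System.Timers.Timer(30_000); // open a capture window every 30 s
timer.Elapsed += (s, e) =>
{
    lock (gate)
    {
        sampleWriter = new WaveFileWriter(
            "audio_" + DateTime.Now.ToString("yyyyMMdd_HHmmss") + ".wav", waveFormat);
        capturing = true;
    }
    System.Threading.Tasks.Task.Delay(20_000).ContinueWith(_ =>
    {
        lock (gate)
        {
            capturing = false; // close the 20-second window
            sampleWriter.Dispose();
            sampleWriter = null;
        }
    });
};
timer.Start();

void PlayAudio(IntPtr data, IntPtr samples, uint count, long pts)
{
    int bytes = (int)count * 2; // 16 bit, 1 channel
    var buffer = new byte[bytes];
    Marshal.Copy(samples, buffer, 0, bytes);
    lock (gate)
    {
        if (capturing)
            sampleWriter.Write(buffer, 0, bytes); // only save while a window is open
    }
}

The commercial still plays into the first window, but because the connection stays open, later windows contain the live radio audio.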
I have finally built a program that listens to the internal audio loopback using NAudio and outputs recognized text. The problem is that it listens and always says, e.g.:
Recognized text: had
Recognized text: had
Recognized text: had
Recognized text: had
Recognized text: had had phone Le K add phone Laton
Recognized text: had phone looked had phone looked had phone looked had phone lo
oked zone
Recognized text: had phone lines to had, had phone looked had phone looked had p
hone line had phone
Recognized text: had phone line had phone looked had phone
Recognized text: had phone looked had phone looked had phone line had phone
Recognized text: had phone looked had phone look to had pot they had phone lit o
nly had phone
Recognized text: had phone line had phone looked had phone line to had to had ph
one
Recognized text: had phone line had phone looked had phone looked had phone
Recognized text: had phone line had phone looked had phone looked had phone line
10 only T had phone
Recognized text: had phone line had
Recognized text: had phone line had phone looked had phone line had
Recognized text: had phone Le tone looked had
Recognized text: had phone looked had phone looked had phone
Recognized text: had phone line had phone line had phone licked had phone
Recognized text: had phone lines to had popped the own
and similar nonsense. Even when I pause the audio it just shows "Recognized text: had" or "an" again and again. When I unpause the audio it keeps unsuccessfully recognizing the internal audio. Is there a way I can fix this, or at least get a WAV of what it is trying to send to the Microsoft speech recognizer?
using System;
using System.Speech.Recognition;
using NAudio.Wave;
using NAudio.CoreAudioApi.Interfaces;
using NAudio.CoreAudioApi;
using System.IO;
using System.Speech.AudioFormat;
using NAudio.Wave.SampleProviders;
using NAudio.Utils;
using System.Threading;
using System.Collections.Generic;
namespace SpeechRecognitionApp
{
class SpeechStreamer : Stream
{
private AutoResetEvent _writeEvent;
private List<byte> _buffer;
private int _buffersize;
private int _readposition;
private int _writeposition;
private bool _reset;
public SpeechStreamer(int bufferSize)
{
_writeEvent = new AutoResetEvent(false);
_buffersize = bufferSize;
_buffer = new List<byte>(_buffersize);
for (int i = 0; i < _buffersize; i++)
_buffer.Add(new byte());
_readposition = 0;
_writeposition = 0;
}
public override bool CanRead
{
get { return true; }
}
public override bool CanSeek
{
get { return false; }
}
public override bool CanWrite
{
get { return true; }
}
public override long Length
{
get { return -1L; }
}
public override long Position
{
get { return 0L; }
set { }
}
public override long Seek(long offset, SeekOrigin origin)
{
return 0L;
}
public override void SetLength(long value)
{
}
public override int Read(byte[] buffer, int offset, int count)
{
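// Note: 'offset' below is applied to the internal ring-buffer index rather than
// to the destination buffer, and the method always reports 'count' bytes read,
// so a caller can never detect end-of-stream (see the answer below).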
int i = 0;
while (i < count && _writeEvent != null)
{
if (!_reset && _readposition >= _writeposition)
{
_writeEvent.WaitOne(100, true);
continue;
}
buffer[i] = _buffer[_readposition + offset];
_readposition++;
if (_readposition == _buffersize)
{
_readposition = 0;
_reset = false;
}
i++;
}
return count;
}
public override void Write(byte[] buffer, int offset, int count)
{
for (int i = offset; i < offset + count; i++)
{
_buffer[_writeposition] = buffer[i];
_writeposition++;
if (_writeposition == _buffersize)
{
_writeposition = 0;
_reset = true;
}
}
_writeEvent.Set();
}
public override void Close()
{
_writeEvent.Close();
_writeEvent = null;
base.Close();
}
public override void Flush()
{
}
}
class FakeStreamer : Stream
{
public bool bExit = false;
Stream stream;
Stream client;
public FakeStreamer(Stream client)
{
this.client = client;
this.stream = client;
}
public override bool CanRead
{
get { return stream.CanRead; }
}
public override bool CanSeek
{
get { return false; }
}
public override bool CanWrite
{
get { return stream.CanWrite; }
}
public override long Length
{
get { return -1L; }
}
public override long Position
{
get { return 0L; }
set { }
}
public override long Seek(long offset, SeekOrigin origin)
{
return 0L;
}
public override void SetLength(long value)
{
stream.SetLength(value);
}
public override int Read(byte[] buffer, int offset, int count)
{
int len = 0, c = count;
while (c > 0 && !bExit)
{
//try {
len = stream.Read(buffer, offset, c);
/*}
catch (Exception e)
{
Console.WriteLine("ouch");
}
if (!client.Connected || len == 0)
{
//Exit read loop
return 0;
}*/
offset += len;
c -= len;
}
return count;
}
public override void Write(byte[] buffer, int offset, int count)
{
stream.Write(buffer, offset, count);
}
public override void Close()
{
stream.Close();
base.Close();
}
public override void Flush()
{
stream.Flush();
}
}
class Program
{
static void Main(string[] args)
{
// Create an in-process speech recognizer for the en-US locale.
using (
SpeechRecognitionEngine recognizer =
new SpeechRecognitionEngine(
new System.Globalization.CultureInfo("en-US")))
{
// Create and load a dictation grammar.
recognizer.LoadGrammar(new DictationGrammar());
// Add a handler for the speech recognized event.
recognizer.SpeechRecognized +=
new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
// Configure input to the speech recognizer.
//recognizer.SetInputToDefaultAudioDevice();
WasapiLoopbackCapture capture = new WasapiLoopbackCapture();
BufferedWaveProvider WaveBuffer = new BufferedWaveProvider(capture.WaveFormat);
WaveBuffer.DiscardOnBufferOverflow = true;
//WaveBuffer.ReadFully = false;
WaveToSampleProvider sampleStream = new WaveToSampleProvider(WaveBuffer);
StereoToMonoSampleProvider monoStream = new StereoToMonoSampleProvider(sampleStream)
{
LeftVolume = 1f,
RightVolume = 1f
};
//Downsample to 8000 https://stackoverflow.com/questions/48233099/capture-audio-from-wasapiloopbackcapture-and-convert-to-mulaw
WdlResamplingSampleProvider resamplingProvider = new WdlResamplingSampleProvider(monoStream, 16000);
SampleToWaveProvider16 ieeeToPcm = new SampleToWaveProvider16(resamplingProvider);
var arr = new byte[128];
Stream captureConvertStream = new System.IO.MemoryStream();
capture.StartRecording();
//outputStream = new MuLawConversionProvider(ieeeToPcm);
Stream captureStream = new System.IO.MemoryStream();
//Stream buffStream = new FakeStreamer(captureStream);
capture.DataAvailable += (s, a) =>
{
//It is getting here.
//captureStream.Write(a.Buffer, 0, a.BytesRecorded);
WaveBuffer.AddSamples(a.Buffer, 0, a.BytesRecorded);
};
Console.WriteLine(capture.WaveFormat.AverageBytesPerSecond);
Console.WriteLine(capture.WaveFormat.BitsPerSample);
//var newFormat = new WaveFormat(8000, 16, 1);
//using (var conversionStream = new WaveFormatConversionStream(newFormat, capture)
//capture.StartRecording();
//using (var resampler = new MediaFoundationResampler(new NAudio.Wave.RawSourceWaveStream(captureStream, capture.WaveFormat), newFormat))
//{
//resampler.ResamplerQuality = 60;
//WaveFileWriter.WriteWavFileToStream(captureConvertStream, resampler);
//recognizer.SetInputToDefaultAudioDevice();
//Stream buffStream = new FakeStreamer(captureConvertStream);
Stream buffStream = new SpeechStreamer(2048);
//recognizer.SetInputToWaveStream(buffStream);
recognizer.SetInputToAudioStream(buffStream, new SpeechAudioFormatInfo(
16000, AudioBitsPerSample.Eight, AudioChannel.Mono));
// Start asynchronous, continuous speech recognition.
recognizer.RecognizeAsync(RecognizeMode.Multiple);
/*System.Threading.Thread.Sleep(5000);
works when playing anything
var floata = new float[128];
while(monoStream.Read(floata, 0, floata.Length) > 0 )
{
Console.WriteLine(arr.Length);
}*/
while (ieeeToPcm.Read(arr, 0, arr.Length) > 0)
{
//Console.Write("Writing PCM ");
//Console.WriteLine(arr.Length);
//captureConvertStream.Write(arr, 0, arr.Length);
buffStream.Write(arr, 0, arr.Length);
}
Console.WriteLine("end");
/*capture.StartRecording();
//Never getting to the resampler, the read is always zero!? even if waiting 5s for the audio to buffer.
System.Threading.Thread.Sleep(5000);
var arr = new byte[128];
while (resampler.Read(arr, 0, arr.Length) > 0)
{
captureConvertStream.Write(arr, 0, arr.Length);
Console.WriteLine("Never getting here");
}
// Keep the console window open.
while (true)
{
Console.ReadLine();
}*/
//}
}
}
// Handle the SpeechRecognized event.
static void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
Console.WriteLine("Recognized text: " + e.Result.Text);
}
}
}
That SpeechStreamer class has some problems; I cannot really see what its purpose is, though I tried. Also, looking at wave-file dumps from your implementation, the audio is really choppy, with long pauses between the samples. This might be what is throwing the speech recognizer off. This is an example: Windows Volume Adjustment Sound From Your Code
As you may hear, it is pretty choppy, with a lot of silence in between. The voice recognition part recognizes this as: "ta ta ta ta ta ta..."
I had to rewrite your code a bit to dump a wave file, since the Read method of your SpeechStreamer causes an eternal loop when used to read its contents.
To dump a wave file you could do the following:
var buffer = new byte[2048];
using (var writer = new WaveFileWriter("tmp.wav", ieeeToPcm.WaveFormat))
{
//buffStream is changed to a MemoryStream for this to work.
buffStream.Seek(0,SeekOrigin.Begin);
while (buffStream.Read(buffer, 0, buffer.Length)>0)
{
writer.Write(buffer, 0, buffer.Length);
}
}
Or you can do it when you read from your SampleToWaveProvider16:
var writer = new WaveFileWriter("dump.wav", ieeeToPcm.WaveFormat);
while (ieeeToPcm.Read(arr, 0, arr.Length) > 0)
{
if (Console.KeyAvailable && Console.ReadKey().Key == ConsoleKey.Escape)
break;
buffStream.Write(arr, 0, arr.Length);
writer.Write(arr, 0, arr.Length);
}
I just added the ability to hit Escape to exit the loop.
Now I do wonder why you are using NAudio at all. Why not use the methods native to the System.Speech API?
using System;
using System.Globalization;
using System.Speech.Recognition;
using System.Threading;

class Program
{
private static ManualResetEvent _done;
static void Main(string[] args)
{
_done = new ManualResetEvent(false);
using (SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new CultureInfo("en-US")))
{
recognizer.LoadGrammar(new DictationGrammar());
recognizer.SpeechRecognized += RecognizedSpeech;
recognizer.SetInputToDefaultAudioDevice();
recognizer.RecognizeAsync(RecognizeMode.Multiple);
_done.WaitOne();
}
}
private static void RecognizedSpeech(object sender, SpeechRecognizedEventArgs e)
{
if (e.Result.Text.Contains("exit"))
{
_done.Set();
}
Console.WriteLine(e.Result.Text);
}
}
I've been working on a project that reads a person's heart rate from a Polar H7 HRM. I've been successful in connecting the device and getting the heart rate, which the program shows as text in the UI. However, there are instances where the program suddenly stops getting input from the device.
I have already checked the connection of the device to my Win 10 laptop and saw that it was stable, and there were no exceptions thrown by the program. The text simply stops changing.
Here is the code I've written:
public sealed partial class MainPage : Page
{
private GattDeviceService device;
public MainPage()
{
this.InitializeComponent();
init();
}
async void init()
{
var devices = await DeviceInformation.FindAllAsync(GattDeviceService.GetDeviceSelectorFromUuid(GattServiceUuids.HeartRate));
Status.Text = devices.Count.ToString();
device = await GattDeviceService.FromIdAsync(devices[0].Id);
enableSensor();
}
private async void enableSensor()
{
IReadOnlyList<GattCharacteristic> characteristicList;
characteristicList = device.GetAllCharacteristics();
if (characteristicList != null)
{
GattCharacteristic characteristic = characteristicList[0];
if (characteristic.CharacteristicProperties.HasFlag(GattCharacteristicProperties.Notify))
{
characteristic.ValueChanged += SendNotif;
await characteristic.WriteClientCharacteristicConfigurationDescriptorAsync(GattClientCharacteristicConfigurationDescriptorValue.Notify);
}
}
}
async void SendNotif(GattCharacteristic sender, GattValueChangedEventArgs eventArgs)
{
if (eventArgs.CharacteristicValue.Length != 0) {
byte[] hrData = new byte[eventArgs.CharacteristicValue.Length];
DataReader.FromBuffer(eventArgs.CharacteristicValue).ReadBytes(hrData);
var hrValue = ProcessData(hrData);
await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
{
Status.Text = hrValue.ToString();
});
}
}
private int ProcessData(byte[] data)
{
// Heart Rate profile defined flag values
const byte heartRateValueFormat = 0x01;
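// Per the Bluetooth Heart Rate Profile, bit 0 of the flags byte selects the
// value format: 0 = UINT8 heart rate, 1 = UINT16 heart rate.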
byte currentOffset = 0;
byte flags = data[currentOffset];
bool isHeartRateValueSizeLong = ((flags & heartRateValueFormat) != 0);
currentOffset++;
ushort heartRateMeasurementValue;
if (isHeartRateValueSizeLong)
{
heartRateMeasurementValue = (ushort)((data[currentOffset + 1] << 8) + data[currentOffset]);
currentOffset += 2;
}
else
{
heartRateMeasurementValue = data[currentOffset];
}
return heartRateMeasurementValue;
}
}
So I have
IWavePlayer waveOutDevice;
WaveStream mainOutputStream;
WaveChannel32 volumeStream;
private WaveStream CreateInputStream(string fileName)
{
WaveChannel32 inputStream;
if (fileName.EndsWith(".mp3"))
{
WaveStream mp3Reader = new Mp3FileReader(fileName);
inputStream = new WaveChannel32(mp3Reader);
}
else
{
throw new InvalidOperationException("Unsupported extension");
}
volumeStream = inputStream;
return volumeStream;
}
private void Stop()
{
if (waveOutDevice != null)
{
waveOutDevice.Stop();
}
if (mainOutputStream != null)
{
// this one really closes the file and ACM conversion
volumeStream.Close();
volumeStream = null;
// this one does the metering stream
mainOutputStream.Close();
mainOutputStream = null;
}
if (waveOutDevice != null)
{
waveOutDevice.Dispose();
waveOutDevice = null;
}
}
private void Play(string was)
{
waveOutDevice = new WaveOut();
mainOutputStream = CreateInputStream(was);
waveOutDevice.Init(mainOutputStream);
waveOutDevice.Play();
}
private void Form1_Load(object sender, EventArgs e)
{
Play(#"E:\Eigene Audiodateien\Musik\Alben\Pur\Abenteuerland\ - - .mp3");
}
private void button1_Click(object sender, EventArgs e)
{
Stop();
}
There is a Stop button (button1) which stops playback. When the form is loaded, the file is played. While the file is playing I want to get the current volume of the file by running a function. So what does a function like this have to look like at "...."?
private int currentVolumeLevel(...some suitable parameters...)
{
int currentVolumeLevelValue = 0;
//....
return currentVolumeLevelValue;
}
I am not talking about the volume level you can adjust with Windows' sound controls. I am talking about the currently played sound file's volume at the very position that is playing right now, based on something like a byte[] array.
The NAudioDemo shows how to do this. Look in AudioPlaybackPanel.cs at how a MeteringSampleProvider is added to the playback pipeline. MeteringSampleProvider periodically raises StreamVolume events telling you the maximum sample value received in the last 100 ms (this is configurable). You will need to decide whether to place the MeteringSampleProvider before or after any software volume adjustment (for waveform drawing it is usually before; for volume metering it is usually after).
Here's a working WindowsForms demo, writing the stream volume to the Console:
var player = new WaveOut();
var file = new AudioFileReader(@"test.mp3");
var meter = new MeteringSampleProvider(file);
meter.StreamVolume += (s,e) => Console.WriteLine("{0} - {1}", e.MaxSampleValues[0],e.MaxSampleValues[1]);
player.Init(new SampleToWaveProvider(meter));
var form = new Form();
form.Load += (s,e) => player.Play();
form.FormClosed += (s,e) => player.Dispose();
form.ShowDialog();
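If you want to see what MeteringSampleProvider is doing under the hood, the per-buffer computation is essentially a peak scan over the float samples (a simplified sketch, not the library code):

// In essence what MeteringSampleProvider reports: the peak of each buffer.
// 'samples' is a float[] block read from an ISampleProvider (one value per sample).
static float Peak(float[] samples, int count)
{
    float max = 0f;
    for (int i = 0; i < count; i++)
    {
        float abs = Math.Abs(samples[i]);
        if (abs > max) max = abs;
    }
    return max; // 0.0 (silence) .. 1.0 (full scale)
}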
At the moment I'm trying to figure out how to play a wave file in C# by filling the secondary buffer with data from the wave file through threading and then playing it.
Any help or sample code I can use?
Thanks.
Sample code being used:
public delegate void PullAudio(short[] buffer, int length);
public class SoundPlayer : IDisposable
{
private Device soundDevice;
private SecondaryBuffer soundBuffer;
private int samplesPerUpdate;
private AutoResetEvent[] fillEvent = new AutoResetEvent[2];
private Thread thread;
private PullAudio pullAudio;
private short channels;
private bool halted;
private bool running;
public SoundPlayer(Control owner, PullAudio pullAudio, short channels)
{
this.channels = channels;
this.pullAudio = pullAudio;
this.soundDevice = new Device();
this.soundDevice.SetCooperativeLevel(owner, CooperativeLevel.Priority);
// Set up our wave format to 44,100Hz, with 16 bit resolution
WaveFormat wf = new WaveFormat();
wf.FormatTag = WaveFormatTag.Pcm;
wf.SamplesPerSecond = 44100;
wf.BitsPerSample = 16;
wf.Channels = channels;
wf.BlockAlign = (short)(wf.Channels * wf.BitsPerSample / 8);
wf.AverageBytesPerSecond = wf.SamplesPerSecond * wf.BlockAlign;
this.samplesPerUpdate = 512;
// Create a buffer with 2 seconds of sample data
BufferDescription bufferDesc = new BufferDescription(wf);
bufferDesc.BufferBytes = this.samplesPerUpdate * wf.BlockAlign * 2;
bufferDesc.ControlPositionNotify = true;
bufferDesc.GlobalFocus = true;
this.soundBuffer = new SecondaryBuffer(bufferDesc, this.soundDevice);
Notify notify = new Notify(this.soundBuffer);
fillEvent[0] = new AutoResetEvent(false);
fillEvent[1] = new AutoResetEvent(false);
// Set up two notification events, one at halfway, and one at the end of the buffer
BufferPositionNotify[] posNotify = new BufferPositionNotify[2];
posNotify[0] = new BufferPositionNotify();
posNotify[0].Offset = bufferDesc.BufferBytes / 2 - 1;
posNotify[0].EventNotifyHandle = fillEvent[0].Handle;
posNotify[1] = new BufferPositionNotify();
posNotify[1].Offset = bufferDesc.BufferBytes - 1;
posNotify[1].EventNotifyHandle = fillEvent[1].Handle;
notify.SetNotificationPositions(posNotify);
this.thread = new Thread(new ThreadStart(SoundPlayback));
this.thread.Priority = ThreadPriority.Highest;
this.Pause();
this.running = true;
this.thread.Start();
}
public void Pause()
{
if (this.halted) return;
this.halted = true;
Monitor.Enter(this.thread);
}
public void Resume()
{
if (!this.halted) return;
this.halted = false;
Monitor.Pulse(this.thread);
Monitor.Exit(this.thread);
}
private void SoundPlayback()
{
lock (this.thread)
{
if (!this.running) return;
// Set up the initial sound buffer to be the full length
int bufferLength = this.samplesPerUpdate * 2 * this.channels;
short[] soundData = new short[bufferLength];
// Prime it with the first x seconds of data
this.pullAudio(soundData, soundData.Length);
this.soundBuffer.Write(0, soundData, LockFlag.None);
// Start it playing
this.soundBuffer.Play(0, BufferPlayFlags.Looping);
int lastWritten = 0;
while (this.running)
{
if (this.halted)
{
Monitor.Pulse(this.thread);
Monitor.Wait(this.thread);
}
// Wait on one of the notification events
WaitHandle.WaitAny(this.fillEvent, 3, true);
// Get the current play position (divide by two because we are using 16 bit samples)
int tmp = this.soundBuffer.PlayPosition / 2;
// Generate new sounds from lastWritten to tmp in the sound buffer
if (tmp == lastWritten)
{
continue;
}
else
{
soundData = new short[(tmp - lastWritten + bufferLength) % bufferLength];
}
this.pullAudio(soundData, soundData.Length);
// Write in the generated data
soundBuffer.Write(lastWritten * 2, soundData, LockFlag.None);
// Save the position we were at
lastWritten = tmp;
}
}
}
public void Dispose()
{
this.running = false;
this.Resume();
if (this.soundBuffer != null)
{
this.soundBuffer.Dispose();
}
if (this.soundDevice != null)
{
this.soundDevice.Dispose();
}
}
}
The concept is the same as what I'm using, but I can't manage to get a set of wave byte[] data to play.
I have not done this, but the first place I would look is XNA. I know that the C# Managed DirectX project was ditched in favor of XNA, and I have found XNA to be good for graphics; I prefer using it to DirectX.
What is the reason you decided not to just use SoundPlayer, as per this MSDN entry below?
private SoundPlayer Player = new SoundPlayer();
private void loadSoundAsync()
{
// Note: You may need to change the location specified based on
// the location of the sound to be played.
this.Player.SoundLocation = "http://www.tailspintoys.com/sounds/stop.wav";
this.Player.LoadAsync();
}
private void Player_LoadCompleted (
object sender,
System.ComponentModel.AsyncCompletedEventArgs e)
{
if (this.Player.IsLoadCompleted)
{
this.Player.PlaySync();
}
}
Usually I just load them all up in a thread or an async delegate, then Play or PlaySync them when needed.
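For example, a minimal sketch of that pattern (the path is illustrative):

using System.Media;
using System.Threading;

var player = new SoundPlayer(@"C:\sounds\stop.wav"); // illustrative path
ThreadPool.QueueUserWorkItem(_ => player.Load());    // preload on a worker thread
// ... later, when the cue is needed:
player.PlaySync(); // blocks until done; use Play() for fire-and-forget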
You can use the DirectSound support in SlimDX: http://slimdx.org/ :-)
You can use nBASS or, better, FMOD; both are great audio libraries and work nicely with .NET.
DirectSound is where you want to go. It's a piece of cake to use, but I'm not sure what formats it can play besides .wav
http://msdn.microsoft.com/en-us/library/windows/desktop/ee416960(v=vs.85).aspx