I have a function that strips the audio out of a video into a wav file, then reads from that wav file using FileStream. The problem is that the read function reports end-of-stream prematurely. It's a 20-minute wav file, but only about 17.5 minutes are written.
In this function, I am mixing several wave files into one large file. The problem is that the FileStream reader for the large wave file ends too early, and I can't figure out why.
Here is my code:
// Mixes every description wav in descriptionList into the audio track that was
// extracted from the video (outfileName + ".temp") and writes the combined
// result to outfileName. Runs on a worker thread; 'status' is the export
// progress window passed through the thread-pool callback.
public void combineWaveFileData(object status)
{
//ProgressWindow progressWindow = status as ProgressWindow;
ExportProgressWindow exportProgressWindow = status as ExportProgressWindow;
string videoAudioFile = outfileName + ".temp";
int descriptionStartSample = 0;
int highestNumber = 0;
int extraSamplesFromExtendedDescription = 0;
const double VOLUME_VIDEO_FACTOR = 0.8; //controls video volume levels; won't be const as volume control will be added later
const double VOLUME_DESCRIPTION_FACTOR = 1.5; //controls description volume levels
double currentSample = 0; //keeps track of the current sample in the video audio track
byte[] videoAudioBuffer = new byte[4];//new byte[bitsPerSample / 8];
int sample = 0; //holds raw audio data sample
int videoReadStatus = 1;
int descriptionEndSample = 0;
byte[] buffer = new byte[4];
FileStream tempStream;
videoAudioStream = new FileStream(videoAudioFile, FileMode.Open, FileAccess.ReadWrite);
// Read the 32-bit data-chunk size that sits 4 bytes before the PCM data.
// NOTE(review): this size comes from the wav header, not from the actual file
// length - if the tool that stripped the audio wrote an understated size here,
// anything downstream trusting it stops early. Compare against the
// commented-out Length-based variant below when chasing the premature EOF.
videoAudioStream.Seek(DATA_START_POS - 4, 0);
videoAudioStream.Read(buffer, 0, buffer.Length);
totalRawSoundDataBits = BitConverter.ToInt32(buffer, 0);
//totalRawSoundDataBits = videoAudioStream.Length;
// Sample rate lives at byte offset 24 of a canonical RIFF/WAVE header.
videoAudioStream.Seek(24, 0);
videoAudioStream.Read(buffer, 0, 4);
int videoSampleRate = BitConverter.ToInt32(buffer, 0);
sampleRateHz = videoSampleRate;
outFileStream = new FileStream(outfileName, FileMode.Create, FileAccess.ReadWrite);
videoAudioStream.Seek(0, 0);
writeAudioFileHeader(outFileStream, videoAudioStream);
// videoAudioStream.Seek(0, 0); //reset video audio position to 0 (beginning)
convertStatus = false;
if (compatibilityIssue)
{
exportProgressWindow.Close();
return;
}
//calculate total length of extended description files
foreach (Description description in descriptionList)
{
if (description.IsExtendedDescription)
{
tempStream = new FileStream(description.getFilename(), FileMode.Open);
totalRawSoundDataBits += tempStream.Length - DATA_START_POS;
try
{
tempStream.Close();
tempStream.Dispose();
tempStream = null;
}
catch { }
}
// Resample any description whose sample rate differs from the video's so the
// mixing loop below can pair samples one-for-one.
WaveReader read = new WaveReader(File.OpenRead(description.getFilename()));
IntPtr oldFormat = read.ReadFormat();
WaveFormat waveformat = AudioCompressionManager.GetWaveFormat(oldFormat);
int descriptionSampleRateHz = waveformat.nSamplesPerSec;
read.Close();
string resampledFilename = description.getFilename();
if (descriptionSampleRateHz != sampleRateHz)
{
exportProgressWindow.SetText(".");
resampledFilename = convertSampleRate(description.getFilename(), sampleRateHz);
description.setFilename(resampledFilename);
}
}
// Exchange sort: order descriptions by ascending start time.
for (int i = 0; i < descriptionList.Count; i++)
{
for (int j = 0; j < descriptionList.Count; j++)
{
Description tempDescription;
if (((Description)descriptionList[i]).getStart() < ((Description)descriptionList[j]).getStart())
{
tempDescription = (Description)descriptionList[j];
descriptionList[j] = descriptionList[i];
descriptionList[i] = tempDescription;
}
}
}
// Main copy/mix loop. Walks the video audio stream until FileStream.Read
// reports end of stream (Read returns 0 at EOF - not -1). k indexes the next
// description to mix in; it stops advancing at the last list entry.
int k = 0;
while (videoReadStatus > 0)
{
try
{
Description description;
description = (Description)descriptionList[k];
descriptionStartSample = (int)Math.Truncate(sampleRateHz * description.getStart());
descriptionEndSample = (int)Math.Truncate(sampleRateHz * description.getEnd());
// Position / 4 assumes 4 bytes per sample frame, yet the reads below consume
// 2 bytes at a time. NOTE(review): confirm the bytes-per-frame assumption -
// a mismatch makes descriptions trigger at half/double the intended time.
if (videoAudioStream.Position / 4 > descriptionStartSample )
{
double currentTime = videoAudioStream.Position / 4 / sampleRateHz;
Console.WriteLine(currentTime+ " " + description.getStart() + " " + description.getEnd());
if (k < descriptionList.Count - 1)
{
k++;
}
double percentage = Convert.ToDouble(k) / Convert.ToDouble(descriptionList.Count) * 100.0;
try
{
exportProgressWindow.Increment(percentage);
}
catch (Exception ex)
{
// Progress window was closed by the user; abort the export.
return;
}
buffer = new byte[4];
tempStream = new FileStream(description.getFilename(), FileMode.Open);
try
{
tempStream.Seek(44, 0); //to search for position 34; write: use writeSample()
int tempReadStatus = 1;
// Mix until either the description file or the video audio runs out.
while (tempReadStatus > 0 && videoReadStatus > 0)//(currentSample < descriptionEndSample)
{
//If description isn't an extended description then mix the description with the video audio
if (!description.IsExtendedDescription)
{
// 16-bit PCM: read one 2-byte sample from each stream and average the two
// volume-scaled values.
videoReadStatus = videoAudioStream.Read(videoAudioBuffer, 0, 2);
tempReadStatus = tempStream.Read(buffer, 0, 2);
if (videoReadStatus == 0)
{
Console.WriteLine(currentTime);
int debug = 0;
}
if (tempReadStatus <= 0 || videoReadStatus <=0)
{
int deleteme = 0;
}
sample += (int)(((BitConverter.ToInt16(buffer, 0))* VOLUME_DESCRIPTION_FACTOR + (BitConverter.ToInt16(videoAudioBuffer, 0) * VOLUME_VIDEO_FACTOR)) / 2);
writeSample(sample);
sample = 0;
}
else
// If description is extended then only write the description samples
{
int tempStatus = 1;
// Extended description: copy its samples through to the end of its file
// without advancing the video stream.
while (tempReadStatus > 0)
{
tempReadStatus = tempStream.Read(buffer, 0, 2);
sample = (int)((BitConverter.ToInt16(buffer, 0)));// -((sample * (int)(BitConverter.ToInt16(buffer, 0))) / 65535); //Z = A+B-AB/65535 http://www.vttoth.com/CMS/index.php/technical-notes/68 //* VOLUME_DESCRIPTION_FACTOR);
writeSample(sample);
sample = 0;
}
break;
}
}
}
catch (Exception ex)
{
Console.WriteLine("Debug 1: " + ex.Message);//MessageBox.Show(ex.Message);
}
finally
{
tempStream.Close();
tempStream.Dispose();
tempStream = null;
}
}
else
{
// No description active at this position: pass the video audio through at
// the configured volume.
try
{
videoReadStatus = videoAudioStream.Read(videoAudioBuffer, 0, 2);
sample += (int)((BitConverter.ToInt16(videoAudioBuffer, 0)) * VOLUME_VIDEO_FACTOR) ;
if (videoReadStatus == 0)
{
int debug = 0;
}
writeSample(sample);
sample = 0;
convertStatus = true;
}
catch (Exception ex)
{
// NOTE(review): swallowing here hides read/write failures silently - at
// minimum this should be logged.
int test = 0;
}
}
}
catch (Exception ex)
{
MessageBox.Show(ex.GetBaseException().ToString());
}
}
exportProgressWindow.SetText("\n\nLiveDescribe has successfully exported the file.");
try
{
closeStreams();
Control.CheckForIllegalCrossThreadCalls = false;
}
catch (Exception ex)
{
}
}
Related
I am adding voip in the game and since Unity's Microphone class is not supported in Web_GL and is already slow and gives floats instead of bytes. Now some people suggested me to use codec i.e Opus and then I found its wrapper along with its demo which used NAudio, well I was fairly happy with it, it was using some extra loops which after removing also gave the same result but anyway it also gave 4000 bytes with 48k sample rate which I reduced to 8k and max buffer size to 350. Here's the code for that script
private void Start()
{
    //StartEncoding();
    // Subscribe with '+=' rather than assign with '=': plain assignment
    // silently removes every other playmodeStateChanged subscriber in the
    // editor (documented Unity pitfall for this delegate field).
    UnityEditor.EditorApplication.playmodeStateChanged += PlayModeStateChangedHandler;
}
// Editor play-mode callback: a pause in the editor is treated like a
// shutdown, so capture/playback are torn down.
private void PlayModeStateChangedHandler()
{
    if (!UnityEditor.EditorApplication.isPaused)
    {
        return;
    }
    StopEncoding();
}
// Public entry point used by game code to begin voice capture/encoding.
public void StartGame()
{
StartEncoding();
}
// Initializes the full voice pipeline: Opus encoder/decoder (8 kHz, mono),
// NAudio capture with 50 ms buffers, playback through a buffered provider,
// and a 1-second stats timer.
private void StartEncoding()
{
_client = FindObjectOfType<Client>();
_client.AudioReceivers += UpdateAudioOutput;
_startTime = DateTime.Now;
_bytesSent = 0;
// 160 samples at 8000 Hz = 20 ms per Opus segment.
_segmentFrames = 160;
_encoder = OpusEncoder.Create(8000, 1, FragLabs.Audio.Codecs.Opus.Application.Voip);
_encoder.MaxDataBytes = 350;
_encoder.Bitrate = 4000;
_decoder = OpusDecoder.Create(8000, 1);
_decoder.MaxDataBytes = 175;
_bytesPerSegment = _encoder.FrameByteCount(_segmentFrames);
_waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback());
_waveIn.BufferMilliseconds = 50;
_waveIn.DeviceNumber = 0;
_waveIn.DataAvailable += _waveIn_DataAvailable;
_waveIn.WaveFormat = new WaveFormat(8000, 16, 1);
_playBuffer = new BufferedWaveProvider(new WaveFormat(8000, 16, 1));
// Drop samples instead of throwing when incoming audio outpaces playback.
_playBuffer.DiscardOnBufferOverflow = true;
_waveOut = new WaveOut(WaveCallbackInfo.FunctionCallback());
_waveOut.DeviceNumber = 0;
_waveOut.Init(_playBuffer);
_waveOut.Play();
_waveIn.StartRecording();
if (_timer == null)
{
_timer = new Timer();
_timer.Interval = 1000;
_timer.Elapsed += _timer_Tick;
}
_timer.Start();
}
// Once-per-second tick computing the average outbound byte rate.
// NOTE(review): bytesPerSecond is computed but never used or logged -
// presumably a debugging leftover; confirm before removing.
private void _timer_Tick(object sender, EventArgs e)
{
var timeDiff = DateTime.Now - _startTime;
var bytesPerSecond = _bytesSent / timeDiff.TotalSeconds;
}
byte[] _notEncodedBuffer = new byte[0];
// Capture callback: prepends any leftover bytes from the previous callback,
// slices the combined audio into fixed-size Opus segments, encodes each and
// sends it; the trailing partial segment is kept for the next callback.
private void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
{
    byte[] soundBuffer = new byte[e.BytesRecorded + _notEncodedBuffer.Length];
    // Buffer.BlockCopy replaces the previous byte-by-byte copy loops.
    Buffer.BlockCopy(_notEncodedBuffer, 0, soundBuffer, 0, _notEncodedBuffer.Length);
    Buffer.BlockCopy(e.Buffer, 0, soundBuffer, _notEncodedBuffer.Length, e.BytesRecorded);
    int byteCap = _bytesPerSegment;
    // Integer division == Math.Floor for these non-negative counts.
    int segmentCount = soundBuffer.Length / byteCap;
    int segmentsEnd = segmentCount * byteCap;
    int notEncodedCount = soundBuffer.Length - segmentsEnd;
    // Stash the tail that does not fill a whole segment for the next callback.
    _notEncodedBuffer = new byte[notEncodedCount];
    Buffer.BlockCopy(soundBuffer, segmentsEnd, _notEncodedBuffer, 0, notEncodedCount);
    for (int i = 0; i < segmentCount; i++)
    {
        byte[] segment = new byte[byteCap];
        Buffer.BlockCopy(soundBuffer, i * byteCap, segment, 0, byteCap);
        int len;
        byte[] buff = _encoder.Encode(segment, segment.Length, out len);
        SendToServer(buff, len);
    }
}
// Receive callback: decodes one Opus packet and queues the PCM for playback.
// 'len' is the encoded byte count supplied by the sender; Decode reports the
// decoded PCM byte count through the out parameter.
public void UpdateAudioOutput(byte[] ba, int len)
{
    int outlen;
    // The original allocated 'new byte[len]' here and immediately overwrote
    // the reference with Decode's return value - a dead allocation, removed.
    byte[] buff = _decoder.Decode(ba, len, out outlen);
    _playBuffer.AddSamples(buff, 0, outlen);
}
// Forwards one encoded audio packet to the network layer; 'Length' is the
// valid byte count within EncodedAudio.
private void SendToServer(byte[] EncodedAudio, int Length)
{
print("SENDING AUDIO");
//print("audio length : " + EncodedAudio.Length);
_client.Send(EncodedAudio, Length);
//UpdateAudioOutput(EncodedAudio, Length);
}
// Tears down capture, playback and codecs. Each stage is null-guarded so a
// second call (e.g. the editor-pause handler runs, then OnApplicationQuit
// calls this again) is a no-op instead of a NullReferenceException on the
// fields that were already nulled out.
private void StopEncoding()
{
    if (_timer != null)
    {
        _timer.Stop();
    }
    if (_waveIn != null)
    {
        _waveIn.StopRecording();
        _waveIn.Dispose();
        _waveIn = null;
    }
    if (_waveOut != null)
    {
        _waveOut.Stop();
        _waveOut.Dispose();
        _waveOut = null;
    }
    _playBuffer = null;
    if (_encoder != null)
    {
        _encoder.Dispose();
        _encoder = null;
    }
    if (_decoder != null)
    {
        _decoder.Dispose();
        _decoder = null;
    }
}
// Unity lifecycle hook: ensure the audio pipeline is released on shutdown.
private void OnApplicationQuit()
{
StopEncoding();
}
Now here is the tcp send and receive, they are pretty much same for the client and the server
// Writes one framed packet to the TCP stream. Frame layout:
//   [1 byte message type][4 bytes payload length][payload][4 bytes custom param]
// 'customParamLen' is an application-defined value the receiver needs
// (here: the Opus-encoded length used for decoding).
public void Send(byte[] data, int customParamLen = 0)
{
    if (!socketReady)
    {
        return;
    }
    byte messageType = (1 << 3); // assume that 0000 1000 would be the Message type
    byte[] message = data;
    byte[] length = BitConverter.GetBytes(message.Length);
    byte[] customParam = BitConverter.GetBytes(customParamLen); //length also 4/sizeof(int)
    byte[] buffer = new byte[sizeof(int) + message.Length + 1 + customParam.Length];
    buffer[0] = messageType;
    // Buffer.BlockCopy replaces the previous three index-by-index copy loops.
    //Enter length in the buffer
    Buffer.BlockCopy(length, 0, buffer, 1, sizeof(int));
    //Enter data in the buffer
    Buffer.BlockCopy(message, 0, buffer, 1 + sizeof(int), message.Length);
    //Enter custom Param in the buffer
    Buffer.BlockCopy(customParam, 0, buffer, 1 + sizeof(int) + message.Length, sizeof(int));
    heavyStream.Write(buffer, 0, buffer.Length);
    print("Writtin bytes");
}
if (heavyStream.DataAvailable)
{
print("Data Receiving YAY!");
//Get message Type
byte messageType = (byte)heavyStream.ReadByte();
//Get length of the Data
byte[] lengthBuffer = new byte[sizeof(int)];
int recv = heavyStream.Read(lengthBuffer, 0, lengthBuffer.Length);
if (recv == sizeof(int))
{
int messageLen = BitConverter.ToInt32(lengthBuffer, 0);
//Get the Data
byte[] messageBuffer = new byte[messageLen];
recv = heavyStream.Read(messageBuffer, 0, messageBuffer.Length);
if (recv == messageLen)
{
// messageBuffer contains the whole message ...
//Get length paramater needed for opus to decode
byte[] customParamAudioLen = new byte[sizeof(int)];
recv = heavyStream.Read(customParamAudioLen, 0, customParamAudioLen.Length);
if (recv == sizeof(int))
{
AudioReceivers(messageBuffer, BitConverter.ToInt32(customParamAudioLen, 0) - 5);
print("Done! Everything went straight as planned");
}
}
}
Now the problem is that the audio is choppy and has gaps in them, as the time flies the more out of sync it becomes.
UPDATE
Still not fixed.
It looks like you're just sending audio straight out with no jitter buffer on the receiving end. This means if you have any variability in latency you'll start to hear gaps.
What you need to do is buffer audio on the client side - until you have a good amount, say 400ms, then start playing. That gives you a buffer of extra time to account for jitter.
This is a very naive approach, but gives you something to play with - you'll probably want to look at adaptive jitter buffers, and probably switch to UDP instead of TCP to get better performance. With UDP you will need to deal with lost packets, out of order etc.
Have a look at Speex which has a Jitter Buffer https://github.com/xiph/speex or Mumble which uses Speex for VOIP https://github.com/mumble-voip/mumble
I have a function that searches for a string in a large binary file and gives me its position.
How do I implement a function that reads from that position and gives me a string of a specific length,
As we do in String.Substring()
Here is the code I have so far.
// Scans every file in jsscan for the byte pattern encoded by 'match'
// (hex string -> bytes) and prints the byte range of the first occurrence.
public void example()
{
    string match = "400000002532"; //This is 12 chars in hex of the string to search
    byte[] matchBytes = StringToByteArray(match);
    foreach (var jsFile in jsscan)
    {
        using (var fs = new FileStream(jsFile, FileMode.Open))
        {
            int i = 0;
            int readByte;
            while ((readByte = fs.ReadByte()) != -1)
            {
                if (matchBytes[i] == readByte)
                {
                    i++;
                }
                else if (i > 0)
                {
                    // A partial match just failed. Rewind to the byte right
                    // after the position where the failed match started.
                    // The previous code only reset i, which skips the bytes
                    // consumed by the partial match and misses overlapping
                    // occurrences (e.g. pattern 40 00 ... in data 40 40 00 ...).
                    fs.Position -= i;
                    i = 0;
                }
                if (i == matchBytes.Length)
                {
                    Console.WriteLine("It found between {0} and {1}.",
                        fs.Position - matchBytes.Length, fs.Position);
                    break;
                }
            }
        }
    }
}
/// <summary>
/// Decodes a hexadecimal string (two hex digits per byte) into a byte array.
/// Throws if the string contains non-hex characters or has odd length.
/// </summary>
/// <param name="hex">The hex string to decode.</param>
/// <returns>The decoded bytes.</returns>
public static byte[] StringToByteArray(String hex)
{
    byte[] decoded = new byte[hex.Length / 2];
    for (int index = 0; index < hex.Length; index += 2)
    {
        decoded[index / 2] = Convert.ToByte(hex.Substring(index, 2), 16);
    }
    return decoded;
}
Example what I am searching is below
400000002532010953667A51E44BE5B6A59417B71F4B91BBE590B6AF6E84EF570C32C56400E05123B0A44AF389331E4B91B02E8980B85157F910D7238918A73012B6243F772F7B60E5A7CF6E8CB25374B8FF96311130AABD9F71A860C904C9F6AE9706E570CC0E881E997762710EDE8818CCC551BA05579D30C0D53CEBD9BAF0C2E557D7B9D37A9C94A8A9B5FA7FCF7973B0BDA88A06DE1AE357130E4A06018ABB0A1ABD818DABEB518649CF885953EE05564FD69F0E2F860175667C5FC84F1C97727CEA1C841BFA86A26BABA942E0275FAB2A8F78132E3A05404F0DCD02FD4E7CAD08B1FFD4C2184400F22F6EBC14857BCC2E2AF858BE20CBB807C3467A91E38F31901FD452B5F87F296174631980E039CAB58D97E8F91E3255DD7DEF3177D68A4943F629A70B421B1D6E53DC0D26A1B5EF7C6912F48E0842037FA72B17C18E11B93AEE4DDA0FFE6F217BD5DEB957B1C26169029DE4396103D1F89FA0856489B1958DE5C896DB8F27A24C21AC66BF2095E383DA5EC6DA7138FE82C62FDE9BEFF0308F507736F1B35B1CA083F6C96A6860889BDCCBC989E86F4FB1C483E71557369E7308450330AEF8C9A13A115E8A97642E4A0A4098F5BC04A096A22E5F97116B59AE17BCAEFD2A8B0BCB5341EC64CA3E474900D5A8A620448A6C97827C42332C4DD326572A3C5DB4DA1362F3C0012E1AA1B70C812DCCAEF74F67E94E907518CA31945DD56A61A7
If performance is not a huge concern, you could do the following, which is easier and more readable:
// Answer snippet: load the whole file as text and locate the pattern with
// String.IndexOf instead of scanning byte-by-byte.
// NOTE(review): as written this does not compile - 'matchBytes' is a byte[]
// and string.IndexOf has no byte[] overload; presumably the hex string
// 'match' was intended. 'await' also requires an enclosing async method.
// Reading a binary file through StreamReader additionally risks decoding
// corruption of arbitrary bytes - verify before using on binary data.
using (var fs = new StreamReader(fileName))
{
var content = await fs.ReadToEndAsync();
var pos = content.IndexOf(matchBytes);
if (pos != -1)
{
Console.WriteLine($"Found # {pos}, {pos + matchBytes.Length}");
}
}
Assuming you know which Encoding is used to store characters in the Stream, try this function:
// Reads characters from 'stream' starting at byte offset 'position' and
// returns the first 'stringLength' characters decoded with 'encoding'.
// Works byte-by-byte: decoding stops as soon as one *extra* full character
// appears (stringLength + 1), which guarantees the first stringLength
// characters are complete; the end-of-stream case is handled after the loop.
// Throws when the stream holds fewer than stringLength characters.
static string GetString(Stream stream, long position, int stringLength, Encoding encoding) {
    stream.Seek(position, SeekOrigin.Begin);
    byte[] data = new byte[stream.Length - position];
    int count = 0;
    int current;
    while ((current = stream.ReadByte()) != -1)
    {
        data[count++] = (byte)current;
        // One extra character decoded => the first stringLength chars are
        // complete; drop the byte that started the extra character.
        if (encoding.GetCharCount(data, 0, count) == stringLength + 1)
        {
            return encoding.GetString(data, 0, count - 1);
        }
    }
    // Stream ended exactly at the requested character count.
    if (encoding.GetCharCount(data, 0, count) == stringLength)
    {
        return encoding.GetString(data, 0, count);
    }
    throw new Exception(string.Format("Stream doesn't contains {0} characters", stringLength));
}
For example, with your code and utf-16:
// Same byte-at-a-time scan as the question's code; once the pattern is found
// the stream is positioned just past the match, so GetString can read the
// following characters directly from fs.Position.
using (var fs = new FileStream(jsFile, FileMode.Open))
{
int i = 0;
int readByte;
while ((readByte = fs.ReadByte()) != -1)
{
if (matchBytes[i] == readByte)
{
i++;
}
else
{
// NOTE(review): resetting without re-testing the current byte against
// matchBytes[0] (or rewinding) can miss overlapping matches.
i = 0;
}
if (i == matchBytes.Length)
{
Console.WriteLine("It found between {0} and {1}.",
fs.Position - matchBytes.Length, fs.Position);
//Desired string length in charachters
const int DESIRED_STRING_LENGTH = 5;
Console.WriteLine(GetString(fs, fs.Position, DESIRED_STRING_LENGTH, Encoding.Unicode));
break;
}
}
}
I'm trying to read from two alaw/pcm files in the following way:
// Load the whole DL (downlink) stream into memory.
byte[] dlBuffer = null;
if (dlStream != null)
{
dlStream.Position = 0;
// NOTE(review): the +1 makes the buffer one byte longer than the stream, so
// Read can never return dlBuffer.Length; also Stream.Read is not guaranteed
// to deliver all bytes in a single call - readDL should be checked (or the
// read looped) before the buffer contents are trusted.
dlBuffer = new byte[dlStream.Length+1];
int readDL = dlStream.Read(dlBuffer, 0, dlBuffer.Length);
}
// Load the whole UL (uplink) stream the same way.
byte[] ulBuffer = null;
if (ulStream != null)
{
ulStream.Position = 0;
ulBuffer = new byte[ulStream.Length+1];
int readUL = ulStream.Read(ulBuffer, 0, ulBuffer.Length);
}
And then to save the buffers to one wav file:
private const int WAVE_HEADER_SIZE = 44;
private const int WAVE_BUFFER_SIZE = 2 * 1024 * 1024; // = 2,097,152
// Writes the captured DL/UL PCM buffers into a single wav file: stereo
// (DL -> left channel, UL -> right) when both directions exist, mono
// otherwise. Returns false on bad arguments or any write failure.
//
// Fix for the reported IndexOutOfRangeException / ArgumentException: the
// interleave buffer used to be a fixed 2 MB scratch array (WAVE_BUFFER_SIZE);
// any recording whose payload exceeded 2 MB overflowed it. The buffer is now
// sized from the actual payload, and the source buffers are null-guarded.
private bool SaveBufferToWave(string wavFileName, VoiceMetadata metadata, byte[] dlBuffer, byte[] ulBuffer)
{
    if ((wavFileName == null) || (wavFileName.Length == 0) || (metadata == null))
        return false;
    FileStream fileStream = null;
    bool success = true;
    try
    {
        int bytesToWrite = 0;
        byte[] waveBuffer = null;
        //If we have DL & UL write stereo wav, else write mono wave
        int channels = (metadata.DLExists && metadata.ULExists) ? 2 : 1;
        int samples = (int)metadata.TotalTimeMS * (VoiceMetadata.SAMPLES_PER_SECOND / 1000);
        fileStream = new FileStream(wavFileName, FileMode.Create, FileAccess.ReadWrite, FileShare.None);
        BinaryWriter wrt = new BinaryWriter(fileStream);
        wrt.Write(GetHeader(
            WAVE_HEADER_SIZE + (samples) * (channels) * sizeof(short), sizeof(short) * 8,
            channels,
            8000));
        int dlLength = (dlBuffer != null) ? dlBuffer.Length : 0;
        int ulLength = (ulBuffer != null) ? ulBuffer.Length : 0;
        if (channels == 2)
        {
            if ((dlLength != 0) && (ulLength != 0))
            {
                // Interleave 16-bit samples: DL -> left, UL -> right.
                bytesToWrite = Math.Max(dlLength, ulLength) * 2;
                waveBuffer = new byte[bytesToWrite];   // sized from the payload, not a fixed constant
                for (int i = 0; i < dlLength / 2; ++i)
                {
                    waveBuffer[i * 4 + 0] = dlBuffer[i * 2];
                    waveBuffer[i * 4 + 1] = dlBuffer[i * 2 + 1];
                }
                for (int i = 0; i < ulLength / 2; ++i)
                {
                    waveBuffer[i * 4 + 2] = ulBuffer[i * 2];
                    waveBuffer[i * 4 + 3] = ulBuffer[i * 2 + 1];
                }
            }
        }
        else
        {
            //Create the mono wave buffer - no copy needed, write the source directly
            if (metadata.ULExists && (ulLength != 0))
            {
                waveBuffer = ulBuffer;
                bytesToWrite = ulLength;
            }
            else if (metadata.DLExists && (dlLength != 0))
            {
                waveBuffer = dlBuffer;
                bytesToWrite = dlLength;
            }
        }
        if (bytesToWrite > 0)
            fileStream.Write(waveBuffer, 0, bytesToWrite);
        Logger.Debug("File {0} was saved successfully in wav format.", wavFileName);
    }
    catch (Exception e)
    {
        Logger.Error("Failed saving file {0} in wav format, Exception: {1}.", wavFileName, e);
        success = false;
    }
    finally
    {
        if (fileStream != null)
            fileStream.Close();
    }
    return success;
}
Most of the times it works fine, but sometimes I get one of this two exceptions:
Failed saving file c:\Samples\WAV\24112014-095948.283.wav in wav format, Exception: System.ArgumentException: Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection.
Failed saving file c:\Samples\WAV\24112014-100742.409.wav in wav format, Exception: System.IndexOutOfRangeException: Index was outside the bounds of the array.
What can be the problem?
I encountered a System.StackOverflowException problem when I was trying to Marshal.Copy()
Here is the screen shot of the code where exception happens:
Here is the function:
// Callback invoked from native code with a pointer to the latest frame.
// Copies the pixel data into the managed _pixelData buffer and, for
// single-channel images, rebuilds the intensity histogram. The frame is
// skipped while the previous one is still being consumed (_pixelDataReady).
// NOTE(review): Marshal.Copy reads width*height*channels shorts from
// returnArray; if the native buffer was allocated smaller than that (as the
// poster later confirmed for 4K x 4K frames) the copy runs off the end of the
// native allocation - the fix belongs on the native allocation side.
private static void ImageUpdate(IntPtr returnArray, ref int channels)
{
// Exponential moving average of the inter-frame interval (ms).
if (_prevTickCount == 0)
{
_sumTickCount = 0;
}
else
{
_sumTickCount = _sumTickCount * .75 + (Environment.TickCount - _prevTickCount) * .25;
}
//only copy to the buffer when pixel data is not being read
if (_pixelDataReady == false)
{
int width = 0;
int height = 0;
if (false == GetImageDimensions(ref width, ref height))
{
return;
}
_dataLength = width * height;
_colorChannels = channels;
// (Re)allocate the managed buffers whenever the frame geometry changes.
if ((_pixelData == null) || (_pixelData.Length != (_dataLength * _colorChannels)))
{
_pixelData = new short[_dataLength * _colorChannels];
//_pixelDataHistogram = new int[_colorChannels][];
_pixelDataHistogram = new int[MAX_CHANNELS][];
if (_colorChannels == 1)
{
_pixelDataByte = new byte[_dataLength];
}
else
{
// 3 bytes per pixel for multi-channel display data.
_pixelDataByte = new byte[_dataLength * 3];
}
//for (int i = 0; i < _colorChannels; i++)
for (int i = 0; i < MAX_CHANNELS; i++)
{
_pixelDataHistogram[i] = new int[PIXEL_DATA_HISTOGRAM_SIZE];
}
}
//2^n == FULL_RANGE_NORMALIZATION_FACTOR
const int SHIFT_VALUE = 6;
switch (_colorChannels)
{
case 1:
{
Marshal.Copy(returnArray, _pixelData, 0, _dataLength * _colorChannels);
//factor is derived by taking CAMERA_MAX_INTENSITY_VALUE/256
//const double FULL_RANGE_NORMALIZATION_FACTOR = 64.0;
//clear the histogram
for (int i = 0; i < PIXEL_DATA_HISTOGRAM_SIZE; i++)
{
_pixelDataHistogram[0][i] = 0;
}
// Bucket each pixel into one of the histogram bins; negative shorts
// are shifted into the unsigned range first.
for (int i = 0; i < _dataLength * _colorChannels; i++)
{
double valHist;
if (_pixelData[i] < 0)
{
valHist = (_pixelData[i] + 32768) >> SHIFT_VALUE;
}
else
{
valHist = (_pixelData[i]) >> SHIFT_VALUE;
}
_pixelDataHistogram[0][(byte)valHist]++;
}
}
break;
default:
{
// Multi-channel frames: raw copy only, no histogram.
Marshal.Copy(returnArray, _pixelData, 0, _dataLength * _colorChannels);
}
break;
}
_dataWidth = width;
_dataHeight = height;
_pixelDataReady = true;
ThorLog.Instance.TraceEvent(TraceEventType.Verbose, 1, "ImageUpdate pixeldata updated");
}
else
{
ThorLog.Instance.TraceEvent(TraceEventType.Verbose, 1, "ImageUpdate pixeldata not ready");
}
_prevTickCount = Environment.TickCount;
}
The whole idea is to copy image buffer from native code. This exception occurs only when image size is large 4K X 4K, but I dont have a problem processing a size below that.
I have no idea how I should correct this. Anyone care to educate? Thanks!
I traced it down, eventually, if was the returnArray which is not newed large enough to cause this problem.
I'm trying to convert a WAV file(PCM,48kHz, 4-Channel, 16 bit) into mono-channel WAV files.
I tried splittiing the WAV file into 4 byte-arrays like this answer and created a WaveMemoryStream like shown below but does not work.
byte[] chan1ByteArray = new byte[channel1Buffer.Length];
Buffer.BlockCopy(channel1Buffer, 0, chan1ByteArray, 0, chan1ByteArray.Length);
WaveMemoryStream chan1 = new WaveMemoryStream(chan1ByteArray, sampleRate, (ushort)bitsPerSample, 1);
Am I missing something in creating the WAVE headers ? Or is there more to splitting a
WAV into mono channel WAV files ?
The basic idea is that the source wave file contains the samples interleaved. One for the first channel, one for the second, and so on. Here's some untested example code to give you an idea of how to do this.
// Splits a multi-channel wav into one mono wav per channel. Samples in the
// source are interleaved (ch1, ch2, ..., chN, ch1, ...), so each 2-byte
// 16-bit sample is routed to its channel's writer in turn.
var reader = new WaveFileReader("fourchannel.wav");
// One second of audio per read: 2 bytes per sample * rate * channel count.
var buffer = new byte[2 * reader.WaveFormat.SampleRate * reader.WaveFormat.Channels];
var writers = new WaveFileWriter[reader.WaveFormat.Channels];
for (int n = 0; n < writers.Length; n++)
{
var format = new WaveFormat(reader.WaveFormat.SampleRate,16,1);
writers[n] = new WaveFileWriter(String.Format("channel{0}.wav",n+1), format);
}
int bytesRead;
while((bytesRead = reader.Read(buffer,0, buffer.Length)) > 0)
{
int offset= 0;
while (offset < bytesRead)
{
for (int n = 0; n < writers.Length; n++)
{
// write one sample
writers[n].Write(buffer,offset,2);
offset += 2;
}
}
}
// Dispose the writers to finalize the wav headers, then the reader.
for (int n = 0; n < writers.Length; n++)
{
writers[n].Dispose();
}
reader.Dispose();
Based on Mark Heath's answer, I struggled with a 32-bit floating-point WAV containing 32 channels and managed to get it working by simplifying his proposal. I would guess this piece of code also works for a four-channel audio WAV file.
// Variant that lets NAudio do the de-interleaving: ReadNextSampleFrame
// returns one float per channel, so writer i simply receives element i of
// each frame.
var reader = new WaveFileReader("thirtytwochannels.wav");
var writers = new WaveFileWriter[reader.WaveFormat.Channels];
for (int n = 0; n < writers.Length; n++)
{
var format = new WaveFormat(reader.WaveFormat.SampleRate, 16, 1);
writers[n] = new WaveFileWriter(string.Format($"channel{n + 1}.wav"), format);
}
float[] buffer;
// Null-conditional guards the end-of-stream case where the frame is null.
while ((buffer = reader.ReadNextSampleFrame())?.Length > 0)
{
for(int i = 0; i < buffer.Length; i++)
{
// write one sample for each channel (i is the channelNumber)
writers[i].WriteSample(buffer[i]);
}
}
// Dispose the writers to finalize the wav headers, then the reader.
for (int n = 0; n < writers.Length; n++)
{
writers[n].Dispose();
}
reader.Dispose();
Here is a method I used, you can set the output mono format, e.g BitsPerSample, SampleRate
using NAudio.Wave;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
namespace DataScraper.TranscriptionCenter
{
public class MP3ToWave
{
    /// <summary>
    /// Converts an MP3 file into one mono wav file per channel (16 kHz,
    /// 16-bit), using NAudio. The wav files are saved in the same directory
    /// as the MP3 file, named "&lt;mp3name&gt;_&lt;channelIndex&gt;.wav".
    /// </summary>
    /// <param name="MP3FileIn">The MP3 file</param>
    /// <returns>Returns the WAV files</returns>
    /// <exception cref="Exception">Thrown when the input file does not exist
    /// or any stage of the conversion fails.</exception>
    public static string[] MP3FilesToTranscriptionWaveFiles(string MP3FileIn)
    {
        FileInfo MP3FileInfo = new FileInfo(MP3FileIn);
        if (MP3FileInfo.Exists == false)
            throw new Exception("File does not exist? " + MP3FileIn);
        Mp3FileReader readerMP3 = null;
        WaveStream PCMStream = null;
        WaveFileReader readerWAV = null;
        List<string> ListFilesOut = null;
        WaveFileWriter[] FileWriters = null;
        MemoryStream TempStream = null;
        WaveFormatConversionStream WaveFormatConversionStream_ = null;
        // Target format for every per-channel output file: 16 kHz, 16-bit, mono.
        WaveFormat SaveWaveFormatMono = new WaveFormat((16 * 1000),
        16,
        1);
        // The old 'catch (Exception em1) { throw em1; }' only destroyed the
        // stack trace; exceptions now propagate untouched and cleanup runs in
        // the finally block.
        try
        {
            // Decode MP3 to PCM, then convert to the target rate/bit depth
            // while keeping the source channel count.
            readerMP3 = new Mp3FileReader(MP3FileInfo.FullName);
            PCMStream = WaveFormatConversionStream.CreatePcmStream(readerMP3);
            WaveFormatConversionStream_ = new WaveFormatConversionStream(new WaveFormat(SaveWaveFormatMono.SampleRate,
            SaveWaveFormatMono.BitsPerSample,
            PCMStream.WaveFormat.Channels),
            PCMStream);
            //Each filepath, each channel
            ListFilesOut = new List<string>(WaveFormatConversionStream_.WaveFormat.Channels);
            //Each is a wav file out
            for (int index = 0; index < WaveFormatConversionStream_.WaveFormat.Channels; index++)
            {
                ListFilesOut.Add(MP3FileInfo.Directory.FullName + "\\" + Path.GetFileNameWithoutExtension(MP3FileInfo.Name) + "_" + index.ToString() + ".wav");
            }
            //Initialize the writers
            FileWriters = new WaveFileWriter[WaveFormatConversionStream_.WaveFormat.Channels];
            for (int index = 0; index < WaveFormatConversionStream_.WaveFormat.Channels; index++)
            {
                FileWriters[index] = new WaveFileWriter(ListFilesOut[index], SaveWaveFormatMono);
            }
            // Buffer the converted audio in memory so it can be re-read frame
            // by frame below. checked((int)...) replaces the old
            // int.Parse("" + length) string round-trip and still fails loudly
            // if the stream exceeds int.MaxValue bytes.
            TempStream = new MemoryStream(checked((int)WaveFormatConversionStream_.Length));
            WaveFileWriter NewWriter = new WaveFileWriter(TempStream, WaveFormatConversionStream_.WaveFormat);
            byte[] BUFFER = new byte[1024];
            // Stream.Read returns 0 at end of stream (never -1), so a single
            // '> 0' condition suffices.
            int ReadLength = WaveFormatConversionStream_.Read(BUFFER, 0, BUFFER.Length);
            while (ReadLength > 0)
            {
                NewWriter.Write(BUFFER, 0, ReadLength);
                ReadLength = WaveFormatConversionStream_.Read(BUFFER, 0, BUFFER.Length);
            }
            // Flush only - disposing NewWriter would also dispose TempStream,
            // which readerWAV still needs below.
            NewWriter.Flush();
            TempStream.Position = 0;
            readerWAV = new WaveFileReader(TempStream);
            // ReadNextSampleFrame yields one float per channel: route sample i
            // to the writer for channel i.
            float[] buffer = readerWAV.ReadNextSampleFrame();
            while (buffer != null && buffer.Length > 0)
            {
                for (int i = 0; i < buffer.Length; i++)
                {
                    FileWriters[i].WriteSample(buffer[i]);
                }
                buffer = readerWAV.ReadNextSampleFrame();
            }
        }
        finally
        {
            // Best-effort cleanup; each stage is isolated so one failing
            // Dispose does not prevent the others from running.
            try
            {
                //Flush each writer and close
                for (int writercount = 0; writercount < FileWriters.Length; writercount++)
                {
                    FileWriters[writercount].Flush();
                    FileWriters[writercount].Close();
                    FileWriters[writercount].Dispose();
                }
            }
            catch
            {
            }
            try { readerWAV.Dispose(); readerWAV = null; }
            catch { }
            try { WaveFormatConversionStream_.Dispose(); WaveFormatConversionStream_ = null; }
            catch { }
            try { PCMStream.Dispose(); PCMStream = null; }
            catch { }
            try { readerMP3.Dispose(); readerMP3 = null; }
            catch { }
            try
            {
                TempStream.Close(); TempStream.Dispose();
            }
            catch
            {
            }
        }
        return ListFilesOut.ToArray();
    }
}
}