Pitch shift of wave files in C#

I'm currently trying to do pitch shifting of a wave file using this algorithm
https://sites.google.com/site/mikescoderama/pitch-shifting
Here is my code, which uses the above implementation, but with no luck: the output wave file seems to be corrupted or invalid.
The code is quite simple, except for the pitch shift algorithm :)
It loads a wave file and reads the wave file data into a byte[] array.
Then it "normalizes" the byte data into the -1.0f to 1.0f range (as required by the author of the pitch shift algorithm).
It applies the pitch shift algorithm and then converts the normalized data back into a byte[] array.
Finally it saves a wave file with the same header as the original wave file and the pitch-shifted data.
Am I missing something?
static void Main(string[] args)
{
// Read the wave file data bytes
byte[] waveheader = null;
byte[] wavedata = null;
using (BinaryReader reader = new BinaryReader(File.OpenRead("sound.wav")))
{
// Read first 44 bytes (header);
waveheader= reader.ReadBytes(44);
// Read data
wavedata = reader.ReadBytes((int)reader.BaseStream.Length - 44);
}
short nChannels = BitConverter.ToInt16(waveheader, 22);
int sampleRate = BitConverter.ToInt32(waveheader, 24);
short bitRate = BitConverter.ToInt16(waveheader, 34);
// Normalized data store. Store values in the format -1.0 to 1.0
float[] in_data = new float[wavedata.Length / 2];
// Normalize wave data into -1.0 to 1.0 values
using(BinaryReader reader = new BinaryReader(new MemoryStream(wavedata)))
{
for (int i = 0; i < in_data.Length; i++)
{
if(bitRate == 16)
in_data[i] = reader.ReadInt16() / 32768f;
if (bitRate == 8)
in_data[i] = (reader.ReadByte() - 128) / 128f;
}
}
//PitchShifter.PitchShift(1f, in_data.Length, (long)1024, (long)32, sampleRate, in_data);
// Backup wave data
byte[] copydata = new byte[wavedata.Length];
Array.Copy(wavedata, copydata, wavedata.Length);
// Revert data to byte format
Array.Clear(wavedata, 0, wavedata.Length);
using (BinaryWriter writer = new BinaryWriter(new MemoryStream(wavedata)))
{
for (int i = 0; i < in_data.Length; i++)
{
if(bitRate == 16)
writer.Write((short)(in_data[i] * 32768f));
if (bitRate == 8)
writer.Write((byte)((in_data[i] * 128f) + 128));
}
}
// Compare new wavedata with copydata
if (wavedata.SequenceEqual(copydata))
{
Console.WriteLine("Data has no changes");
}
else
{
Console.WriteLine("Data has changed!");
}
// Save modified wavedata
string targetFilePath = "sound_low.wav";
if (File.Exists(targetFilePath))
File.Delete(targetFilePath);
using (BinaryWriter writer = new BinaryWriter(File.OpenWrite(targetFilePath)))
{
writer.Write(waveheader);
writer.Write(wavedata);
}
Console.ReadLine();
}

The algorithm here works fine:
https://sites.google.com/site/mikescoderama/pitch-shifting
My mistake was in how I was reading the wave header and wave data. I post the fully working code here.
WARNING: this code works only for PCM 16-bit (stereo/mono) waves. It can easily be adapted to work with PCM 8-bit (a sketch of the 8-bit conversion helpers follows the code below).
static void Main(string[] args)
{
// Read header, data and channels as separated data
// Normalized data stores. Store values in the format -1.0 to 1.0
byte[] waveheader = null;
byte[] wavedata = null;
int sampleRate = 0;
float[] in_data_l = null;
float[] in_data_r = null;
GetWaveData("sound.wav", out waveheader, out wavedata, out sampleRate, out in_data_l, out in_data_r);
//
// Apply Pitch Shifting
//
if(in_data_l != null)
PitchShifter.PitchShift(2f, in_data_l.Length, (long)1024, (long)10, sampleRate, in_data_l);
if(in_data_r != null)
PitchShifter.PitchShift(2f, in_data_r.Length, (long)1024, (long)10, sampleRate, in_data_r);
//
// Time to save the processed data
//
// Backup wave data
byte[] copydata = new byte[wavedata.Length];
Array.Copy(wavedata, copydata, wavedata.Length);
GetWaveData(in_data_l, in_data_r, ref wavedata);
//
// Check if data actually changed
//
bool noChanges = true;
for (int i = 0; i < wavedata.Length; i++)
{
if (wavedata[i] != copydata[i])
{
noChanges = false;
Console.WriteLine("Data has changed!");
break;
}
}
if(noChanges)
Console.WriteLine("Data has no changes");
// Save modified wavedata
string targetFilePath = "sound_low.wav";
if (File.Exists(targetFilePath))
File.Delete(targetFilePath);
using (BinaryWriter writer = new BinaryWriter(File.OpenWrite(targetFilePath)))
{
writer.Write(waveheader);
writer.Write(wavedata);
}
Console.ReadLine();
}
// Returns left and right float arrays. 'right' will be null if sound is mono.
public static void GetWaveData(string filename, out byte[] header, out byte[] data, out int sampleRate, out float[] left, out float[] right)
{
byte[] wav = File.ReadAllBytes(filename);
// Determine if mono or stereo
int channels = wav[22]; // Forget byte 23 as 99.999% of WAVs are 1 or 2 channels
// Get sample rate
sampleRate = BitConverter.ToInt32(wav, 24);
int pos = 12;
// Keep iterating until we find the data chunk (i.e. 64 61 74 61 ...... (i.e. 100 97 116 97 in decimal))
while(!(wav[pos]==100 && wav[pos+1]==97 && wav[pos+2]==116 && wav[pos+3]==97)) {
pos += 4;
int chunkSize = wav[pos] + wav[pos + 1] * 256 + wav[pos + 2] * 65536 + wav[pos + 3] * 16777216;
pos += 4 + chunkSize;
}
pos += 4;
int subchunk2Size = BitConverter.ToInt32(wav, pos);
pos += 4;
// Pos is now positioned to start of actual sound data.
int samples = subchunk2Size / 2; // 2 bytes per sample (16 bit sound mono)
if (channels == 2)
samples /= 2; // 4 bytes per sample (16 bit stereo)
// Allocate memory (right will be null if only mono sound)
left = new float[samples];
if (channels == 2)
right = new float[samples];
else
right = null;
header = new byte[pos];
Array.Copy(wav, header, pos);
data = new byte[subchunk2Size];
Array.Copy(wav, pos, data, 0, subchunk2Size);
// Write to float array/s:
int i=0;
while (i < samples) // read every sample frame; pos already points at the first data byte
{
left[i] = BytesToNormalized_16(wav[pos], wav[pos + 1]);
pos += 2;
if (channels == 2)
{
right[i] = BytesToNormalized_16(wav[pos], wav[pos + 1]);
pos += 2;
}
i++;
}
}
// Return byte data from left and right float data. Ignore right when sound is mono
public static void GetWaveData(float[] left, float[] right, ref byte[] data)
{
// Calculate k
// This value will be used to convert float to Int16
// We are not using Int16.Max to avoid peaks due to overflow conversions
float k = (float)Int16.MaxValue / left.Select(x => Math.Abs(x)).Max();
// Revert data to byte format
Array.Clear(data, 0, data.Length);
int dataLength = left.Length;
int byteId = -1;
using (BinaryWriter writer = new BinaryWriter(new MemoryStream(data)))
{
for (int i = 0; i < dataLength; i++)
{
byte byte1 = 0;
byte byte2 = 0;
byteId++;
NormalizedToBytes_16(left[i], k, out byte1, out byte2);
writer.Write(byte1);
writer.Write(byte2);
if (right != null)
{
byteId++;
NormalizedToBytes_16(right[i], k, out byte1, out byte2);
writer.Write(byte1);
writer.Write(byte2);
}
}
}
}
// Convert two bytes to one float in the range -1 to 1
static float BytesToNormalized_16(byte firstByte, byte secondByte)
{
// convert two bytes to one short (little endian)
short s = (short)((secondByte << 8) | firstByte);
// convert to range from -1 to (just below) 1
return s / 32768f;
}
// Convert a float value into two bytes (use k as conversion value and not Int16.MaxValue to avoid peaks)
static void NormalizedToBytes_16(float value, float k, out byte firstByte, out byte secondByte)
{
short s = (short)(value * k);
firstByte = (byte)(s & 0x00FF);
secondByte = (byte)(s >> 8);
}
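For reference, adapting the code above to PCM 8-bit (as mentioned in the warning) mostly means swapping the two conversion helpers and reading/writing one byte per sample in the loops, since 8-bit WAV data is stored as unsigned bytes centered on 128. A rough, untested sketch in the same style:
// 8-bit PCM stores unsigned samples; 128 is silence
static float ByteToNormalized_8(byte sample)
{
    return (sample - 128) / 128f;
}
// k plays the same role as in the 16-bit version, e.g. 127f / the maximum absolute sample value
static byte NormalizedToByte_8(float value, float k)
{
    return (byte)((value * k) + 128);
}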

Sorry to revive this, but I tried that PitchShifter class and, while it works, I get crackles in the audio when pitching down (0.5f). Did you work out a way around that?
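Not a definitive fix, but the notes in the original smbPitchShift source suggest the oversampling factor should be at least 4, with 32 giving the best quality, and a larger FFT frame size also tends to reduce artifacts at the cost of more CPU. Assuming the same PitchShifter signature used above, a call along these lines may help when pitching down:
// Sketch: larger FFT frame (2048) and higher oversampling (32) instead of 1024 / 10
PitchShifter.PitchShift(0.5f, in_data_l.Length, (long)2048, (long)32, sampleRate, in_data_l);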

Related

ANT+ FE-C Writing User Configuration data Page 55 (0x37) to Smart Trainer - no change in resistance noticed

I am trying to write user configuration data (Data Page 55, 0x37) to a smart fitness device (bicycle trainer) via FE-C over Bluetooth. Whether I use different values or the maximum values, I notice no change in resistance. Is there a mistake in how the data is processed? The second method (private static byte[] CreateFECUserConfiguration()) returns a byte array which is then written to the device.
private async Task WriteUserConfiguration(GattCharacteristic characteristic)
{
DataWriter writer = new DataWriter();
byte[] bytes = CreateFECUserConfiguration();
writer.WriteBytes(bytes);
var valResult = await characteristic.WriteValueAsync(writer.DetachBuffer());
if (valResult == GattCommunicationStatus.Success)
{
Debug.WriteLine("Write UserConfiguration Successful");
}
}
// create values for testing, will be provided by the user later on
private static byte[] CreateFECUserConfiguration()
{
byte[] bytes = new byte[13]; // size of message
UInt16 userWeight = (ushort)(655.34 / 0.01); // 0-655.34
byte[] userWeightBytes = BitConverter.GetBytes(userWeight);
byte bicycleWheelDiameterOffset = 10; // 0-10, 0.5 byte
UInt16 bicycleWeight = 50 * 20; // 0 – 50, * 20, 1.5 byte
// start merging bicycle wheel diameter offset and bicycle weight + putting them in the right order
byte[] tempWheelDiameterOffset = new byte[1] { bicycleWheelDiameterOffset };
BitArray bicycleWheelDiameterOffsetBits = new BitArray(tempWheelDiameterOffset);
byte[] testbicycleWeightBytes = BitConverter.GetBytes(bicycleWeight);
BitArray testBicycleWeight = new BitArray(testbicycleWeightBytes);
bool[] tempBicycleWeightPartTwo = new bool[8] { testBicycleWeight[4], testBicycleWeight[5], testBicycleWeight[6], testBicycleWeight[7], testBicycleWeight[8], testBicycleWeight[9], testBicycleWeight[10], testBicycleWeight[11] };
BitArray bicycleWeightBitsTwo = new BitArray(tempBicycleWeightPartTwo);
bool[] mergeBitsAsBools = new bool[8] { testBicycleWeight[0], testBicycleWeight[1], testBicycleWeight[2], testBicycleWeight[3], bicycleWheelDiameterOffsetBits[0], bicycleWheelDiameterOffsetBits[1], bicycleWheelDiameterOffsetBits[2], bicycleWheelDiameterOffsetBits[3] };
BitArray tempMergeWheelDiameterOffsetPlusBicycleWeight = new BitArray(mergeBitsAsBools);
byte[] wheelDiameterOffsetPlusBicycleWeight = new byte[1];
byte[] bicycleWeightByteTwo = new byte[1];
tempMergeWheelDiameterOffsetPlusBicycleWeight.CopyTo(wheelDiameterOffsetPlusBicycleWeight, 0);
bicycleWeightBitsTwo.CopyTo(bicycleWeightByteTwo, 0);
//end merging
byte bicycleWheelDiameter = (byte)(0.5 / 0.01); // 0 – 2.54m
byte gearRatio = (byte)(1 / 0.03); // 0.03 – 7.65
bytes[0] = 0xA4;
bytes[1] = 0x09; // length
bytes[2] = 0x4F; // message type
bytes[3] = 0x05; // channel
bytes[4] = 0x37; // Page Number 55
bytes[5] = userWeightBytes[0]; // User Weight LSB
bytes[6] = userWeightBytes[1]; // User Weight MSB
bytes[7] = 0xFF; // Reserved for future use
bytes[8] = wheelDiameterOffsetPlusBicycleWeight[0]; // Bicycle Wheel Diameter Offset 0,5 byte + Bicycle Weight LSN (probably typo in documentation -> LSB?) 0,5 byte
bytes[9] = bicycleWeightByteTwo[0]; // BicycleWeight MSB
bytes[10] = bicycleWheelDiameter; // Bicycle Wheel Diameter
bytes[11] = gearRatio; // Gear Ratio
bytes[12] = ComputeChecksum(bytes); // Method to calculate checksum
return bytes;
}
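As a side note, the BitArray merging above can be written with plain bit shifts. This sketch reproduces the same layout the code builds (bicycle weight LSN in bits 0-3, wheel diameter offset in bits 4-7); check the FE-C documentation to confirm which nibble the offset is actually supposed to occupy, and swap the shifts if it is the other way around:
// Sketch: equivalent to the BitArray merging above, using bit shifts.
byte mergedByte = (byte)((bicycleWeight & 0x0F) | ((bicycleWheelDiameterOffset & 0x0F) << 4)); // -> bytes[8]
byte weightMsb = (byte)((bicycleWeight >> 4) & 0xFF); // bits 4-11 of the 1.5-byte weight field -> bytes[9]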
Ant+ FE-C Data Page 55 (0x37)
Ant+ message format structure

C# - Padding image bytes with white bytes to fill 512 x 512

I'm using the Digital Persona SDK to scan fingerprints in WSQ format. As a requirement I need a 512 x 512 image, but the SDK only exports a 357 x 392 image.
The SDK provides a method to compress the image captured from the device into WSQ format and returns a byte array that I can write to disk.
- I've tried to allocate a buffer of 262144 bytes for the 512 x 512 image.
- Fill the new buffer with white pixel data (each byte set to 255).
- Copy the original image buffer into the new image buffer. The original image doesn't need to be centered, but it's important to copy it without corrupting the image data.
To summarize, I've tried to copy the old image into the upper right corner of the new image.
DPUruNet.Compression.Start();
DPUruNet.Compression.SetWsqBitrate(95, 0);
Fid capturedImage = captureResult.Data;
//Fill the new buffer with white pixel data each byte to value 255.
byte[] bytesWSQ512 = new byte[262144];
for (int i = 0; i < bytesWSQ512.Length; i++)
{
bytesWSQ512[i] = 255;
}
//Compress capturedImage and get bytes (357 x 392)
byte[] bytesWSQ = DPUruNet.Compression.CompressRaw(capturedImage.Views[0].Width, capturedImage.Views[0].Height, 500, 8, capturedImage.Views[0].RawImage, CompressionAlgorithm.COMPRESSION_WSQ_NIST);
//Copy the original image buffer into the new image buffer
for (int i = 0; i < capturedImage.Views[0].Height; i++)
{
for (int j = 0; j < capturedImage.Views[0].Width; j++)
{
bytesWSQ512[i * bytesWSQ512.Length + j ] = bytesWSQ[i * capturedImage.Views[0].Width + j];
}
}
//Write bytes to disk
File.WriteAllBytes(@"C:\Users\Admin\Desktop\bytesWSQ512.wsq", bytesWSQ512);
DPUruNet.Compression.Finish();
When running that snippet I get an IndexOutOfRangeException, and I don't know whether the loop or the calculation of indexes into the new array is right.
Here is a representation of what I'm trying to do.
The problem was that I was indexing the destination buffer with the buffer's total length instead of the destination width (512), and that I was padding the WSQ-compressed bytes instead of the raw image; the fix is to pad the raw 8-bpp image first and then compress the padded result. If someone is trying to achieve something like this, or to pad a raw image, I hope this will help.
DPUruNet.Compression.Start();
DPUruNet.Compression.SetWsqBitrate(75, 0);
Fid ISOFid = captureResult.Data;
byte[] paddedImage = PadImage8BPP(captureResult.Data.Views[0].RawImage, captureResult.Data.Views[0].Width, captureResult.Data.Views[0].Height, 512, 512, 255);
byte[] bytesWSQ512 = Compression.CompressRaw(512, 512, 500, 8, paddedImage, CompressionAlgorithm.COMPRESSION_WSQ_NIST);
And the method to resize (pad) the image is:
public byte[] PadImage8BPP(byte[] original, int original_width, int original_height, int desired_width, int desired_height, byte pad_color)
{
byte[] canvas_8bpp = new byte[desired_width * desired_height];
for (int i = 0; i < canvas_8bpp.Length; i++)
canvas_8bpp[i] = pad_color; //Fill background. Note this type of fill will fail histogram checks.
int clamp_y_begin = 0;
int clamp_y_end = original_height;
int clamp_x_begin = 0;
int clamp_x_end = original_width;
int pad_y = 0;
int pad_x = 0;
if (original_height > desired_height)
{
int crop_distance = (int)Math.Ceiling((original_height - desired_height) / 2.0);
clamp_y_begin = crop_distance;
clamp_y_end = original_height - crop_distance;
}
else
{
pad_y = (desired_height - original_height) / 2;
}
if (original_width > desired_width)
{
int crop_distance = (int)Math.Ceiling((original_width - desired_width) / 2.0);
clamp_x_begin = crop_distance;
clamp_x_end = original_width - crop_distance;
}
else
{
pad_x = (desired_width - original_width) / 2;
}
//We traverse the captured image (either whole image or subset)
for (int y = clamp_y_begin; y < clamp_y_end; y++)
{
for (int x = clamp_x_begin; x < clamp_x_end; x++)
{
byte image_pixel = original[y * original_width + x];
canvas_8bpp[(pad_y + y - clamp_y_begin) * desired_width + pad_x + x - clamp_x_begin] = image_pixel;
}
}
return canvas_8bpp;
}

Save audio stream float frames as WAV with C#

I am testing an application in C# that receives a live audio stream and then saves it to a WAV file. The audio stream has these characteristics: sampling rate 16000 Hz, channels 1, samples per channel per frame 320, play delay 200 ms. The audio frames come as floats; I collect them and store them in a MemoryStream with a BinaryWriter. After that, I convert the content of the MemoryStream into a byte array, and that array is then converted back into a float array. With the float array, I start the process of assembling the WAV file.
I have compared the float frame values received with the ones inside the float array I use to build the WAV file, and they are the same. I am having trouble processing the float array to assemble the WAV file: I am not sure whether I am doing the data conversion wrong in the ConvertAndWrite() method, or whether the WAV header is not well formed for the characteristics of the audio stream.
I can see the WAV file being created, but there seems to be no content inside apart from the header. Any guidance will be much appreciated. I put together this sample code for you to test what I am doing:
using System;
using System.IO;
using System.Text;
class SaveAudioStreamToWav
{
//Sample as received from stream. Here as a double to avoid altering the sample adding F to each value.
public double[] receivedStreamSample = { 0, -0.003509521, -0.003356934, 0.0002746582, -0.004516602, -0.0027771, -0.0003967285, -0.001739502, 0.004150391, 0.0008544922, 0.002593994, 0.00970459, 0.003631592, 0.001800537, 0.004760742, 0.004272461, -0.002655029, -0.001495361, -0.006835938, -0.004211426, -0.0008239746, 0.001525879, 0.006347656, 0.002532959, -0.002471924, -0.001342773, 0.001159668, 0.0006713867, -0.000793457, 0.001403809, -0.0006713867, -0.0006713867, -0.0007629395, 0.0009460449, -0.003662109, 0.00390625, -0.001312256, -0.001678467, 0.002288818, -0.001831055, -0.00579834, 0.001220703, -0.005096436, -0.003631592, -0.007019043, -0.0001220703, -0.0008850098, -0.0001220703, -0.005371094, 0.004608154, 0.004425049, 0.0027771, 0.005279541, 0.0001525879, 0.0009765625, 0.004150391, -0.002807617, 0.001678467, -0.004577637, -0.002685547, -0.004364014, -0.0008544922, 0.001281738, -0.0009155273, -0.008148193, -0.001983643, 9.155273E-05, 0.0008239746, 0.0004272461, 0.002807617, -0.00289917, 0.002075195, 0.008392334, 0.003479004, 0.005615234, 0.0009460449, 0.002471924, 0.0004272461, -0.006164551, 0.0003967285, -0.0007629395, -0.007476807, -0.002532959, 0.01495361, 0.01382446, 0.002288818, -0.009063721, -0.1129761, -0.05401611, 0.03497314, -0.03027344, 0.08999634, 0.01831055, 0.01037598, 0.03302002, 0.02667236, 0.04309082, -0.01806641, -0.0440979, 0.07125854, 0.00680542, -0.01242065, 0.001983643, -0.03710938, 0.009552002, 0.01013184, 0.002258301, 0.007446289, 0.004486084, -0.009063721, -0.007293701, 0.008239746, -0.0003967285, 0.001556396, 0.001586914, 0.002258301, 0.001281738, 0.001617432, -0.001831055, 0.001556396, -0.001525879, -0.002410889, 0.004516602, 0.000793457, -0.001403809, -0.004882813, -0.0005187988, -0.003540039, -0.004302979, 0.0004272461, 0.004974365, -0.002868652, -0.003875732, -0.0001220703, 0.001617432, 0.002258301, -0.005889893, -0.001068115, 0.003295898, 0.002410889, -0.00201416, 0.001068115, 0.003143311, -0.001464844, 0.000579834, 0.005310059, 0.001434326, 0.001403809, 0.001312256, -0.001617432, 0.0009460449, -0.0009765625, -0.0007324219, -0.001617432, -0.004730225, 0.001373291, -0.001586914, 0.0005187988, 0.001556396, -0.001647949, 0.0008544922, 0.001739502, 0.0027771, 0.001831055, 3.051758E-05, -0.04672241, 0.02276611, 0.02529907, -0.005249023, -0.02285767, -0.0378418, -0.1454468, 0.04385376, -0.04058838, -0.005249023, -3.051758E-05, -0.02166748, -0.006378174, -0.002380371, -0.0368042, 0.04330444, -0.008453369, 0.0300293, -0.01651001, -0.005554199, -0.01828003, 0.008972168, -0.01571655, -0.01202393, 0.01141357, -0.003997803, 0.004119873, -0.002532959, 0.004333496, -0.001495361, -0.001281738, -0.003692627, -0.001647949, -0.001861572, 0.000793457, -0.0003662109, -0.002532959, -0.001342773, 0.0003051758, 0.002075195, 0.002349854, 0.001464844, 0.001678467, -0.0008850098, -0.0001525879, 0.003723145, -0.0009155273, 0.002807617, -0.005157471, -0.001617432, 0.002471924, 0.002166748, -0.0003356934, 0.000213623, -0.000793457, -0.0008544922, -0.00100708, 0.000213623, 0.001037598, -0.003448486, 0.0009460449, -0.0006103516, -0.002655029, -0.009735107, -0.01101685, 0.01937866, 0.00994873, -0.02600098, 0.04592896, 0.1063843, 0.002441406, -0.0100708, 0.002990723, -0.01235962, -0.003448486, 0.01089478, -0.01480103, -0.02902222, 0.02990723, -0.01376343, 0.01275635, -0.008666992, 0.006469727, -0.009857178, 0.002655029, -0.0004882813, 0.003814697, 0.004943848, -0.002990723, -0.0003051758, -0.001678467, 0.003265381, 0.0009460449, -9.155273E-05, -0.001403809, 0.001739502, -0.002685547, 
-0.0009460449, -0.001281738, 0.0009765625, 0.001312256, 0.002288818, -0.0002746582, -0.001098633, -0.002319336, -0.000793457, 0.001464844, 0.001281738, -0.002319336, 6.103516E-05, 0.0003967285, -0.002532959, 0.0002441406, 0.001861572, 0.0009765625 };
public float[] floatsArray;
public FileStream fileStream;
static void Main(string[] args)
{
var saveAudioStreamToWav = new SaveAudioStreamToWav();
saveAudioStreamToWav.ConvertDoubleToFloat();
saveAudioStreamToWav.CreateEmpty(saveAudioStreamToWav.SetNameAndPath());
saveAudioStreamToWav.ConvertAndWrite();
saveAudioStreamToWav.WriteHeader();
}
public void ConvertDoubleToFloat()
{
floatsArray = new float[receivedStreamSample.Length];
floatsArray = Array.ConvertAll(receivedStreamSample, x => (float)x);
}
public string SetNameAndPath()
{
//Setting the name of the file
string timeStamp = DateTime.Now.ToString("yyyyMMddHHmmssfff");
string filename = "/TestSavingStreamToWav_" + timeStamp + ".wav";
string path = Directory.GetCurrentDirectory();
string filepath = path + filename;
Console.WriteLine(filepath);
return filepath;
}
public void CreateEmpty(string filepath)
{
const int HEADER_SIZE = 44;
fileStream = new FileStream(filepath, FileMode.CreateNew, FileAccess.ReadWrite);
byte emptyByte = new byte();
for (int i = 0; i < HEADER_SIZE; i++) //preparing an empty space for the header
{
fileStream.WriteByte(emptyByte);
}
}
public void ConvertAndWrite()
{
Int16[] intData = new Int16[floatsArray.Length];
Byte[] bytesData = new Byte[floatsArray.Length * 2]; // bytesData array is twice the size of floatsArray array because a float converted in Int16 is 2 bytes.
const float rescaleFactor = 32767; //to convert float to Int16
for (var i = 0; i < floatsArray.Length; i++)
{
intData[i] = (short)(floatsArray[i] * rescaleFactor);
var byteArr = new Byte[2];
byteArr = BitConverter.GetBytes(intData[i]);
byteArr.CopyTo(bytesData, i * 2);
}
fileStream.Write(bytesData, 0, bytesData.Length);
}
public void WriteHeader()
{
int hz = 16000; //frequency or sampling rate
int headerSize = 44; //default for uncompressed wav
fileStream.Seek(0, SeekOrigin.Begin);
Byte[] riff = System.Text.Encoding.UTF8.GetBytes("RIFF"); //RIFF marker. Marks the file as a riff file. Characters are each 1 byte long.
fileStream.Write(riff, 0, 4);
Byte[] chunkSize = BitConverter.GetBytes(fileStream.Length - 8); //file-size (equals file-size - 8). Size of the overall file - 8 bytes, in bytes (32-bit integer). Typically, you'd fill this in after creation.
fileStream.Write(chunkSize, 0, 4);
Byte[] wave = System.Text.Encoding.UTF8.GetBytes("WAVE"); //File Type Header. For our purposes, it always equals "WAVE".
fileStream.Write(wave, 0, 4);
Byte[] fmt = System.Text.Encoding.UTF8.GetBytes("fmt "); //Mark the format section. Format chunk marker. Includes trailing null.
fileStream.Write(fmt, 0, 4);
Byte[] subChunk1 = BitConverter.GetBytes(16); //Length of format data. Always 16.
fileStream.Write(subChunk1, 0, 4);
UInt16 two = 2;
UInt16 one = 1;
Byte[] audioFormat = BitConverter.GetBytes(one); //Type of format (1 is PCM, other number means compression) . 2 byte integer. Wave type PCM
fileStream.Write(audioFormat, 0, 2);
Byte[] numChannels = BitConverter.GetBytes(one); //Number of Channels - 2 byte integer
fileStream.Write(numChannels, 0, 2);
Byte[] sampleRate = BitConverter.GetBytes(hz); //Sample Rate - 32-bit integer. Sample Rate = Number of Samples per second, or Hertz.
fileStream.Write(sampleRate, 0, 4);
Byte[] byteRate = BitConverter.GetBytes(hz * 2 * 1);// sampleRate * bytesPerSample * number of channels, here 16000*2*1.
fileStream.Write(byteRate, 0, 4);
UInt16 blockAlign = (ushort)(1 * 2); //channels * bytesPerSample, here 1 * 2 // Bytes Per Sample: 1=8 bit Mono, 2 = 8 bit Stereo or 16 bit Mono, 4 = 16 bit Stereo
fileStream.Write(BitConverter.GetBytes(blockAlign), 0, 2);
UInt16 sixteen = 16;
Byte[] bitsPerSample = BitConverter.GetBytes(sixteen); //Bits per sample (BitsPerSample * Channels) ?? should be 8???
fileStream.Write(bitsPerSample, 0, 2);
Byte[] dataString = System.Text.Encoding.UTF8.GetBytes("data"); //"data" chunk header. Marks the beginning of the data section.
fileStream.Write(dataString, 0, 4);
Byte[] subChunk2 = BitConverter.GetBytes(fileStream.Length - headerSize); //Size of the data section. data-size (equals file-size - 44). or NumSamples * NumChannels * bytesPerSample ??
fileStream.Write(subChunk2, 0, 4);
fileStream.Close();
}
}//end of class
I have updated your code into an extension method.
The idea is that you can append your data to a stream, like a file stream or memory stream. Obviously this won't work for non-seekable streams, so you could probably add error checking and validation.
I think I got the header right after looking at the specs; it seems to play, at least. Note this is not really cross-platform because of the endianness.
I'm not really sure what the rescaleFactor is, but I'll have to trust you there.
You should be able to modify this to accept data in different formats.
Lastly, I am updating the header at the end of each append; you could do this separately, i.e. keep adding to the stream and then update the header once when finished. Add pepper and salt to taste.
Usage
using (var stream = new FileStream(GetFileName(), FileMode.OpenOrCreate, FileAccess.ReadWrite))
{
stream.AppendWaveData(receivedStreamSample);
}
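If the audio arrives in chunks, the same extension can be called repeatedly on the open stream; as written the header is rewritten on every append, or you can pull UpdateHeader out of AppendWaveData and call it once at the end. A rough sketch, where incomingFrames is a hypothetical source of the 320-sample float buffers:
using (var stream = new FileStream(GetFileName(), FileMode.OpenOrCreate, FileAccess.ReadWrite))
{
    foreach (float[] frame in incomingFrames) // hypothetical: one float buffer per received audio frame
    {
        stream.AppendWaveData(frame);
    }
}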
Extension
public static class BinaryWriterExtensions
{
private const int HeaderSize = 44;
private const int Hz = 16000; //frequency or sampling rate
private const float RescaleFactor = 32767; //to convert float to Int16
public static void AppendWaveData<T>(this T stream, float[] buffer)
where T : Stream
{
if (stream.Length > HeaderSize)
{
stream.Seek(0, SeekOrigin.End);
}
else
{
stream.SetLength(HeaderSize);
stream.Position = HeaderSize;
}
// rescale
var floats = Array.ConvertAll(buffer, x => (short)(x * RescaleFactor));
// Copy to bytes
var result = new byte[floats.Length * sizeof(short)];
Buffer.BlockCopy(floats, 0, result, 0, result.Length);
// write to stream
stream.Write(result, 0, result.Length);
// Update Header
UpdateHeader(stream);
}
public static void UpdateHeader(Stream stream)
{
var writer = new BinaryWriter(stream);
writer.Seek(0, SeekOrigin.Begin);
writer.Write(Encoding.ASCII.GetBytes("RIFF")); //RIFF marker. Marks the file as a riff file. Characters are each 1 byte long.
writer.Write((int)(writer.BaseStream.Length - 8)); //file-size (equals file-size - 8). Size of the overall file - 8 bytes, in bytes (32-bit integer). Typically, you'd fill this in after creation.
writer.Write(Encoding.ASCII.GetBytes("WAVE")); //File Type Header. For our purposes, it always equals "WAVE".
writer.Write(Encoding.ASCII.GetBytes("fmt ")); //Mark the format section. Format chunk marker. Includes trailing null.
writer.Write(16); //Length of format data. Always 16.
writer.Write((short)1); //Type of format (1 is PCM, other number means compression) . 2 byte integer. Wave type PCM
writer.Write((short)2); //Number of Channels - 2 byte integer
writer.Write(Hz); //Sample Rate - 32-bit integer. Sample Rate = Number of Samples per second, or Hertz.
writer.Write(Hz * 2 * 1); // sampleRate * bytesPerSample * number of channels, here 16000*2*1.
writer.Write((short)(1 * 2)); //channels * bytesPerSample, here 1 * 2 // Bytes Per Sample: 1=8 bit Mono, 2 = 8 bit Stereo or 16 bit Mono, 4 = 16 bit Stereo
writer.Write((short)16); //Bits per sample (BitsPerSample * Channels) ?? should be 8???
writer.Write(Encoding.ASCII.GetBytes("data")); //"data" chunk header. Marks the beginning of the data section.
writer.Write((int)(writer.BaseStream.Length - HeaderSize)); //Size of the data section. data-size (equals file-size - 44). or NumSamples * NumChannels * bytesPerSample ??
}
} //end of class
Change
writer.Write((short)2); into writer.Write((short)1);
and the generated .wav file will play well both on Windows (tested on Windows 7) and on Android devices.
Otherwise, Windows Media Player says it is having trouble playing the file, and Android plays it faster than expected.

How to split a wav file on WP8?

Is there a way to split a wav file in C# WP8? I'd like to save a sample of a wav file, starting at, for example, 00:30 and ending at 00:40.
Maybe using some kind of stream or buffer, but then I'd have to know when to start/finish copying the stream to another wav file.
How can I do this?
On my blog I posted about this, starting with this post (http://csharp-tricks-en.blogspot.de/2011/03/read-in-wave-files.html) about reading a wave file, with the next two posts building on it. Hope that helps!
Best,
Oliver
Since a wav file's header can vary from 44 (standard) to over 100 bytes, you'll need to first determine the exact size of the header and also read important meta information before you can start chopping up your wav file(s).
So you need to know the following meta data about your wav file(s),
Sample rate,
Bit depth,
Channel count,
Audio data format (whether samples are PCM or Floating-Point)
Header size.
Before continuing, I'd recommend reading the format of a wav file header first so you'll have a better idea of what the code is doing.
So first we need to get our meta info,
private void ReadMetaData(FileStream stream, out bool isFloatingPoint, out int channelCount, out int sampleRate, out int bitDepth, out int headerSize)
{
var headerBytes = new byte[200];
// Read header bytes.
stream.Position = 0;
stream.Read(headerBytes, 0, 200);
headerSize = new string(Encoding.ASCII.GetChars(headerBytes)).IndexOf("data") + 8;
isFloatingPoint = BitConverter.ToUInt16(new byte[] { headerBytes[20], headerBytes[21] }, 0) == 3;
channelCount = BitConverter.ToUInt16(new byte[] { headerBytes[22] , headerBytes[23] }, 0);
sampleRate = (int)BitConverter.ToUInt32(new byte[] { headerBytes[24], headerBytes[25], headerBytes[26], headerBytes[27] }, 0);
bitDepth = BitConverter.ToUInt16(new byte[] { headerBytes[34], headerBytes[35] }, 0);
}
Once we have this data we can then calculate where we need to start and stop reading our file. To calculate the start and end indexes we do,
var startIndex = (int)(start.TotalSeconds * sampleRate * byteDepth * channelCount);
var endIndex = (int)(end.TotalSeconds * sampleRate * byteDepth * channelCount);
start and end are TimeSpans indicating when to start and stop cropping, and byteDepth is the number of bytes per sample (bitDepth / 8).
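For example, cropping from 00:30 in a 16-bit mono file at 16000 Hz gives startIndex = 30 * 16000 * 2 * 1 = 960000 bytes into the data chunk.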
We can now read the bytes from our file using our newly calculated info, if you're using a FileStream you would do the following,
var newBytes = new byte[endIndex - startIndex];
myStream.Position = headerSize + startIndex; // Add headerSize to position to make sure we don't read the header.
myStream.Read(newBytes, 0, newBytes.Length);
Then all you have to do is write a wav header to the destination file along with the newly extracted audio. Putting this all together, you should end up with something like this:
private void CropWavFile(string inputFilePath, string outputFilePath, TimeSpan start, TimeSpan end)
{
var stream = new FileStream(inputFilePath, FileMode.Open);
var newStream = new FileStream(outputFilePath, FileMode.OpenOrCreate);
var isFloatingPoint = false;
var sampleRate = 0;
var bitDepth = 0;
var channelCount = 0;
var headerSize = 0;
// Get meta info
ReadMetaData(stream, out isFloatingPoint, out channelCount, out sampleRate, out bitDepth, out headerSize);
// Calculate where we need to start and stop reading.
var startIndex = (int)(start.TotalSeconds * sampleRate * (bitDepth / 8) * channelCount);
var endIndex = (int)(end.TotalSeconds * sampleRate * (bitDepth / 8) * channelCount);
var bytesCount = endIndex - startIndex;
var newBytes = new byte[bytesCount];
// Read audio data.
stream.Position = startIndex + headerSize;
stream.Read(newBytes, 0, bytesCount);
// Write the wav header and our newly extracted audio to the new wav file.
WriteMetaData(newStream, isFloatingPoint, (ushort)channelCount, (ushort)bitDepth, sampleRate, newBytes.Length / (bitDepth / 8));
newStream.Write(newBytes, 0, newBytes.Length);
stream.Dispose();
newStream.Dispose();
}
private void WriteMetaData(FileStream stream, bool isFloatingPoint, ushort channels, ushort bitDepth, int sampleRate, int totalSampleCount)
{
stream.Position = 0;
// RIFF header.
// Chunk ID.
stream.Write(Encoding.ASCII.GetBytes("RIFF"), 0, 4);
// Chunk size.
stream.Write(BitConverter.GetBytes(((bitDepth / 8) * totalSampleCount) + 36), 0, 4);
// Format.
stream.Write(Encoding.ASCII.GetBytes("WAVE"), 0, 4);
// Sub-chunk 1.
// Sub-chunk 1 ID.
stream.Write(Encoding.ASCII.GetBytes("fmt "), 0, 4);
// Sub-chunk 1 size.
stream.Write(BitConverter.GetBytes(16), 0, 4);
// Audio format (floating point (3) or PCM (1)). Any other format indicates compression.
stream.Write(BitConverter.GetBytes((ushort)(isFloatingPoint ? 3 : 1)), 0, 2);
// Channels.
stream.Write(BitConverter.GetBytes(channels), 0, 2);
// Sample rate.
stream.Write(BitConverter.GetBytes(sampleRate), 0, 4);
// Bytes rate.
stream.Write(BitConverter.GetBytes(sampleRate * channels * (bitDepth / 8)), 0, 4);
// Block align.
stream.Write(BitConverter.GetBytes((ushort)channels * (bitDepth / 8)), 0, 2);
// Bits per sample.
stream.Write(BitConverter.GetBytes(bitDepth), 0, 2);
// Sub-chunk 2.
// Sub-chunk 2 ID.
stream.Write(Encoding.ASCII.GetBytes("data"), 0, 4);
// Sub-chunk 2 size.
stream.Write(BitConverter.GetBytes((bitDepth / 8) * totalSampleCount), 0, 4);
}
private void ReadMetaData(FileStream stream, out bool isFloatingPoint, out int channelCount, out int sampleRate, out int bitDepth, out int headerSize)
{
var headerBytes = new byte[200];
// Read header bytes.
stream.Position = 0;
stream.Read(headerBytes, 0, 200);
headerSize = new string(Encoding.ASCII.GetChars(headerBytes)).IndexOf("data") + 8;
isFloatingPoint = BitConverter.ToUInt16(new byte[] { headerBytes[20], headerBytes[21] }, 0) == 3;
channelCount = BitConverter.ToUInt16(new byte[] { headerBytes[22] , headerBytes[23] }, 0);
sampleRate = (int)BitConverter.ToUInt32(new byte[] { headerBytes[24], headerBytes[25], headerBytes[26], headerBytes[27] }, 0);
bitDepth = BitConverter.ToUInt16(new byte[] { headerBytes[34], headerBytes[35] }, 0);
}
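For the 00:30 to 00:40 sample asked about in the question, the call would look something like this (the file names are just placeholders):
CropWavFile("input.wav", "cropped.wav", TimeSpan.FromSeconds(30), TimeSpan.FromSeconds(40));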

Windows Phone Encoding and Decoding audio using NSpeex. Having issue with decoding?

I am trying to encode recorded audio using NSpeex, transfer it over the internet, and decode it on the other end. I am doing all this in Windows Phone 7/8. To encode and decode I am using the following code, but after decoding I am not getting a result back that I can play again. Can anyone provide me with encoding and decoding code that works with audio recorded on WP7/8?
private static Microphone mic = Microphone.Default;
private static byte[] EncodeSpeech(byte[] buf, int len)
{
BandMode mode = GetBandMode(mic.SampleRate);
SpeexEncoder encoder = new SpeexEncoder(mode);
// set encoding quality to lowest (which will generate the smallest size in the fastest time)
encoder.Quality = 1;
int inDataSize = len / 2;
// convert to short array
short[] data = new short[inDataSize];
int sampleIndex = 0;
for (int index = 0; index < len; index += 2, sampleIndex++)
{
data[sampleIndex] = BitConverter.ToInt16(buf, index);
}
// note: the number of samples per frame must be a multiple of encoder.FrameSize
inDataSize = inDataSize - inDataSize % encoder.FrameSize;
var encodedData = new byte[len];
int encodedBytes = encoder.Encode(data, 0, inDataSize, encodedData, 0, len);
if (encodedBytes != 0)
{
// each chunk is laid out as follows:
// | 4-byte total chunk size | 4-byte encoded buffer size | <encoded-bytes> |
byte[] inDataSizeBuf = BitConverter.GetBytes(inDataSize);
byte[] sizeBuf = BitConverter.GetBytes(encodedBytes + inDataSizeBuf.Length);
byte[] returnBuf = new byte[encodedBytes + sizeBuf.Length + inDataSizeBuf.Length];
sizeBuf.CopyTo(returnBuf, 0);
inDataSizeBuf.CopyTo(returnBuf, sizeBuf.Length);
Array.Copy(encodedData, 0, returnBuf, sizeBuf.Length + inDataSizeBuf.Length, encodedBytes);
return returnBuf;
}
else
return buf;
}
private byte[] DecodeSpeech(byte[] buf)
{
BandMode mode = GetBandMode(mic.SampleRate);
SpeexDecoder decoder = new SpeexDecoder(mode);
byte[] inDataSizeBuf = new byte[4];
byte[] sizeBuf = new byte[4];
byte[] encodedBuf = new byte[buf.Length - 8];
Array.Copy(buf, 0, sizeBuf, 0, 4);
Array.Copy(buf, 4, inDataSizeBuf, 0, 4);
Array.Copy(buf, 8, encodedBuf, 0, buf.Length - 8);
int inDataSize = BitConverter.ToInt32(inDataSizeBuf, 0);
int size = BitConverter.ToInt32(sizeBuf, 0);
short[] decodedBuf = new short[inDataSize];
int decodedSize = decoder.Decode(encodedBuf, 0, encodedBuf.Length, decodedBuf, 0, false);
byte[] returnBuf = new byte[inDataSize * 2];
for (int index = 0; index < decodedBuf.Length; index++)
{
byte[] temp = BitConverter.GetBytes(decodedBuf[index]);
Array.Copy(temp, 0, returnBuf, index * 2, 2);
}
return returnBuf;
}
private static BandMode GetBandMode(int sampleRate)
{
if (sampleRate <= 8000)
return BandMode.Narrow;
if (sampleRate <= 16000)
return BandMode.Wide;
return BandMode.UltraWide;
}
I think your problem may be that you are newing up a new SpeexEncoder every time you want to encode audio. You should try making it a member of your class and re-using it.
I looked at the code for NSpeex and noticed that SpeexEncoder uses NbEncoder for the narrow band. That class appears to keep a history of some previous audio data in order to perform the encoding, which would mean that output from different encoder instances does not fit together.
private static Microphone mic = Microphone.Default;
private static SpeexEncoder encoder = CreateEncoder();
private static SpeexEncoder CreateEncoder()
{
BandMode mode = GetBandMode(mic.SampleRate);
SpeexEncoder encoder = new SpeexEncoder(mode);
// set encoding quality to lowest (which will generate the smallest size in the fastest time)
encoder.Quality = 1;
return encoder;
}
private static byte[] EncodeSpeech(byte[] buf, int len)
{
int inDataSize = len / 2;
...
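The same reasoning presumably applies on the receiving side: the decoder also carries state between frames, so it is worth making the SpeexDecoder a reused member as well instead of constructing one inside every DecodeSpeech call. A sketch mirroring the encoder change above (not verified against NSpeex):
private static SpeexDecoder decoder = new SpeexDecoder(GetBandMode(mic.SampleRate));
// ...and remove the local 'SpeexDecoder decoder = new SpeexDecoder(mode);' line from DecodeSpeech.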
