Trouble making a 1bpp PCX from a 1bpp bitmap - C#

I have a class that worked for converting 8 bpp bitmaps to PCX, but I am having trouble making it work for 1 bpp.
The source image looks correct, but the resulting PCX is black below scanline 27 when opened in IrfanView.
Can anyone help me spot the glaring error?
Usage, by the way, is Pcx.SavePCX("AOut.pcx", new Bitmap("A.bmp"));
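Note that SavePCX (below) throws unless the bitmap is already Format1bppIndexed. If the source image might not be 1bpp, one way to guard the call is sketched here with the same hypothetical file names, assuming the usual System.Drawing usings; GDI+ Bitmap.Clone can convert to 1bpp, though its default thresholding may or may not suit a given image.
using (var src = new Bitmap("A.bmp"))
{
    // Convert to 1bpp first if needed; SavePCX only accepts Format1bppIndexed.
    using (var bmp1 = src.PixelFormat == PixelFormat.Format1bppIndexed
        ? src
        : src.Clone(new Rectangle(0, 0, src.Width, src.Height), PixelFormat.Format1bppIndexed))
    {
        Pcx.SavePCX("AOut.pcx", bmp1);
    }
}
The class itself follows.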
using System;
using System.Diagnostics;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using System.Runtime.InteropServices;
namespace Imaging
{
public sealed class Pcx
{
private static void WriteWord(Stream stream, int data)
{
stream.WriteByte((byte) (data & 0xFF));
stream.WriteByte((byte) ((data >> 8) & 0xFF));
}
public static void SavePCX(Stream pcxStream, Bitmap bmp)
{
if (bmp.PixelFormat != PixelFormat.Format1bppIndexed)
{
throw new Exception("Can only PCX bitmaps that are 1bpp indexed");
}
var data = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadOnly, bmp.PixelFormat);
try
{
{
//header
pcxStream.WriteByte(10); // char manufacturer;
pcxStream.WriteByte(5); //char version;
pcxStream.WriteByte(1); //char encoding;
pcxStream.WriteByte(1); // char bpp;
pcxStream.WriteByte(0);
pcxStream.WriteByte(0); //char xmin[2];
pcxStream.WriteByte(0);
pcxStream.WriteByte(0); //char ymin[2];
WriteWord(pcxStream, bmp.Width - 1); // char xmax[2];
WriteWord(pcxStream, bmp.Height - 1); //char ymax[2];
WriteWord(pcxStream, 72); //word(pcx->hdpi, 72);
WriteWord(pcxStream, 72); // word(pcx->vdpi, 72);
for (var i = 0; i < 16*3; i++) // 16-color header palette (48 bytes), left zeroed
{
pcxStream.WriteByte(0);
}
pcxStream.WriteByte(0); // pcx->res = 0;
pcxStream.WriteByte(1); // pcx->nplanes = 1;
WriteWord(pcxStream, bmp.Width); // word(pcx->bytesperline, width / 2);
WriteWord(pcxStream, 0); //word(pcx->palletteinfo, 0);
WriteWord(pcxStream, 0); //word(pcx->hscrn, 0);
WriteWord(pcxStream, 0); //word(pcx->vscrn, 0);
for (var i = 0; i < 54; i++) //memset(pcx->filler, 0, 54);
{
pcxStream.WriteByte(0);
}
} //end of header
{
//read all bytes to an array
var baseLine = data.Scan0;
// Declare an array to hold the bytes of the bitmap.
var byteLength = bmp.Width*bmp.Height;
var bytes = new byte[byteLength];
// Copy the packed pixel bytes into the array.
for (var y = 0; y < data.Height; y++)
{
var lineOffset = y*data.Stride;
Debug.WriteLine("Y={0}, Offset={1}", y, lineOffset);
for (var x = 0; x < data.Width; x++)
{
bytes[y*bmp.Width + x] = Marshal.ReadByte(baseLine, lineOffset + x);
}
}
var baseIdx = 0;
var end = byteLength;
var run = 0;
var ldata = -1;
byte ld;
while (baseIdx < end)
{
//if it matches, increase the run by 1 up to max of 63
if ((bytes[baseIdx] == ldata) && (run < 63)) run++;
else
{
//write data
if (run != 0) //not first run
{
ld = (byte) ldata;
if ((run > 1) || (ld >= 0xC0)) pcxStream.WriteByte((byte) (0xC0 | run));
pcxStream.WriteByte(ld);
}
run = 1;
}
ldata = bytes[baseIdx];
baseIdx++;
}
ld = (byte) ((ldata >> 4) | (ldata << 4));
if ((run > 1) || (ld >= 0xC0)) pcxStream.WriteByte((byte) (0xC0 | run));
pcxStream.WriteByte(ld);
}
}
finally
{
bmp.UnlockBits(data);
}
}
public static void SavePCX(string fileName, Bitmap bbp1Bmp)
{
using (var fstest = new FileStream(fileName, FileMode.Create, FileAccess.Write))
{
SavePCX(fstest, bbp1Bmp);
}
}
}
}

Solved it by using data.Stride for the bytes-per-line field, and by reading the bytes from the 1bpp bitmap using the stride as the row width instead of the pixel width. For a 1bpp bitmap each row occupies only Width/8 bytes (rounded up), and GDI+ pads every row to a 4-byte boundary, so data.Stride is the number of bytes actually stored per scanline, not bmp.Width.
public static void SavePCX(Stream pcxStream, Bitmap bmp)
{
if (bmp.PixelFormat != PixelFormat.Format1bppIndexed)
{
throw new Exception("Can only PCX bitmaps that are 1bpp indexed");
}
var data = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadOnly, bmp.PixelFormat);
try
{
{
//header
pcxStream.WriteByte(10); // char manufacturer;
pcxStream.WriteByte(5); //char version;
pcxStream.WriteByte(1); //char encoding;
pcxStream.WriteByte(1); // char bpp;
pcxStream.WriteByte(0);
pcxStream.WriteByte(0); //char xmin[2];
pcxStream.WriteByte(0);
pcxStream.WriteByte(0); //char ymin[2];
WriteWord(pcxStream, bmp.Width - 1); // char xmax[2];
WriteWord(pcxStream, bmp.Height - 1); //char ymax[2];
WriteWord(pcxStream, 72); //word(pcx->hdpi, 72);
WriteWord(pcxStream, 72); // word(pcx->vdpi, 72);
for (var i = 0; i < 16*3; i++) // 16-color header palette (48 bytes), left zeroed
{
pcxStream.WriteByte(0);
}
pcxStream.WriteByte(0); // pcx->res = 0;
pcxStream.WriteByte(1); // pcx->nplanes = 1;
WriteWord(pcxStream, data.Stride); // bytes per line: the GDI+ stride (already padded to an even byte count)
WriteWord(pcxStream, 0); //word(pcx->palletteinfo, 0);
WriteWord(pcxStream, 0); //word(pcx->hscrn, 0);
WriteWord(pcxStream, 0); //word(pcx->vscrn, 0);
for (var i = 0; i < 54; i++) //memset(pcx->filler, 0, 54);
{
pcxStream.WriteByte(0);
}
} //end of header
{
//read all bytes to an array
var baseLine = data.Scan0;
// Declare an array to hold the bytes of the bitmap.
var byteLength = data.Stride*data.Height;
var bytes = new byte[byteLength];
// Copy the packed 1bpp row bytes (stride bytes per row) into the array.
for (var y = 0; y < data.Height; y++)
{
var lineOffset = y*data.Stride;
Debug.WriteLine("Y={0}, Offset={1}", y, lineOffset);
for (var x = 0; x < data.Stride; x++)
{
bytes[y*data.Stride + x] = Marshal.ReadByte(baseLine, lineOffset + x);
}
}
var baseIdx = 0;
var end = byteLength;
var run = 0;
var ldata = -1;
byte ld;
while (baseIdx < end)
{
//if it matches, increase the run by 1 up to max of 63
if ((bytes[baseIdx] == ldata) && (run < 63)) run++;
else
{
//write data
if (run != 0) //not first run
{
ld = (byte) ldata;
if ((run > 1) || (ld >= 0xC0)) pcxStream.WriteByte((byte) (0xC0 | run));
pcxStream.WriteByte(ld);
}
run = 1;
}
ldata = bytes[baseIdx];
baseIdx++;
}
ld = (byte) ((ldata >> 4) | (ldata << 4)); // note: this nibble swap is a leftover from a 4bpp variant; for 1bpp the final byte can be written unmodified
if ((run > 1) || (ld >= 0xC0)) pcxStream.WriteByte((byte) (0xC0 | run));
pcxStream.WriteByte(ld);
}
}
finally
{
bmp.UnlockBits(data);
}
}
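Using data.Stride works because GDI+ pads each 1bpp row to a 4-byte boundary, which is always even, and the PCX format requires an even bytes-per-line value; the padding bytes simply get encoded along with the pixels. If you would rather compute the field without leaning on the GDI+ stride, here is a small helper of my own (a sketch, not from the post):
// Sketch: PCX bytes-per-line for a given width and bit depth.
// The PCX header requires this to be an even number of bytes.
private static int PcxBytesPerLine(int widthInPixels, int bitsPerPixel)
{
    int bytes = (widthInPixels * bitsPerPixel + 7) / 8; // bytes needed to hold one row
    return (bytes & 1) == 0 ? bytes : bytes + 1;        // round up to an even count
}
If that value is used instead of data.Stride, the row copy loop has to copy only that many bytes per scanline as well, so the header and the pixel data stay consistent.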

Related

Get frequency spectrum lines of a .wav file C#

I want to display the frequency spectrum of a .wav file, but I don't know how to do it right. I am using C# with the NAudio NuGet package to handle the audio data.
When I insert all the spec_data points, only one column ends up filled with data (first picture); the second picture shows how it should look in the end.
How can I get all the spectrum points of the audio file?
var buffer = new byte[fft_size];
int bytes_read = fileStream.Read(buffer, 0, buffer.Length);
int BYTES_PER_POINT = fileStream.WaveFormat.BitsPerSample / 8; //8Bit = 1Byte
short[] values = new short[buffer.Length / BYTES_PER_POINT];
for (int n = 0; n < BYTES_PER_POINT; n++)
{
for (int i = 0; i < bytes_read; i += BYTES_PER_POINT)
{
values[i / BYTES_PER_POINT] = (short)((buffer[i + 1] << 8) | buffer[i + 0]);
}
}
neanalizir_values.AddRange(values);
short[] data = new short[fft_size];
data = neanalizir_values.GetRange(0, fft_size).ToArray();
spec_data.RemoveAt(0);
List<double> new_data = new List<double>();
Complex[] fft_buffer = new Complex[fft_size];
for (int i = 0; i < fft_size; i++)
{
fft_buffer[i].X = (float)(neanalizir_values[i] * FastFourierTransform.HammingWindow(i, fft_size));
fft_buffer[i].Y = 0;
}
FastFourierTransform.FFT(true, (int)Math.Log(fft_size, 2.0), fft_buffer);
for (int i = 0; i < spec_data[spec_data.Count - 1].Count; i++)
{
double val;
val = (double)fft_buffer[i].X + (double)fft_buffer[i].Y;
val = Math.Abs(val);
new_data.Add(val);
}
new_data.Reverse();
spec_data.Insert(spec_data.Count, new_data);
neanalizir_values.RemoveRange(0, fft_size / pixelsPerBuffer);
Bitmap bitmap = new Bitmap(spec_data.Count, spec_data[0].Count, PixelFormat.Format8bppIndexed);
ColorPalette pal = bitmap.Palette;
for (int i = 0; i < 256; i++)
pal.Entries[i] = Color.FromArgb(255, i, i, i);
bitmap.Palette = pal;
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadOnly, bitmap.PixelFormat);
byte[] pixels = new byte[bitmapData.Stride * bitmap.Height];
for (int col = 0; col < spec_data.Count; col++)
{
double scaleFactor = 40;
for (int row = 0; row < spec_data[col].Count; row++)
{
int bytePosition = row * bitmapData.Stride + col;
double pixelVal = spec_data[col][row] * scaleFactor;
pixelVal = Math.Max(0, pixelVal);
pixelVal = Math.Min(255, pixelVal);
pixels[bytePosition] = (byte)(pixelVal);
}
}
Marshal.Copy(pixels, 0, bitmapData.Scan0, pixels.Length);
bitmap.UnlockBits(bitmapData);
pictureBox1.Image = bitmap;
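No answer is attached to this one, but the usual NAudio pattern is to read the file block by block, FFT each block, and append one spectrum column per block so the spectrogram fills from left to right. A rough sketch of mine follows; the WaveFileReader, FastFourierTransform and Complex calls are NAudio's, while the block loop, the names and the 16-bit-mono assumption are illustrative only.
using System;
using System.Collections.Generic;
using NAudio.Dsp;
using NAudio.Wave;
// Sketch: build one spectrum column per fftSize block of 16-bit mono samples.
static List<double[]> BuildSpectrogram(string path, int fftSize)
{
    var columns = new List<double[]>();
    using (var reader = new WaveFileReader(path))
    {
        var bytes = new byte[fftSize * 2];              // 2 bytes per 16-bit sample
        int m = (int)Math.Log(fftSize, 2.0);
        // Stop at the last full block.
        while (reader.Read(bytes, 0, bytes.Length) == bytes.Length)
        {
            var fft = new Complex[fftSize];
            for (int i = 0; i < fftSize; i++)
            {
                short sample = (short)(bytes[2 * i] | (bytes[2 * i + 1] << 8));
                fft[i].X = (float)(sample * FastFourierTransform.HammingWindow(i, fftSize));
                fft[i].Y = 0;
            }
            FastFourierTransform.FFT(true, m, fft);
            var col = new double[fftSize / 2];          // keep the positive-frequency half
            for (int i = 0; i < col.Length; i++)
                col[i] = Math.Sqrt(fft[i].X * fft[i].X + fft[i].Y * fft[i].Y);
            columns.Add(col);
        }
    }
    return columns;
}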

Convert 2 Alaw stream to 1 Stereo wav file

I have two A-law streams that I'm trying to convert to a stereo wav file.
The result sounds bad: I can hear the content, but there is an annoying squeaking sound that doesn't exist in the original files.
Here is what I'm doing.
For the wav header:
public static byte[] GetHeader(int fileLength, int bitsPerSample, int numberOfChannels, int sampleRate) // bitsPerSample = 8, numberOfChannels= 1 or 2 (mono or stereo) , sampleRate = 8000
{
MemoryStream memStream = new MemoryStream(WAVE_HEADER_SIZE);
BinaryWriter writer = new BinaryWriter(memStream);
writer.Write(Encoding.ASCII.GetBytes("RIFF"), 0, 4); //4 bytes of RIFF description
writer.Write((fileLength - 8) / 2);
writer.Write(Encoding.ASCII.GetBytes("WAVE"), 0, 4);
writer.Write(Encoding.ASCII.GetBytes("fmt "), 0, 4);
writer.Write(16); //Chunk size
writer.Write((short)6); //wFormatTag (1 == uncompressed PCM, 6 = A law compression)
writer.Write((short)numberOfChannels); //nChannels
writer.Write(sampleRate); //nSamplesPerSec
int blockAllign = bitsPerSample / 8 * numberOfChannels;
writer.Write(sampleRate * blockAllign); //nAvgBytesPerSec
writer.Write((short)blockAllign); //nBlockAlign
writer.Write((short)bitsPerSample); //wBitsPerSample
writer.Write(Encoding.ASCII.GetBytes("data"), 0, 4);
writer.Write(fileLength / 2 - WAVE_HEADER_SIZE); //data length
writer.Flush();
byte[] buffer = new byte[memStream.Length];
memStream.Position = 0;
memStream.Read(buffer, 0, buffer.Length);
writer.Close();
return buffer;
}
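One thing worth double-checking in GetHeader (my observation, not part of the original question): for an 8-bit A-law payload the RIFF size fields normally carry full byte counts, so there is no division by two; halving would only make sense if fileLength counted 16-bit PCM bytes. A small sketch of the conventional values, assuming fileLength is the final size of the .wav file in bytes and the header is the standard 44 bytes:
// Sketch: conventional RIFF bookkeeping for a 44-byte WAV header.
// riffChunkSize is written right after "RIFF"; dataChunkSize right after "data".
static class RiffSizes
{
    public const int HeaderSize = 44;
    public static int RiffChunkSize(int totalFileLength)
    {
        return totalFileLength - 8;           // everything after "RIFF" + the size field itself
    }
    public static int DataChunkSize(int totalFileLength)
    {
        return totalFileLength - HeaderSize;  // payload bytes after the header
    }
}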
The data (the original streams are read from alaw files):
do
{
if (channels == 2)
{
if (dlRead == 0)
{
dlRead = dlStream.Read (dlBuffer, 0, dlBuffer.Length);
}
if (ulRead == 0)
{
ulRead =ulStream.Read (ulBuffer, 0, ulBuffer.Length);
}
if ((dlRead != 0) && (ulRead != 0))
{
//Create the stereo wave buffer
Array.Clear(waveBuffer, 0, waveBuffer.Length);
for (int i = 0; i < dlRead / 2; ++i)
{
waveBuffer[i * 4 + 0] = dlBuffer[i * 2];
waveBuffer[i * 4 + 1] = dlBuffer[i * 2 + 1];
}
for (int i = 0; i < ulRead / 2; ++i)
{
waveBuffer[i * 4 + 2] = ulBuffer[i * 2];
waveBuffer[i * 4 + 3] = ulBuffer[i * 2 + 1];
}
bytesToWrite = Math.Max(ulRead * 2, dlRead * 2);
dlRead = 0;
ulRead = 0;
}
else
bytesToWrite = 0;
}
else
{
//Create the mono wave buffer
if (metadata.ULExists)
{
bytesToWrite = ulStream.Read(ulBuffer, 0, ulBuffer.Length);
Buffer.BlockCopy(ulBuffer, 0, waveBuffer, 0, ulBuffer.Length);
}
else if (metadata.DLExists)
{
bytesToWrite = dlStream.Read(dlBuffer, 0, dlBuffer.Length);
Buffer.BlockCopy(dlBuffer, 0, waveBuffer, 0, dlBuffer.Length);
}
else
{
bytesToWrite = 0;
}
}
if (bytesToWrite > 0)
fileStream.Write(waveBuffer, 0, bytesToWrite);
} while (bytesToWrite > 0);
What am I doing wrong? Any ideas?
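No answer is shown here, but one likely culprit (my guess, hedged accordingly): A-law stores one byte per sample, so a stereo frame is two single bytes, one from each stream. The loop above interleaves two-byte pairs instead, which shuffles neighbouring samples within each channel and can produce exactly this kind of periodic artifact. A minimal sketch of byte-wise interleaving, reusing the post's buffer names:
// Sketch (not the original code): interleave two 8-bit A-law buffers one sample
// (one byte) per channel; returns the number of bytes placed into waveBuffer.
static int InterleaveALaw(byte[] dlBuffer, int dlRead, byte[] ulBuffer, int ulRead, byte[] waveBuffer)
{
    int frames = Math.Min(dlRead, ulRead);    // frames available on both channels
    for (int i = 0; i < frames; i++)
    {
        waveBuffer[i * 2 + 0] = dlBuffer[i];  // left channel sample
        waveBuffer[i * 2 + 1] = ulBuffer[i];  // right channel sample
    }
    return frames * 2;                        // two bytes per stereo frame
}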

Converting a multi-band 16-bit tiff image to an 8-bit tiff image

I got some pixel data from a 16-bit (range 0-65535) TIFF image as an integer array, using GDAL's ReadRaster. How do I convert the values to 8-bit (0-255) and write the array out as an 8-bit TIFF image?
Here is some of my code:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OSGeo.GDAL;
using OSGeo.OSR;
namespace ConsoleApplication4
{
class Program
{
static void Main(string[] args)
{
Gdal.AllRegister();
Dataset data1;
int xsize, ysize;
int bandsize;
data1 = Gdal.Open("F:\\po_1473547_bgrn_0000000.tif", Access.GA_ReadOnly);
bandsize = data1.RasterCount;
xsize = data1.RasterXSize; //cols
ysize = data1.RasterYSize; //rows
Console.WriteLine("cols : "+xsize+", rows : "+ysize);
Band[] bands = new Band[bandsize];
for (int i = 0; i < bandsize; i++) {
bands[i] = data1.GetRasterBand(i+1);
}
int[,,] pixel = new int[bandsize,xsize,ysize];
int[] pixtemp = new int[xsize * ysize];
for (int i = 0; i < bandsize; i++)
{
bands[i].ReadRaster(0, 0, xsize, ysize, pixtemp, xsize, ysize, 0, 0);
for (int j = 0; j < xsize; j++)
{
for (int k = 0; k < ysize; k++)
{
pixel[i,j,k] = pixtemp[j + k * xsize];
}
}
}
Console.WriteLine("");
for (int i = 0; i < bandsize; i++)
{
Console.WriteLine("some pixel from band " + (i+1));
for (int j = 0; j < 100; j++)
{
Console.Write(" " + pixel[i,100,j]);
}
Console.WriteLine("\n\n");
}
}
}
}
I searched Google for how to do this, but I only found examples where the data type is byte. Can someone please give me a hint?
I don't know about the GeoTIFF format, but to convert a regular 16-bit TIFF image file to an 8-bit one, you need to scale the 16-bit channel values down to 8 bits. The example below shows how this can be achieved for grayscale images.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Windows.Media;
using System.Windows.Media.Imaging;
public static class TiffConverter
{
private static IEnumerable<BitmapSource> Load16BitTiff(Stream source)
{
var decoder = new TiffBitmapDecoder(source, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
for (int i = 0; i < decoder.Frames.Count; i++)
// return all frames that are present in the input.
yield return decoder.Frames[i];
}
private static BitmapSource NormalizeTiffTo8BitImage(BitmapSource source)
{
// allocate buffer & copy image bytes.
var rawStride = source.PixelWidth * source.Format.BitsPerPixel / 8;
var rawImage = new byte[rawStride * source.PixelHeight];
source.CopyPixels(rawImage, rawStride, 0);
// get both max values of first & second byte of pixel as scaling bounds.
var max1 = 0;
int max2 = 1;
for (int i = 0; i < rawImage.Length; i++)
{
if ((i & 1) == 0)
{
if (rawImage[i] > max1)
max1 = rawImage[i];
}
else if (rawImage[i] > max2)
max2 = rawImage[i];
}
// determine normalization factors.
var normFactor = max2 == 0 ? 0.0d : 128.0d / max2;
var factor = max1 > 0 ? 255.0d / max1 : 0.0d;
max2 = Math.Max(max2, 1);
// normalize each pixel to output buffer.
var buffer8Bit = new byte[rawImage.Length / 2];
for (int src = 0, dst = 0; src < rawImage.Length; dst++)
{
int value16 = rawImage[src++];
double value8 = ((value16 * factor) / max2) - normFactor;
if (rawImage[src] > 0)
{
int b = rawImage[src] << 8;
value8 = ((value16 + b) / max2) - normFactor;
}
buffer8Bit[dst] = (byte)Math.Min(255, Math.Max(value8, 0));
src++;
}
// return new bitmap source.
return BitmapSource.Create(
source.PixelWidth, source.PixelHeight,
source.DpiX, source.DpiY,
PixelFormats.Gray8, BitmapPalettes.Gray256,
buffer8Bit, rawStride / 2);
}
private static void SaveTo(IEnumerable<BitmapSource> src, string fileName)
{
using (var stream = File.Create(fileName))
{
var encoder = new TiffBitmapEncoder();
foreach (var bms in src)
encoder.Frames.Add(BitmapFrame.Create(bms));
encoder.Save(stream);
}
}
public static void Convert(string inputFileName, string outputFileName)
{
using (var inputStream = File.OpenRead(inputFileName))
SaveTo(Load16BitTiff(inputStream).Select(NormalizeTiffTo8BitImage), outputFileName);
}
}
Usage:
TiffConverter.Convert(@"c:\temp\16bit.tif", @"c:\temp\8bit.tif");
Going from 16 bit to 8 bit means rescaling the pixel values, so some resampling method has to be applied; simple linear interpolation may be enough.
//Convert tiff from 16-bit to 8-bit
byte[,,] ConvertBytes(int[,,] pixel, int bandsize, int xsize, int ysize)
{
byte[,,] trgPixel = new byte[bandsize,xsize,ysize];
for (int i = 0; i < bandsize; i++)
{
for (int j = 0; j < xsize; j++)
{
for (int k = 0; k < ysize; k++)
{
//Linear interpolation: scale 0-65535 down to 0-255
trgPixel[i,j,k] = (byte)(pixel[i,j,k]/65536.0*256);
}
}
}
return trgPixel;
}
//Save 8-bit tiff to file
void SaveBytesToTiff(string destPath, byte[,,] pixel, int bandsize, int xsize, int ysize)
{
string fileformat = "GTiff";
Driver dr = Gdal.GetDriverByName(fileformat);
Dataset newDs = dr.Create(destPath, xsize, ysize, bandsize, DataType.GDT_Byte, null);
for(int i=0; i< bandsize;i++)
{
byte[] buffer = new byte[xsize * ysize];
for (int j = 0; j < xsize; j++)
{
for (int k = 0; k < ysize; k++)
{
buffer[j+k*xsize] = pixel[i,j,k];
}
}
newDs.WriteRaster(0, 0, xsize, ysize, buffer, xsize, ysize, 1, new int[] { i + 1 }, 0, 0, 0); //write one band (band i+1) per pass
newDs.FlushCache();
}
newDs.Dispose();
}
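A hedged usage sketch of my own, continuing from the question's Main: it assumes the two helpers above are made static members of the same class, and the output path is only a placeholder.
// Convert the 16-bit values read earlier and write them out as an 8-bit GeoTIFF.
byte[,,] pixel8 = ConvertBytes(pixel, bandsize, xsize, ysize);
SaveBytesToTiff("F:\\output_8bit.tif", pixel8, bandsize, xsize, ysize);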

Edit Wav File Samples for Increasing Amplitude using c#

I have the following C# code, which reads a wave file into a byte array and then writes it into another array, creating a new wave file. I found it online and don't have much knowledge about it.
I want to change the values being written into the new array so that the amplitude is increased by some value 'x' in the generated wave file. Where and what changes should be made?
private void ReadWavfiles(string fileName)
{
byte[] fa = File.ReadAllBytes(fileName);
int startByte = 0;
// look for data header
for (var x = 0; x < fa.Length; x++)
if (fa[x] == 'd' && fa[x + 1] == 'a' && fa[x + 2] == 't' && fa[x + 3] == 'a')
{
startByte = x + 8;
break;
}
var buff = new byte[fa.Length / 2];
var y = 0;
var length = fa.Length;
for (int s = startByte; s < length; s = s + 2)
buff[y++] = (byte)(fa[s + 1] * 0x100 + fa[s]);
write(buff, "D:\\as1.wav", "D:\\us1.wav");
WaveOut obj = new WaveOut();
obj.Init(new WaveFileReader("D:\\as1.wav"));
obj.Play();
}
private void write(byte[] buffer, string fileOut, string fileIn)
{
WaveFileReader reader = new WaveFileReader(fileIn);
int numChannels = reader.WaveFormat.Channels, sampleRate = reader.WaveFormat.SampleRate;
int BUFFER_SIZE = 1024 * 16;
using (WaveFileWriter writer = new WaveFileWriter(fileOut, new WaveFormat(sampleRate, 16, numChannels)))
{
int bytesRead;
while (true)
{
bytesRead = reader.Read(buffer, 0, BUFFER_SIZE);
if (bytesRead != 0)
writer.Write(buffer, 0, bytesRead);
else break;
}
writer.Close();
}
}
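No answer is included here, but the usual approach (a sketch of mine, not from the post, assuming 16-bit PCM data) is to treat each pair of data-chunk bytes as a signed 16-bit sample, multiply it by a gain factor, clamp the result, and write it back before saving:
// Sketch: amplify 16-bit PCM samples in place by "gain" (e.g. 1.5f), with clipping.
// "samples" is the raw data chunk: little-endian, 2 bytes per sample.
static void Amplify(byte[] samples, float gain)
{
    for (int i = 0; i + 1 < samples.Length; i += 2)
    {
        short sample = (short)(samples[i] | (samples[i + 1] << 8)); // little-endian pair
        int scaled = (int)(sample * gain);
        if (scaled > short.MaxValue) scaled = short.MaxValue;       // clamp to avoid wrap-around
        if (scaled < short.MinValue) scaled = short.MinValue;
        samples[i] = (byte)(scaled & 0xFF);
        samples[i + 1] = (byte)((scaled >> 8) & 0xFF);
    }
}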

Float to 16bit, Stereo. Improve?

So I am converting float 32-bit audio to 16-bit, in stereo. As I don't fully understand it myself, it's pretty much copy-paste, sadly.
But I wonder if it can be improved, in either quality or speed?
Not that either of them is terrible or anything.
void SendWaloop(object sender, NAudio.Wave.WaveInEventArgs e)
{
byte[] newArray16Bit = new byte[e.BytesRecorded / 2];
short two;
float value;
for (int i = 0, j = 0; i < e.BytesRecorded; i += 4, j += 2)
{
value = (BitConverter.ToSingle(e.Buffer, i));
two = (short)(value * short.MaxValue);
newArray16Bit[j] = (byte)(two & 0xFF);
newArray16Bit[j + 1] = (byte)((two >> 8) & 0xFF);
}
if (connect == true && MuteMic.Checked == false)
{
udpSend.Send(newArray16Bit, newArray16Bit.Length, otherPartyIP.Address.ToString(), 1500);
}
}
So, it's converting the buffer from 32-bit to 16-bit and sending it with UDP, nothing weird.
To me it looks quite complex, but from what I understand it's basically turning every 4-byte float into 2 bytes, or something like that.
EDIT:
unsafe
{
byte[] newArray16Bit = new byte[e.BytesRecorded / 2];
fixed (byte* sourcePtr = e.Buffer)
fixed (byte* targetPtr = newArray16Bit)
{
float* sourceTyped = (float*)sourcePtr;
short* targetTyped = (short*)targetPtr;
int count = e.BytesRecorded / 4;
for (int i = 0; i < count; i++)
{
targetTyped[i] = (short)(sourceTyped[i] * short.MaxValue);
}
}
if (connect == true && MuteMic.Checked == false)
{
udpSend.Send(newArray16Bit, newArray16Bit.Length, otherPartyIP.Address.ToString(), 1500);
}
}
}
It would need testing, but I would probably try with some unsafe:
fixed(byte* sourcePtr = e.Buffer)
fixed(byte* targetPtr = newArray16Bit)
{
float* sourceTyped = (float*)sourcePtr;
short* targetTyped = (short*)targetPtr;
int count = e.BytesRecorded / 4;
for(int i = 0 ; i < count ; i++)
{
targetTyped[i] = (short)(sourceTyped[i] * short.MaxValue);
}
}
To show that working identically:
using System;
static class Program
{
static void Main()
{
byte[] raw1 = new byte[64 * 1024];
new Random(12345).NextBytes(raw1); // 64k of random data
var raw2 = (byte[])raw1.Clone(); // just to rule out corruption
var result1 = OriginalImplFromTopPost(raw1, raw1.Length - 20);
var result2 = MyImpl(raw2, raw2.Length - 20);
bool areSame = Convert.ToBase64String(result1) == Convert.ToBase64String(result2);
Console.WriteLine(areSame); // True
}
public static unsafe byte[] MyImpl(byte[] source, int byteCount)
{
byte[] newArray16Bit = new byte[byteCount / 2];
fixed (byte* sourcePtr = source)
fixed (byte* targetPtr = newArray16Bit)
{
float* sourceTyped = (float*)sourcePtr;
short* targetTyped = (short*)targetPtr;
int count = byteCount / 4;
for (int i = 0; i < count; i++)
{
targetTyped[i] = (short)(sourceTyped[i] * short.MaxValue);
}
}
return newArray16Bit;
}
public static byte[] OriginalImplFromTopPost(byte[] source, int byteCount)
{
byte[] newArray16Bit = new byte[byteCount / 2];
short two;
float value;
for (int i = 0, j = 0; i < byteCount; i += 4, j += 2)
{
value = (BitConverter.ToSingle(source, i));
two = (short)(value * short.MaxValue);
newArray16Bit[j] = (byte)(two & 0xFF);
newArray16Bit[j + 1] = (byte)((two >> 8) & 0xFF);
}
return newArray16Bit;
}
}
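One caveat that applies to both versions (my note, not part of the answer): (short)(value * short.MaxValue) wraps around if a float sample strays outside the -1.0..1.0 range, which some capture paths allow. A hedged variant of the per-sample conversion with clipping:
// Sketch: same float -> 16-bit conversion, but clipped instead of wrapping.
static short FloatToPcm16(float value)
{
    float scaled = value * short.MaxValue;
    if (scaled > short.MaxValue) return short.MaxValue;   // clip positive overshoot
    if (scaled < short.MinValue) return short.MinValue;   // clip negative overshoot
    return (short)scaled;
}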
