Get frequency spectrum lines of a .wav file C# - c#

I want to display the frequency spectrum of a .wav file.
I don’t know how to do it right. I use C# with the NAudio NuGet package to handle the audio data.
When I insert all spec_data points I only have one column filled with data. (Picture 1)
what it looks like now (Picture 1)
how it should look like at the end
How can I get all spectrum points of the audio file?
// Read one FFT frame of raw audio, FFT it, and append the resulting
// spectrum column to the scrolling spectrogram display.
var buffer = new byte[fft_size];
int bytes_read = fileStream.Read(buffer, 0, buffer.Length);
int BYTES_PER_POINT = fileStream.WaveFormat.BitsPerSample / 8; // 8 bit = 1 byte
short[] values = new short[buffer.Length / BYTES_PER_POINT];
// Single pass: assemble little-endian 16-bit samples. (The original wrapped
// this in an extra loop over BYTES_PER_POINT that just repeated the same work.)
for (int i = 0; i < bytes_read; i += BYTES_PER_POINT)
{
    values[i / BYTES_PER_POINT] = (short)((buffer[i + 1] << 8) | buffer[i + 0]);
}
neanalizir_values.AddRange(values);
// Oldest fft_size samples form the frame to analyze.
short[] data = neanalizir_values.GetRange(0, fft_size).ToArray();
spec_data.RemoveAt(0); // drop oldest spectrum column so the display scrolls
List<double> new_data = new List<double>();
Complex[] fft_buffer = new Complex[fft_size];
for (int i = 0; i < fft_size; i++)
{
    // Hamming window reduces spectral leakage. Use the extracted frame
    // (the original filled `data` but then ignored it).
    fft_buffer[i].X = (float)(data[i] * FastFourierTransform.HammingWindow(i, fft_size));
    fft_buffer[i].Y = 0;
}
FastFourierTransform.FFT(true, (int)Math.Log(fft_size, 2.0), fft_buffer);
for (int i = 0; i < spec_data[spec_data.Count - 1].Count; i++)
{
    // Bin magnitude = sqrt(re^2 + im^2). The original summed the real and
    // imaginary parts, which distorts the spectrum.
    double val = Math.Sqrt((double)fft_buffer[i].X * fft_buffer[i].X
                         + (double)fft_buffer[i].Y * fft_buffer[i].Y);
    new_data.Add(val);
}
new_data.Reverse();
spec_data.Add(new_data); // append newest column at the end
neanalizir_values.RemoveRange(0, fft_size / pixelsPerBuffer);
// Render spec_data as an 8-bit grayscale spectrogram bitmap.
Bitmap bitmap = new Bitmap(spec_data.Count, spec_data[0].Count, PixelFormat.Format8bppIndexed);
ColorPalette pal = bitmap.Palette;
for (int i = 0; i < 256; i++)
    pal.Entries[i] = Color.FromArgb(255, i, i, i);
bitmap.Palette = pal;
// WriteOnly: we only fill the pixel buffer (the original locked ReadOnly).
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.WriteOnly, bitmap.PixelFormat);
byte[] pixels = new byte[bitmapData.Stride * bitmap.Height];
const double scaleFactor = 40; // brightness scaling, hoisted out of the loop
for (int col = 0; col < spec_data.Count; col++)
{
    for (int row = 0; row < spec_data[col].Count; row++)
    {
        int bytePosition = row * bitmapData.Stride + col;
        double pixelVal = spec_data[col][row] * scaleFactor;
        pixelVal = Math.Max(0, Math.Min(255, pixelVal)); // clamp to byte range
        pixels[bytePosition] = (byte)pixelVal;
    }
}
Marshal.Copy(pixels, 0, bitmapData.Scan0, pixels.Length);
bitmap.UnlockBits(bitmapData);
pictureBox1.Image = bitmap;

Related

C# NAudio rendering a waveform ASP.net without DMO or ACM

I'm trying to draw out a waveform using ASP.net on an Azure Website (which doesn't have the ACM or DMO codecs installed), so I had to use NLayer to read the mp3 file. The code I have below works perfectly with the regular DmoMp3FrameDecompressor, but when I use the NLayer decompressor it doesn't.
Maybe the format of the NLayer decompressor is 32bit Float and not 16bit PCM.
// Download the MP3 and collect min/max sample pairs per horizontal pixel.
// NOTE(review): this is the version that fails with NLayer — the decoder's
// output is presumably 32-bit float rather than 16-bit PCM, so reading the
// raw bytes with BitConverter.ToInt16 produces garbage; see the corrected
// version below that inserts a WaveFloatTo16Provider. TODO confirm.
byte[] data = new WebClient().DownloadData(URL);
int maxAmplitude = 0;
short[,] dataArray = new short[Width, 2];
//using (Mp3FileReader wavestream = new Mp3FileReader(new MemoryStream(data), wf => new DmoMp3FrameDecompressor(wf)))
using (Mp3FileReader wavestream = new Mp3FileReader(new MemoryStream(data), new Mp3FileReader.FrameDecompressorBuilder(waveFormat => new NLayer.NAudioSupport.Mp3FrameDecompressor(waveFormat))))
{
WaveChannel32 channelStream = new WaveChannel32(wavestream);
// bytes per sample frame = (bits per sample / 8) * channel count
int bytesPerSample = (wavestream.WaveFormat.BitsPerSample / 8) * channelStream.WaveFormat.Channels;
wavestream.Position = 0;
long lenSamples = wavestream.Length / bytesPerSample;
int samplesPerPixel = (int)(lenSamples / Width);
int bytesRead1;
byte[] waveData1 = new byte[samplesPerPixel * bytesPerSample];
// First get all the data
for (int x = 0; x < Width; x++)
{
short low = 0;
short high = 0;
bytesRead1 = wavestream.Read(waveData1, 0, samplesPerPixel * bytesPerSample);
if (bytesRead1 == 0)
break;
// Scan this pixel's chunk for its min and max 16-bit sample.
for (int n = 0; n < bytesRead1; n += 2)
{
short sample = BitConverter.ToInt16(waveData1, n);
if (sample < low) low = sample;
if (sample > high) high = sample;
}
// Track the global peak for later auto-fit scaling.
if (-low > maxAmplitude) maxAmplitude = -low;
if (high > maxAmplitude) maxAmplitude = high;
dataArray[x, 0] = low;
dataArray[x, 1] = high;
}
}
Finally figured it out. Thanks @MarkHeath for your comments and suggestions (and for building the amazing NAudio / NLayer libraries)!
The key is that the WaveFloatTo16Provider doesn't have a Length attribute, so you can't compute the number of samples per pixel, so you need to have two loops. One which sequentially reads all the individual samples and then another which then groups the samples per pixel, and calculates the max amplitude. The final loop then maps the values to pixel positions and draws them to an image. If you don't need the AutoFit code, then you can merge the second and third loops.
// Render the MP3 waveform (decoded via NLayer, which outputs 32-bit float)
// into a Width x Height bitmap, in three passes: read all samples, reduce
// them to per-pixel min/max pairs, then draw.
Bitmap bmp = new Bitmap(Width, Height);
using (Graphics g = Graphics.FromImage(bmp))
{
    g.Clear(Color.White);
    Pen pen1 = new Pen(Color.Gray);
    string hexValue = "#" + sColor;
    Color colour1 = System.Drawing.ColorTranslator.FromHtml(hexValue);
    pen1.Color = colour1;
    int maxAmplitude = 0;
    short[,] dataArray = new short[Width, 2];
    // FIX: the original was missing the '(' before waveFormat, leaving the
    // lambda body as the non-existent identifier Mp3FrameDecompressorwaveFormat.
    using (Mp3FileReader wavestreamFloat = new Mp3FileReader(
        new MemoryStream(new WebClient().DownloadData(URL)),
        new Mp3FileReader.FrameDecompressorBuilder(
            waveFormat => new NLayer.NAudioSupport.Mp3FrameDecompressor(waveFormat))))
    {
        // Convert the float stream to 16-bit PCM so BitConverter.ToInt16 is valid.
        IWaveProvider stream16 = new WaveFloatTo16Provider(wavestreamFloat);
        int bytesPerSample = (stream16.WaveFormat.BitsPerSample / 8) * stream16.WaveFormat.Channels;
        int bytesRead = 0;
        byte[] buffer = new byte[8192];
        // Pass 1: IWaveProvider has no Length, so read everything sequentially.
        List<short> rawDataArray = new List<short>();
        do
        {
            bytesRead = stream16.Read(buffer, 0, buffer.Length);
            for (int n = 0; n < bytesRead; n += bytesPerSample)
            {
                short sample = BitConverter.ToInt16(buffer, n);
                rawDataArray.Add(sample);
            }
        } while (bytesRead != 0);
        // Pass 2: group the samples per output pixel and keep min/max of each group.
        long lenSamples = rawDataArray.Count;
        int samplesPerPixel = (int)(lenSamples / Width);
        int nCounter = 0;
        for (int x = 0; x < Width; x++)
        {
            short low = 0;
            short high = 0;
            for (int n = 0; n < samplesPerPixel; n++)
            {
                short sample = rawDataArray[nCounter++];
                if (sample < low) low = sample;
                if (sample > high) high = sample;
            }
            if (-low > maxAmplitude) maxAmplitude = -low;
            if (high > maxAmplitude) maxAmplitude = high;
            dataArray[x, 0] = low;
            dataArray[x, 1] = high;
        }
        // Pass 3: map the min/max pairs to y coordinates and draw one vertical
        // line per pixel column. This is where we resize it to AutoFit.
        for (int x = 0; x < Width; x++)
        {
            short low = dataArray[x, 0];
            short high = dataArray[x, 1];
            // Guard maxAmplitude > 0: a fully silent file would otherwise
            // divide by zero here.
            if (AutoFit && maxAmplitude > 0)
            {
                low = (short)((int)low * (int)short.MaxValue / (int)maxAmplitude);
                high = (short)((int)high * (int)short.MaxValue / (int)maxAmplitude);
            }
            // Shift from signed range [-32768, 32767] to a 0..1 fraction of Height.
            float lowPercent = ((((float)low) - short.MinValue) / ushort.MaxValue);
            float highPercent = ((((float)high) - short.MinValue) / ushort.MaxValue);
            float lowValue = Height * lowPercent;
            float highValue = Height * highPercent;
            g.DrawLine(pen1, x, lowValue, x, highValue);
        }
        g.Flush();
    }
}
return bmp;

C# and matlab give different image means?

I am calculating the average of the RGB channels of images in C# and MATLAB and getting slightly different results (I am using 0-255 pixel values...).
The difference is not large, but I just can't seem to understand the reason.
Is this common? Or is it due to the bitmap implementation of the image? Or a precision issue? Or does it mean there is something wrong with my code?
Code:
Matlab
% Load the JPEG and compute the mean of each color channel over the image.
I = imread('Photos\hv2512.jpg');
Ir=double(I(:,:,1));  % red channel as double (0-255)
Ig=double(I(:,:,2));  % green channel
Ib=double(I(:,:,3));  % blue channel
avRed=mean2(Ir)       % mean2 = mean over all elements of the 2-D matrix
avGn=mean2(Ig)
avBl=mean2(Ib)
C#
// Load the image and compute the mean of the R, G and B channels (0-255
// values), mirroring MATLAB's mean2.
// NOTE(review): small differences vs. MATLAB are typically caused by the
// two JPEG decoders producing slightly different pixel values — verify by
// comparing a lossless format (PNG/BMP) before suspecting this code.
Bitmap bmp = new Bitmap(open.FileName); // FIX: the original was missing the ';'
double[,] Red = new double[bmp.Width, bmp.Height];
double[,] Green = new double[bmp.Width, bmp.Height];
double[,] Blue = new double[bmp.Width, bmp.Height];
int PixelSize = 3;
BitmapData bmData = null;
if (Safe)
{
    Color c;
    // GetPixel is the slow but simple ("safe") path; LockBits would be faster.
    for (int j = 0; j < bmp.Height; j++)
    {
        for (int i = 0; i < bmp.Width; i++)
        {
            c = bmp.GetPixel(i, j);
            Red[i, j] = (double)c.R;
            Green[i, j] = (double)c.G;
            Blue[i, j] = (double)c.B;
        }
    }
}
// Accumulate sums over all pixels, then divide by the pixel count.
double avRed = 0, avGrn = 0, avBlue = 0;
double sumRed = 0, sumGrn = 0, sumBlue = 0;
int cnt = 0;
for (int rws = 0; rws < Red.GetLength(0); rws++)
    for (int clms = 0; clms < Red.GetLength(1); clms++)
    {
        sumRed = sumRed + Red[rws, clms];
        sumGrn = sumGrn + Green[rws, clms];
        sumBlue = sumBlue + Blue[rws, clms];
        cnt++;
    }
avRed = sumRed / cnt;
avGrn = sumGrn / cnt;
avBlue = sumBlue / cnt;
This is the image I am using

Converting a multi-band 16-bit tiff image to an 8-bit tiff image

I got some pixel data from a 16-bit (range 0-65535) TIFF image as an integer array, using GDAL's ReadRaster. How do I convert the values to 8-bit (0-255) and write the array out as an 8-bit TIFF image?
Here is some of my code :
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OSGeo.GDAL;
using OSGeo.OSR;
namespace ConsoleApplication4
{
class Program
{
// Reads a multi-band GeoTIFF with GDAL into an int[band, col, row] array
// and dumps a sample of pixel values from each band to the console.
static void Main(string[] args)
{
Gdal.AllRegister(); // register all GDAL drivers
// FIX: dispose the dataset — the original leaked the native GDAL handle.
using (Dataset data1 = Gdal.Open("F:\\po_1473547_bgrn_0000000.tif", Access.GA_ReadOnly))
{
int bandsize = data1.RasterCount;
int xsize = data1.RasterXSize; //cols
int ysize = data1.RasterYSize; //rows
Console.WriteLine("cols : "+xsize+", rows : "+ysize);
Band[] bands = new Band[bandsize];
for (int i = 0; i < bandsize; i++) {
bands[i] = data1.GetRasterBand(i+1); // GDAL band indices are 1-based
}
// pixel[band, col, row]; pixtemp is a row-major scanline buffer.
int[,,] pixel = new int[bandsize,xsize,ysize];
int[] pixtemp = new int[xsize * ysize];
for (int i = 0; i < bandsize; i++)
{
bands[i].ReadRaster(0, 0, xsize, ysize, pixtemp, xsize, ysize, 0, 0);
for (int j = 0; j < xsize; j++)
{
for (int k = 0; k < ysize; k++)
{
pixel[i,j,k] = pixtemp[j + k * xsize];
}
}
}
Console.WriteLine("");
// Print the first 100 rows of column 100 from each band as a sanity check.
for (int i = 0; i < bandsize; i++)
{
Console.WriteLine("some pixel from band " + (i+1));
for (int j = 0; j < 100; j++)
{
Console.Write(" " + pixel[i,100,j]);
}
Console.WriteLine("\n\n");
}
}
}
}
}
I was searching Google on how to do that but I only found how to do that if the data type is a byte. Someone please give me a hint.
I don't know about GEO Tiff format, but to convert a regular 16 bit tiff image file to an 8 bit one, you need to scale the 16 bit channel values to 8 bits. The example below shows how this can be achieved for gray scale images.
// Converts a (possibly multi-frame) 16-bit grayscale TIFF into an 8-bit
// grayscale TIFF using WPF imaging (TiffBitmapDecoder / TiffBitmapEncoder).
public static class TiffConverter
{
// Decodes every frame of the TIFF stream, preserving the pixel format.
private static IEnumerable<BitmapSource> Load16BitTiff(Stream source)
{
var decoder = new TiffBitmapDecoder(source, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
for (int i = 0; i < decoder.Frames.Count; i++)
// return all frames that are present in the input.
yield return decoder.Frames[i];
}
// Scales one frame down to Gray8. Assumes 16 bits per pixel with the low
// byte first (little-endian samples) — TODO confirm for other formats.
private static BitmapSource NormalizeTiffTo8BitImage(BitmapSource source)
{
// allocate buffer & copy image bytes.
var rawStride = source.PixelWidth * source.Format.BitsPerPixel / 8;
var rawImage = new byte[rawStride * source.PixelHeight];
source.CopyPixels(rawImage, rawStride, 0);
// get both max values of first & second byte of pixel as scaling bounds.
// (even indices = low bytes tracked in max1, odd indices = high bytes in max2)
var max1 = 0;
int max2 = 1;
for (int i = 0; i < rawImage.Length; i++)
{
if ((i & 1) == 0)
{
if (rawImage[i] > max1)
max1 = rawImage[i];
}
else if (rawImage[i] > max2)
max2 = rawImage[i];
}
// determine normalization factors.
// NOTE(review): this scaling is a heuristic built from the separate
// low/high-byte maxima, not a plain value/257 mapping — verify that it
// suits your data before reusing.
var normFactor = max2 == 0 ? 0.0d : 128.0d / max2;
var factor = max1 > 0 ? 255.0d / max1 : 0.0d;
max2 = Math.Max(max2, 1);
// normalize each pixel to output buffer.
var buffer8Bit = new byte[rawImage.Length / 2];
for (int src = 0, dst = 0; src < rawImage.Length; dst++)
{
int value16 = rawImage[src++];
double value8 = ((value16 * factor) / max2) - normFactor;
// if the high byte is set, rebuild the full 16-bit value and rescale;
// this overwrites the low-byte-only estimate above.
if (rawImage[src] > 0)
{
int b = rawImage[src] << 8;
value8 = ((value16 + b) / max2) - normFactor;
}
// clamp to the 0..255 byte range.
buffer8Bit[dst] = (byte)Math.Min(255, Math.Max(value8, 0));
src++;
}
// return new bitmap source.
return BitmapSource.Create(
source.PixelWidth, source.PixelHeight,
source.DpiX, source.DpiY,
PixelFormats.Gray8, BitmapPalettes.Gray256,
buffer8Bit, rawStride / 2);
}
// Encodes all frames into a single TIFF file at 'fileName'.
private static void SaveTo(IEnumerable<BitmapSource> src, string fileName)
{
using (var stream = File.Create(fileName))
{
var encoder = new TiffBitmapEncoder();
foreach (var bms in src)
encoder.Frames.Add(BitmapFrame.Create(bms));
encoder.Save(stream);
}
}
// Public entry point: 16-bit input file -> normalized 8-bit output file.
public static void Convert(string inputFileName, string outputFileName)
{
using (var inputStream = File.OpenRead(inputFileName))
SaveTo(Load16BitTiff(inputStream).Select(NormalizeTiffTo8BitImage), outputFileName);
}
}
Usage:
TiffConverter.Convert(@"c:\temp\16bit.tif", @"c:\temp\8bit.tif");
Interpolate pixels from 16 bit to 8 bit, some resampling methods could perform.
Linear Interpolation may help.
// Convert tiff pixel data from 16-bit to 8-bit by scaling each sample
// linearly from 0..65535 down to 0..255.
// FIX: the original signature was missing the parameter types (it would not
// compile), and its formula (65535 - pixel) / 65536.0 * 256 inverted the
// intensities instead of scaling them.
byte[,,] ConvertBytes(int[,,] pixel, int bandsize, int xsize, int ysize)
{
    byte[,,] trgPixel = new byte[bandsize, xsize, ysize];
    for (int i = 0; i < bandsize; i++)
    {
        for (int j = 0; j < xsize; j++)
        {
            for (int k = 0; k < ysize; k++)
            {
                // Linear scaling: dividing by 256 maps 0..65535 onto 0..255.
                trgPixel[i, j, k] = (byte)(pixel[i, j, k] / 256);
            }
        }
    }
    return trgPixel;
}
// Save 8-bit pixel data [band, col, row] to a GeoTIFF file via GDAL.
// FIX: the original signature was missing the parameter types, and used
// 'Gdal.getDriverByName' / 'DateType.GDT_Byte' which do not exist in the
// GDAL C# bindings ('Gdal.GetDriverByName' / 'DataType.GDT_Byte').
void SaveBytesToTiff(string destPath, byte[,,] pixel, int bandsize, int xsize, int ysize)
{
    string fileformat = "GTiff";
    Driver dr = Gdal.GetDriverByName(fileformat);
    Dataset newDs = dr.Create(destPath, xsize, ysize, bandsize, DataType.GDT_Byte, null);
    for (int i = 0; i < bandsize; i++)
    {
        // Flatten [col, row] into a row-major scanline buffer for WriteRaster.
        byte[] buffer = new byte[xsize * ysize];
        for (int j = 0; j < xsize; j++)
        {
            for (int k = 0; k < ysize; k++)
            {
                buffer[j + k * xsize] = pixel[i, j, k];
            }
        }
        newDs.WriteRaster(0, 0, xsize, ysize, buffer, xsize, ysize, i + 1, null, 0, 0, 0);
    }
    newDs.FlushCache(); // flush once after all bands are written
    newDs.Dispose();
}

Average value of array elements

I am trying to calculate the value of a single dimensional Array, here is my code:
So when I click "Detect", it should start a threshold through my Image, beginning from i = 0 to Image height and from j = 0 to Image width:
// Click handler (first, broken attempt): thresholds the bitmap pixel by
// pixel, logs every pure-blue pixel (R=0, G=0, B=255) to a CSV file, and
// tries to compute the centroid of those pixels. The corrected version
// appears further down in the post.
public void detektieren_Click(object sender, RoutedEventArgs e)
{
for (i = 0; i < bitmap.Height; i++)
{
for (j = 0; j < bitmap.Width; j++)
{
// NOTE(review): the whole pixel buffer is re-copied on every iteration
// of the inner loop — this only needs to happen once before the loops.
stride = bitmap.PixelWidth * (bitmap.Format.BitsPerPixel / 8);
data = new byte[stride * bitmap.PixelHeight];
bitmap.CopyPixels(data, stride, 0);
index = i * stride + 4 * j;
Now accessing the ARGB data:
byte A = data[index + 3];
byte R = data[index + 2];
byte G = data[index + 1];
byte B = data[index];
After the threshold, if there are any Pixels meet the condition R=0 & G=0 & B=255:
if (Convert.ToInt32(R) == 0 && Convert.ToInt32(G) == 0 && Convert.ToInt32(B) == 255)
{
// Create a writer and open the file:
StreamWriter Messdaten;
if (!File.Exists("C:/Users/.../Messdaten.csv"))
{
Messdaten = new StreamWriter("C:/Users/.../Messdaten.csv");
}
else
{
Messdaten = File.AppendText("C:/Users/.../Messdaten.csv");
}
// Write to the file:
Messdaten.WriteLine(j + ";" + i);
// Close the stream:
Messdaten.Close();
// BUG: x_array / y_array are re-created (all zeros) on every pass of
// these loops, so Average() runs over an almost-all-zero array and
// x_mw / y_mw come out near 0 instead of the blue-pixel centroid.
for (y = 0; y < bitmap.Height; y++)
{
for (x = 0; x < bitmap.Width; x++)
{
double x_mw = 0; double y_mw = 0;
int[] x_array = new int[(int)bitmap.Width];
int[] y_array = new int[(int)bitmap.Height];
x_array[x] = j;
x_mw = x_array.Average();
y_array[y] = i;
y_mw = y_array.Average();
xy_coord.Content = (int) x_mw + ";" + (int) y_mw;
}
}
}
}
}
}
Everything works perfectly in the CSV file; I can detect a pixel (e.g. blue with R=0 G=0 B=255). But I also want to copy the data of each single pixel into an array, and apparently it doesn't deliver what I want: it doesn't calculate the average of the blue pixel coordinates (i.e. the centroid of the blue pixel scatter); instead it just shows x_mw = 0 and y_mw = 0. What did I do wrong?
After I did some modification it works. So this is the code:
// Click handler: scans the bitmap for pure-blue pixels (R=0, G=0, B=255)
// and shows the centroid of those pixels in xy_coord.
// Improvements over the original: the running sums are accumulated directly
// instead of calling Sum() over copy arrays inside the pixel loop (which was
// O(n^2)); the unused x_array/y_array locals and the unused alpha read are
// gone; the label is updated once at the end rather than per matching pixel.
public void detektieren_Click(object sender, RoutedEventArgs e)
{
    // Copy the pixel buffer once, up front.
    stride = bitmap.PixelWidth * (bitmap.Format.BitsPerPixel / 8);
    data = new byte[stride * bitmap.PixelHeight];
    bitmap.CopyPixels(data, stride, 0);
    long x_sum = 0; long y_sum = 0; int count = 0;
    for (i = 0; i < (int)bitmap.Height; i++)
    {
        for (j = 0; j < (int)bitmap.Width; j++)
        {
            // BGRA layout: B at index, G at +1, R at +2 (alpha at +3 unused).
            index = i * stride + 4 * j;
            byte R = data[index + 2];
            byte G = data[index + 1];
            byte B = data[index];
            if (R == 0 && G == 0 && B == 255)
            {
                x_sum += j;
                y_sum += i;
                count++;
            }
        }
    }
    // Same integer-division centroid as the original; skip when no pixel
    // matched (the original also left the label untouched in that case).
    if (count > 0)
    {
        int x_mw = (int)(x_sum / count);
        int y_mw = (int)(y_sum / count);
        xy_coord.Content = x_mw + ";" + y_mw;
    }
}

Merge an array of Bitmaps into a single Bitmap

I'm trying to merge an array of Bitmaps into a single Bitmap. Given a Bitmap[,] array b like the following (assume these are images that look like characters):
b[0,0] = 1
b[1,0] = 2
b[0,1] = 3
b[1,1] = 4
I want to generate
result = 12
34
For example, given the following four Bitmaps:
b[0,0] =;
b[1,0] =;
b[0,1] =;
b[1,1] =;
I want to generate
result =;
Here's my code so far:
// Tiles the bitmaps in b into a single bitmap: b[x, y] is placed at grid
// column x, row y. All tiles must have identical Width and Height, otherwise
// an ArgumentException is thrown.
public static Bitmap Moisac(ref Bitmap[,] b)
{
BitmapData[,] bmData = new BitmapData[b.GetUpperBound(0) + 1, b.GetUpperBound(1) + 1];
IntPtr[,] scan0 = new IntPtr[b.GetUpperBound(0) + 1, b.GetUpperBound(1) + 1];
unsafe
{
byte*[,] p = new byte*[b.GetUpperBound(0) + 1,b.GetUpperBound(1) + 1];
// Validate that every tile has the same size as b[0,0].
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
if (b[i, j].Width != b[0, 0].Width | b[i, j].Height != b[0, 0].Height)
throw new ArgumentException(
"Width and Height properties of all elements of b must be equal.",
"b");
int oneW = b[0, 0].Width;
int oneH = b[0, 0].Height;
int overallWidth = oneW * (b.GetUpperBound(0) + 1);
int overallHeight = oneH * (b.GetUpperBound(1) + 1);
Bitmap result = new Bitmap(b[0, 0], overallWidth, overallHeight);
// Lock every tile read-only as 24bpp RGB and remember its base pointer.
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
{
bmData[i, j] = b[i, j].LockBits(new Rectangle(0, 0, oneW, oneH),
ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
scan0[i, j] = bmData[i, j].Scan0;
p[i, j] = (byte*)(void*)scan0[i, j];
}
BitmapData rbmData = result.LockBits(new Rectangle(0, 0, overallWidth, overallHeight),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int stride = bmData[0, 0].Stride;
int nOffset = stride - 3*b[0, 0].Width; // per-scanline padding in each tile
int rStride = rbmData.Stride;
IntPtr rScan0 = rbmData.Scan0;
byte* rp = (byte*) (void*) rScan0;
// Copy every tile byte by byte into its slot in the result.
// FIX: '<=' here — the original used '<' and skipped the last tile row.
for (int imgY = 0; imgY <= b.GetUpperBound(1); ++imgY)
{
for (int imgX = 0; imgX <= b.GetUpperBound(0); ++imgX)
{
byte* currp = p[imgX, imgY];
for (int y = 0; y < oneH; ++y)
{
for (int x = 0; x < 3*oneW; ++x)
{
rp[rStride*(imgY*oneH + y) + 3*imgX*oneW + x] = currp[0];
++currp;
}
currp += nOffset; // skip the tile's scanline padding
}
}
}
// Unlock all tiles and the result before returning.
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
b[i, j].UnlockBits(bmData[i,j]);
result.UnlockBits(rbmData);
return result;
}
}
See the images in the album here. All of them won't display here.
I made the most stupid mistake ever. However, if it may help anyone, change
for (int imgY = 0; imgY < b.GetUpperBound(1); ++imgY)
to
for (int imgY = 0; imgY <= b.GetUpperBound(1); ++imgY)
(the < should be <=).
I made a version based on your code that copies lines of pixels rather than pixels. Seems to work (faster) on my box at least. Maybe you like it. Minimal changes just within the for each loops.
I'm using it really only for concatenating, before warping images into a donut for a simple 360-degree view of multiple camera images.
// Line-copy variant of the mosaic routine: merges the tiles in b into one
// bitmap by copying a whole tile scanline at a time via Marshal.Copy.
// Tiles are locked as 32bppArgb, so each tile stride is 4 * tileWidth and
// the result stride equals stride * (tiles per row); 'rp' is therefore
// simply advanced by one tile stride after each copied scanline.
public static Bitmap Mosaic(ref Bitmap[,] b)
{
BitmapData[,] bmData = new BitmapData[b.GetUpperBound(0) + 1, b.GetUpperBound(1) + 1];
IntPtr[,] scan0 = new IntPtr[b.GetUpperBound(0) + 1, b.GetUpperBound(1) + 1];
unsafe
{
byte*[,] p = new byte*[b.GetUpperBound(0) + 1, b.GetUpperBound(1) + 1];
// All tiles must have identical dimensions.
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
if (b[i, j].Width != b[0, 0].Width | b[i, j].Height != b[0, 0].Height)
throw new ArgumentException(
"Width and Height properties of all elements of b must be equal.",
"b");
int oneW = b[0, 0].Width;
int oneH = b[0, 0].Height;
int overallWidth = oneW * (b.GetUpperBound(0) + 1);
int overallHeight = oneH * (b.GetUpperBound(1) + 1);
Bitmap result = new Bitmap(b[0, 0], overallWidth, overallHeight);
// Lock every tile read-only and remember its scanline base pointer.
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
{
bmData[i, j] = b[i, j].LockBits(new Rectangle(0, 0, oneW, oneH),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
scan0[i, j] = bmData[i, j].Scan0;
p[i, j] = (byte*)(void*)scan0[i, j];
}
BitmapData rbmData = result.LockBits(new Rectangle(0, 0, overallWidth, overallHeight),
ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
int stride = bmData[0, 0].Stride;
int nOffset = stride - 4 * b[0, 0].Width; // 0 for 32bpp; kept from the original
int rStride = rbmData.Stride;
IntPtr rScan0 = rbmData.Scan0;
byte* rp = (byte*)(void*)rScan0;
// For each tile row, interleave one scanline from every tile across the row.
for (int imgY = 0; imgY <= b.GetUpperBound(1); ++imgY)
{
for (int y = 0; y < oneH; ++y)
{
byte* currp = p[0, imgY];
for (int imgX = 0; imgX <= b.GetUpperBound(0); ++imgX)
{
currp = p[imgX, imgY];
currp += stride*y; // start of scanline y within this tile
// Copy one full tile scanline through a managed buffer.
byte[] buffer = new byte[stride];
Marshal.Copy(new IntPtr(currp), buffer, 0, buffer.Length);
Marshal.Copy(buffer, 0, new IntPtr(rp), buffer.Length);
rp += stride; // advance the output pointer past the copied scanline
}
}
}
// Unlock everything before returning.
for (int i = 0; i <= b.GetUpperBound(0); i++)
for (int j = 0; j <= b.GetUpperBound(1); j++)
b[i, j].UnlockBits(bmData[i, j]);
result.UnlockBits(rbmData);
return result;
}
}

Categories