C# merge images sent over socket

I'm trying to send screenshots over a socket, so I use unsafe pointers to build an image containing only the differences between two frames:
private unsafe Bitmap GetDiffBitmap(Bitmap bmp, Bitmap bmp2)
{
    bmpRes = new Bitmap(1920, 1080, bmp.PixelFormat);
    bmData = bmp.LockBits(new Rectangle(0, 0, 1920, 1080), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
    bmData2 = bmp2.LockBits(new Rectangle(0, 0, bmp2.Width, bmp2.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
    bmDataRes = bmpRes.LockBits(new Rectangle(0, 0, bmpRes.Width, bmpRes.Height), System.Drawing.Imaging.ImageLockMode.ReadWrite, PixelFormat.Format32bppRgb);
    IntPtr scan0 = bmData.Scan0;
    IntPtr scan02 = bmData2.Scan0;
    IntPtr scan0Res = bmDataRes.Scan0;
    int stride = bmData.Stride;
    int stride2 = bmData2.Stride;
    int strideRes = bmDataRes.Stride;
    int nWidth = bmp.Width;
    int nHeight = bmp.Height;
    //for(int y = 0; y < nHeight; y++)
    System.Threading.Tasks.Parallel.For(0, nHeight, y =>
    {
        //define the pointers inside the first loop for parallelizing
        byte* p = (byte*)scan0.ToPointer();
        p += y * stride;
        byte* p2 = (byte*)scan02.ToPointer();
        p2 += y * stride2;
        byte* pRes = (byte*)scan0Res.ToPointer();
        pRes += y * strideRes;
        for (int x = 0; x < nWidth; x++)
        {
            //always get the complete pixel when differences are found
            if (p[0] != p2[0] || p[1] != p2[1] || p[2] != p2[2])
            {
                pRes[0] = p2[0];
                pRes[1] = p2[1];
                pRes[2] = p2[2];
                //alpha (opacity)
                pRes[3] = p2[3];
            }
            p += 4;
            p2 += 4;
            pRes += 4;
        }
    });
    bmp.UnlockBits(bmData);
    bmp2.UnlockBits(bmData2);
    bmpRes.UnlockBits(bmDataRes);
    return bmpRes;
}
This is the call on the client:
private void startSend()
{
    Bitmap curr;
    Bitmap pre = screenshot();
    byte[] bmpBytes = imageToByteArray(pre);
    SendVarData(handler, bmpBytes); // this is the first send of the whole screen
    while (true)
    {
        curr = screenshot();
        Bitmap diff = GetDiffBitmap(pre, curr); // generate differences
        bmpBytes = imageToByteArray(diff);
        SendVarData(handler, bmpBytes); // sending the diff image
        pre = curr;
    }
}
SendVarData is a method which sends the byte array over the socket; it is not the problem here, so leave it aside.
This is how I receive the data on the server side:
public void startListening()
{
    Bitmap merge = new Bitmap(1920, 1080);
    Graphics g = Graphics.FromImage(merge);
    Bitmap prev = byteArrayToImage(ReceiveVarData(client.Client)) as Bitmap; // getting the first full-size image
    theImage.Image = prev; // assigning it to the picturebox
    while (true)
    {
        byte[] data = ReceiveVarData(client.Client);
        Bitmap curr = byteArrayToImage(data) as Bitmap; // here is the diff image
        //merge and apply differences
        g.DrawImage(prev, 0, 0, 1920, 1080);
        g.DrawImage(curr, 0, 0, 1920, 1080);
        theImage.Image = merge;
        count++;
        prev = merge;
    }
}
My problem is that even though I merge the two images with Graphics.DrawImage, the result (after the first receive) still does not look like a full frame... this is actually what I see on the server.
I don't know what's wrong here... can anyone enlighten me? :D
@DmitriTrofimov:
if (p[0] != p2[0] || p[1] != p2[1] || p[2] != p2[2])
{
    pRes[0] = p2[0];
    pRes[1] = p2[1];
    pRes[2] = p2[2];
    //alpha (opacity)
    pRes[3] = p2[3];
}
else
    pRes[0] = 0;

This is what you get when you keep the pixel opacity. You should set the alpha to zero on the pixels that are not different, and make sure you are working with 32-bit images (refer to the PixelFormat parameter of the Bitmap constructor).
P.S. Make sure you compress the diff bitmap, otherwise it's no use.
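As a minimal sketch of that suggestion (not the original code; GetCompressedDiff is a hypothetical helper that combines both points), the diff can be produced as Format32bppArgb with alpha set to zero for unchanged pixels and then serialized as PNG before it goes to SendVarData:
// Needs System.Drawing, System.Drawing.Imaging, System.IO; compile with /unsafe.
private static unsafe byte[] GetCompressedDiff(Bitmap prev, Bitmap curr)
{
    var rect = new Rectangle(0, 0, prev.Width, prev.Height);
    var result = new Bitmap(prev.Width, prev.Height, PixelFormat.Format32bppArgb);
    BitmapData dPrev = prev.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData dCurr = curr.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData dRes = result.LockBits(rect, ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
    try
    {
        for (int y = 0; y < rect.Height; y++)
        {
            byte* p = (byte*)dPrev.Scan0 + y * dPrev.Stride;
            byte* c = (byte*)dCurr.Scan0 + y * dCurr.Stride;
            byte* r = (byte*)dRes.Scan0 + y * dRes.Stride;
            for (int x = 0; x < rect.Width; x++)
            {
                if (p[0] != c[0] || p[1] != c[1] || p[2] != c[2])
                {
                    // changed pixel: copy it and make it opaque
                    r[0] = c[0]; r[1] = c[1]; r[2] = c[2]; r[3] = 255;
                }
                else
                {
                    // unchanged pixel: fully transparent, so it won't overwrite anything on the receiver
                    r[0] = 0; r[1] = 0; r[2] = 0; r[3] = 0;
                }
                p += 4; c += 4; r += 4;
            }
        }
    }
    finally
    {
        prev.UnlockBits(dPrev);
        curr.UnlockBits(dCurr);
        result.UnlockBits(dRes);
    }
    using (var ms = new MemoryStream())
    {
        result.Save(ms, ImageFormat.Png); // PNG is lossless and keeps the alpha channel
        result.Dispose();
        return ms.ToArray();
    }
}
The receiver can then keep drawing the received diff over the previous frame with Graphics.DrawImage, as startListening already does; the fully transparent pixels leave the old content untouched.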

Related

Format16bppGrayScale : a generic error occurred in gdi+

I am trying to use Format16bppGrayScale, but I keep getting the error "a generic error occurred in GDI+". I have a monochromatic camera that gives me 16-bit values, and I want to store them in a Format16bppGrayScale bitmap so I can keep the image from the camera. The 16-bit values arrive in a byte array, with two array indexes per 16-bit value.
public Bitmap ByteToImage1(byte[] blob)
{
    Bitmap bmpRGB = new Bitmap(KeepWidth, KeepHeight, System.Drawing.Imaging.PixelFormat.Format16bppGrayScale);
    Rectangle rect = new Rectangle(0, 0, KeepWidth, KeepHeight);
    BitmapData bmpData = bmpRGB.LockBits(rect, ImageLockMode.WriteOnly, bmpRGB.PixelFormat);
    int padding = bmpData.Stride - 3 * KeepWidth;
    unsafe
    {
        int i = 0;
        byte* ptr = (byte*)bmpData.Scan0;
        for (int y = 0; y < KeepHeight; y++)
        {
            for (int x = 0; x < KeepWidth; x++)
            {
                ptr[1] = blob[i + 1];
                ptr[0] = blob[i];
                i = i + 2;
                ptr += 2;
            }
            ptr += padding;
        }
    }
    bmpRGB.UnlockBits(bmpData);
    return bmpRGB;
}
Update: Karsten's code
private Bitmap GenerateDummy16bitImage(byte[] temp)
{
    int i2 = 0;
    Bitmap b16bpp = new Bitmap(KeepWidth, KeepHeight, System.Drawing.Imaging.PixelFormat.Format16bppGrayScale);
    var rect = new Rectangle(0, 0, KeepWidth, KeepHeight);
    var bitmapData = b16bpp.LockBits(rect, ImageLockMode.WriteOnly, b16bpp.PixelFormat);
    // Calculate the number of bytes required and allocate them.
    var numberOfBytes = bitmapData.Stride * KeepHeight;
    var bitmapBytes = new short[KeepWidth * KeepHeight];
    // Fill the bitmap shorts with the camera data (random data in the original sample).
    var random = new Random();
    for (int x = 0; x < KeepWidth; x++)
    {
        for (int y = 0; y < KeepHeight; y++)
        {
            var i = ((y * KeepWidth) + x); // 16bpp
            // Take the next pixel value.
            var value = (short)temp[i2]; //(short)random.Next(5);
            bitmapBytes[i] = value; // GRAY
            i2++;
        }
    }
    // Copy the bits to the bitmap pointer.
    var ptr = bitmapData.Scan0;
    Marshal.Copy(bitmapBytes, 0, ptr, bitmapBytes.Length);
    // Unlock the bitmap, we're all done.
    b16bpp.UnlockBits(bitmapData);
    return b16bpp;
}
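As a side note on the two-bytes-per-value layout described above, here is a minimal sketch (an assumption about the layout, not a fix for the GDI+ error) of filling a Format16bppGrayScale bitmap row by row while respecting the stride; width and height are passed as parameters instead of the KeepWidth/KeepHeight fields, and low-byte-first sample order is assumed:
using System;
using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;

static class Gray16Helper
{
    // Sketch: blob holds width * height 16-bit samples, two bytes per sample.
    public static Bitmap ToGray16Bitmap(byte[] blob, int width, int height)
    {
        var bmp = new Bitmap(width, height, PixelFormat.Format16bppGrayScale);
        var data = bmp.LockBits(new Rectangle(0, 0, width, height),
                                ImageLockMode.WriteOnly, PixelFormat.Format16bppGrayScale);
        try
        {
            // Copy one row at a time so the stride padding is skipped correctly.
            int rowBytes = width * 2; // 2 bytes per 16-bit sample
            for (int y = 0; y < height; y++)
            {
                IntPtr rowPtr = (IntPtr)((long)data.Scan0 + y * data.Stride);
                Marshal.Copy(blob, y * rowBytes, rowPtr, rowBytes);
            }
        }
        finally
        {
            bmp.UnlockBits(data);
        }
        return bmp;
    }
}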

Convert image to byte array C# in a proper way

My problem is that I need to convert an image to a byte array to obtain its pixels.
My image size is 268x188, and the PixelFormat property returns Format24bppRgb, so I understand that each pixel takes 3 bytes.
If this is true, the pixel data should be 268*188*3 = 151152 bytes, but the byte array that I am creating has a size of 4906 bytes, which is the size of the image file on my computer.
I don't know if there is another way to obtain these pixels, or whether you can only obtain the image file bytes.
If you want to ignore the header and the compression of the file you can do the following.
var path = ...
using (var image = Image.FromFile(path))
using (var bitmap = new Bitmap(image))
{
    var bitmapData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadOnly, bitmap.PixelFormat);
    var bytesPerPixel = 4; // bitmapData.PixelFormat (image.PixelFormat and bitmapData.PixelFormat can be different)
    var ptr = bitmapData.Scan0;
    var imageSize = bitmapData.Width * bitmapData.Height * bytesPerPixel;
    var data = new byte[imageSize];
    for (int x = 0; x < imageSize; x += bytesPerPixel)
    {
        for (var y = 0; y < bytesPerPixel; y++)
        {
            data[x + y] = Marshal.ReadByte(ptr);
            ptr += 1;
        }
    }
    bitmap.UnlockBits(bitmapData);
}
To get the image pixels, try this:
public static byte[] GetImageRaw(Bitmap image)
{
    if (image == null)
    {
        throw new ArgumentNullException(nameof(image));
    }
    if (image.PixelFormat != PixelFormat.Format24bppRgb)
    {
        throw new NotSupportedException("Invalid pixel format.");
    }
    const int PixelSize = 3;
    var data = image.LockBits(
        new Rectangle(Point.Empty, image.Size),
        ImageLockMode.ReadWrite,
        image.PixelFormat);
    try
    {
        var bytes = new byte[data.Width * data.Height * PixelSize];
        for (var y = 0; y < data.Height; ++y)
        {
            var source = (IntPtr)((long)data.Scan0 + y * data.Stride);
            // copy row without padding
            Marshal.Copy(source, bytes, y * data.Width * PixelSize, data.Width * PixelSize);
        }
        return bytes;
    }
    finally
    {
        image.UnlockBits(data);
    }
}
Take a look at Bitmap.LockBits
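A quick usage sketch (assumptions: a hypothetical file path, and that the GetImageRaw method above is defined in the same class) that ties this back to the 268x188 arithmetic from the question:
// Needs System, System.Drawing, System.Drawing.Imaging.
static void DumpRawSize()
{
    // Hypothetical path; JPEGs typically load as Format24bppRgb, which GetImageRaw expects.
    using (var bmp = new Bitmap(@"C:\temp\photo.jpg"))
    {
        byte[] pixels = GetImageRaw(bmp);     // the method shown above
        Console.WriteLine(bmp.PixelFormat);   // Format24bppRgb
        Console.WriteLine(pixels.Length);     // width * height * 3, e.g. 268 * 188 * 3 = 151152
    }
}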
I use this code in an ASP.NET application. Very simple:
var imagePath = GetFilePathToYourImage();
using (var img = System.IO.File.OpenRead(imagePath))
{
    // Note: this reads the encoded file bytes (the 4906 bytes from the question), not decoded pixel data.
    var imageBytes = new byte[img.Length];
    img.Read(imageBytes, 0, (int)img.Length);
}

Bitmap to int[,] conversion and vice versa

Consider the following two routines.
//Tested
///Working fine.
public static Bitmap ToBitmap(int[,] image)
{
    int Width = image.GetLength(0);
    int Height = image.GetLength(1);
    int i, j;
    Bitmap bitmap = new Bitmap(Width, Height);
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
                                            ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    unsafe
    {
        byte* address = (byte*)bitmapData.Scan0;
        for (i = 0; i < bitmapData.Height; i++)
        {
            for (j = 0; j < bitmapData.Width; j++)
            {
                // write the logic implementation here
                address[0] = (byte)image[j, i];
                address[1] = (byte)image[j, i];
                address[2] = (byte)image[j, i];
                address[3] = (byte)255;
                //4 bytes per pixel
                address += 4;
            } //end for j
            //4 bytes per pixel
            address += (bitmapData.Stride - (bitmapData.Width * 4));
        } //end for i
    } //end unsafe
    bitmap.UnlockBits(bitmapData);
    return bitmap; // col;
}
//Tested
///Working fine.
public static int[,] ToInteger(Bitmap bitmap)
{
    int[,] array2D = new int[bitmap.Width, bitmap.Height];
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
                                            ImageLockMode.ReadWrite,
                                            PixelFormat.Format32bppRgb);
    unsafe
    {
        byte* address = (byte*)bitmapData.Scan0;
        int paddingOffset = bitmapData.Stride - (bitmap.Width * 4); //4 bytes per pixel
        for (int i = 0; i < bitmap.Width; i++)
        {
            for (int j = 0; j < bitmap.Height; j++)
            {
                byte[] temp = new byte[4];
                temp[0] = address[0];
                temp[1] = address[1];
                temp[2] = address[2];
                temp[3] = address[3];
                array2D[j, i] = BitConverter.ToInt32(temp, 0);
                //4 bytes per pixel
                address += 4; //4 channels
            }
            address += paddingOffset;
        }
    }
    bitmap.UnlockBits(bitmapData);
    return array2D;
}
These two routines work fine, but only when the pixel format is one of the 32bpp formats. If I use PixelFormat.Format8bppIndexed, they throw an exception.
To avoid that exception (and because I couldn't get a seamless byte/int conversion due to the address calculation), I have to convert the 32-bit Bitmap back to grayscale every time an int[,] is converted to a Bitmap. I want to get rid of this extra step.
Bitmap grayscale = Grayscale.ToGrayscale(InputImage);
//Here, the Bitmap is treated as a 32bit image
//to avoid the exception even though it is already
//an 8bpp grayscale image.
int[,] i1 = ImageDataConverter.ToInteger(grayscale);
Complex[,] comp = ImageDataConverter.ToComplex(i1);
int[,] i2 = ImageDataConverter.ToInteger(comp);
Bitmap b2 = ImageDataConverter.ToBitmap(i2);
//It is already a grayscale image.
//But the problem is, b2.PixelFormat is set to
//a 32bpp format because of those routines.
//Hence the unnecessary conversion.
b2 = Grayscale.ToGrayscale(b2);
I need to modify them to operate on 8bpp indexed (grayscale) images only.
How can I achieve that?
If you want to deal with an indexed bitmap, you need to read each byte of the image and look up the color in the palette. When you save the image, you'll need to do the reverse:
public static Bitmap ToBitmap(int[,] image)
{
    int width = image.GetLength(0);
    int height = image.GetLength(1);
    Bitmap bitmap = new Bitmap(width, height, PixelFormat.Format8bppIndexed);
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, width, height),
                                            ImageLockMode.WriteOnly, PixelFormat.Format8bppIndexed);
    int stride = bitmapData.Stride;
    // A dictionary of colors to their index values
    Dictionary<int, int> palette = new Dictionary<int, int>();
    // A flat list of colors
    List<Color> paletteList = new List<Color>();
    unsafe
    {
        byte* address = (byte*)bitmapData.Scan0;
        for (int x = 0; x < width; x++)
        {
            for (int y = 0; y < height; y++)
            {
                // Get the color from the Bitmap
                int color = image[x, y];
                if (!palette.ContainsKey(color))
                {
                    // This color isn't in the palette, go ahead and add it
                    palette.Add(color, palette.Count);
                    paletteList.Add(Color.FromArgb(color));
                    if (palette.Count >= 256)
                    {
                        // The palette is too big. Ideally this function would
                        // dither some pixels so it could handle this condition
                        // but that would make this example overly complicated
                        throw new InvalidOperationException("Too many colors in image");
                    }
                }
                // And lookup the index of the color in the palette and
                // add it to the BitmapData's memory
                address[stride * y + x] = (byte)palette[color];
            }
        }
    }
    bitmap.UnlockBits(bitmapData);
    // Each time you call Bitmap.Palette it actually returns
    // a Clone of the object, so we need to ask for a cloned
    // copy here.
    var newPalette = bitmap.Palette;
    // For each one of our colors, add it to the palette object
    for (int i = 0; i < paletteList.Count; i++)
    {
        newPalette.Entries[i] = paletteList[i];
    }
    // And since this is a clone, assign it back to the bitmap
    // so it'll take effect.
    bitmap.Palette = newPalette;
    return bitmap;
}
public static int[,] ToInteger(Bitmap bitmap)
{
    if (bitmap.Palette.Entries.Length == 0)
    {
        // This doesn't appear to have a palette, so this operation doesn't
        // make sense
        throw new InvalidOperationException("bitmap is not an indexed bitmap");
    }
    int width = bitmap.Width;
    int height = bitmap.Height;
    int[,] array2D = new int[width, height];
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, width, height),
                                            ImageLockMode.ReadOnly,
                                            PixelFormat.Format8bppIndexed);
    unsafe
    {
        // Pull out the stride to prevent asking for it many times
        int stride = bitmapData.Stride;
        byte* address = (byte*)bitmapData.Scan0;
        for (int x = 0; x < width; x++)
        {
            for (int y = 0; y < height; y++)
            {
                // Look up the color based on the pixel index and set its value
                // in the return array
                array2D[x, y] = bitmap.Palette.Entries[address[stride * y + x]].ToArgb();
            }
        }
    }
    bitmap.UnlockBits(bitmapData);
    return array2D;
}
public static int[,] ToInteger(Bitmap bitmap)
{
    int[,] array2D = new int[bitmap.Width, bitmap.Height];
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
                                            ImageLockMode.ReadWrite,
                                            PixelFormat.Format8bppIndexed);
    int bytesPerPixel = sizeof(byte);
    unsafe
    {
        byte* address = (byte*)bitmapData.Scan0;
        int paddingOffset = bitmapData.Stride - (bitmap.Width * bytesPerPixel);
        for (int i = 0; i < bitmap.Width; i++)
        {
            for (int j = 0; j < bitmap.Height; j++)
            {
                byte[] temp = new byte[bytesPerPixel];
                for (int k = 0; k < bytesPerPixel; k++)
                {
                    temp[k] = address[k];
                }
                int iii = 0;
                if (bytesPerPixel >= sizeof(int))
                {
                    iii = BitConverter.ToInt32(temp, 0);
                }
                else
                {
                    iii = (int)temp[0];
                }
                array2D[j, i] = iii;
                address += bytesPerPixel;
            }
            address += paddingOffset;
        }
    }
    bitmap.UnlockBits(bitmapData);
    return array2D;
}
public static Bitmap ToBitmap(int[,] image)
{
    int Width = image.GetLength(0);
    int Height = image.GetLength(1);
    int i, j;
    Bitmap bitmap = new Bitmap(Width, Height, PixelFormat.Format8bppIndexed);
    BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, Width, Height),
                                            ImageLockMode.ReadOnly, PixelFormat.Format8bppIndexed);
    int bytesPerPixel = sizeof(byte);
    unsafe
    {
        byte* address = (byte*)bitmapData.Scan0;
        for (i = 0; i < bitmapData.Height; i++)
        {
            for (j = 0; j < bitmapData.Width; j++)
            {
                byte[] bytes = BitConverter.GetBytes(image[j, i]);
                for (int k = 0; k < bytesPerPixel; k++)
                {
                    address[k] = bytes[k];
                }
                address += bytesPerPixel;
            }
            address += (bitmapData.Stride - (bitmapData.Width * bytesPerPixel));
        }
    }
    bitmap.UnlockBits(bitmapData);
    Grayscale.SetGrayscalePalette(bitmap);
    return bitmap;
}
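Grayscale.SetGrayscalePalette is not shown above; as a minimal sketch of what such a helper presumably does (an assumption), it fills the 8bpp palette with a linear 256-entry gray ramp:
using System;
using System.Drawing;
using System.Drawing.Imaging;

public static class Grayscale
{
    // Sketch: give an 8bpp indexed bitmap a linear grayscale palette,
    // so index i maps to the gray value (i, i, i).
    public static void SetGrayscalePalette(Bitmap bitmap)
    {
        if (bitmap.PixelFormat != PixelFormat.Format8bppIndexed)
            throw new NotSupportedException("Expected an 8bpp indexed bitmap.");

        // Bitmap.Palette returns a copy, so modify the copy and assign it back.
        ColorPalette palette = bitmap.Palette;
        for (int i = 0; i < 256; i++)
            palette.Entries[i] = Color.FromArgb(i, i, i);
        bitmap.Palette = palette;
    }
}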

libtiff.net writeScanLine returns false

I'm using libtiff.net to make a TIFF from a JPEG.
The problem appears when I try to write: tiff.WriteScanline returns false, meaning the image isn't written into the TIFF.
Why is this happening? And how can I figure out what's wrong?
Here's the code:
private bool creatingTiff()
{
    using (Bitmap bmp = new Bitmap(targetFile))
    {
        using (Tiff tif = Tiff.Open("BitmapTo24BitColorTiff.tif", "w"))
        {
            byte[] raster = getImageRasterBytes(bmp, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
            tif.SetField(TiffTag.IMAGEWIDTH, bmp.Width);
            tif.SetField(TiffTag.IMAGELENGTH, bmp.Height);
            tif.SetField(TiffTag.COMPRESSION, Compression.OJPEG);
            tif.SetField(TiffTag.PHOTOMETRIC, Photometric.YCBCR);
            tif.SetField(TiffTag.SUBFILETYPE, 0);
            tif.SetField(TiffTag.ROWSPERSTRIP, bmp.Height);
            tif.SetField(TiffTag.ORIENTATION, BitMiracle.LibTiff.Classic.Orientation.TOPLEFT);
            tif.SetField(TiffTag.XRESOLUTION, bmp.HorizontalResolution);
            tif.SetField(TiffTag.YRESOLUTION, bmp.VerticalResolution);
            tif.SetField(TiffTag.RESOLUTIONUNIT, ResUnit.INCH);
            tif.SetField(TiffTag.BITSPERSAMPLE, 8);
            tif.SetField(TiffTag.SAMPLESPERPIXEL, 3);
            tif.SetField(TiffTag.JPEGIFOFFSET, 768);
            tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
            for (int i = 0, offset = 0; i < bmp.Height; i++)
            {
                bool b = tif.WriteScanline(raster, offset, i, 0);
                Console.WriteLine("write succes: " + b);
                offset += stride;
            }
        }
        System.Diagnostics.Process.Start("BitmapTo24BitColorTiff.tif");
        return true;
    }
}
private static byte[] getImageRasterBytes(Bitmap bmp, System.Drawing.Imaging.PixelFormat format)
{
    System.Drawing.Rectangle rect = new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height);
    byte[] bits = null;
    try
    {
        // Lock the managed memory
        BitmapData bmpdata = bmp.LockBits(rect, ImageLockMode.ReadWrite, format);
        // Declare an array to hold the bytes of the bitmap.
        bits = new byte[bmpdata.Stride * bmpdata.Height];
        // Copy the values into the array.
        System.Runtime.InteropServices.Marshal.Copy(bmpdata.Scan0, bits, 0, bits.Length);
        // Release managed memory
        bmp.UnlockBits(bmpdata);
    }
    catch
    {
        return null;
    }
    return bits;
}
private static void convertSamples(byte[] data, int width, int height)
{
    int stride = data.Length / height;
    const int samplesPerPixel = 3;
    for (int y = 0; y < height; y++)
    {
        int offset = stride * y;
        int strideEnd = offset + width * samplesPerPixel;
        for (int i = offset; i < strideEnd; i += samplesPerPixel)
        {
            byte temp = data[i + 2];
            data[i + 2] = data[i];
            data[i] = temp;
        }
    }
}
The TIFF tags are written, but the image itself isn't. Perhaps someone can point me in the direction of the library developers' blog (BitMiracle), so I can direct my problem to them directly.
I think your code has the following errors:
You cannot use Compression.OJPEG for new images. Old-JPEG can only be decompressed.
You probably should not specify the TiffTag.JPEGIFOFFSET value by hand. The library will set the proper value itself.
You are trying to write a whole strip with the WriteScanline method. You should use WriteEncodedStrip instead.
It also helps to review the warnings emitted by the library (it emits them to the console).
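A minimal sketch of that last point (assumptions: no compression and RGB photometric to keep it simple, one strip spanning the whole image, and raster coming from the question's getImageRasterBytes followed by convertSamples to swap BGR to RGB):
// Needs BitMiracle.LibTiff.Classic; bmp, getImageRasterBytes and convertSamples are from the question.
byte[] raster = getImageRasterBytes(bmp, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
convertSamples(raster, bmp.Width, bmp.Height); // GDI+ stores pixels as BGR, Photometric.RGB expects RGB
using (Tiff tif = Tiff.Open("BitmapTo24BitColorTiff.tif", "w"))
{
    tif.SetField(TiffTag.IMAGEWIDTH, bmp.Width);
    tif.SetField(TiffTag.IMAGELENGTH, bmp.Height);
    tif.SetField(TiffTag.COMPRESSION, Compression.NONE); // assumption: skip JPEG compression for now
    tif.SetField(TiffTag.PHOTOMETRIC, Photometric.RGB);
    tif.SetField(TiffTag.ROWSPERSTRIP, bmp.Height); // one strip covering the whole image
    tif.SetField(TiffTag.BITSPERSAMPLE, 8);
    tif.SetField(TiffTag.SAMPLESPERPIXEL, 3);
    tif.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
    // Note: raster is Stride * Height bytes; this only matches what the strip expects
    // when Stride == Width * 3 (no row padding). Otherwise repack the rows first.
    int written = tif.WriteEncodedStrip(0, raster, raster.Length);
    Console.WriteLine("bytes written: " + written);
}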

Improving Image compositing Algorithm c# .NET

I was wondering if anyone could shed some light on improvements I could make to speed up this compositing algorithm. It takes 3 images, splits them up to get the 1st image's red channel, the 2nd image's green channel and the 3rd image's blue channel, and composites them together into 1 new image. It works, but at an excruciatingly slow pace, and I think the reason comes down to the pixel-by-pixel processing it has to do on all image components.
The process is:
For all images:
Extract the respective R, G and B values -> composite into 1 image -> save the new image.
foreach (Image[] QRE2ImgComp in QRE2IMGArray)
{
    Globals.updProgress = "Processing frames: " + k + " of " + QRE2IMGArray.Count + " frames done.";
    QRMProgressUpd(EventArgs.Empty);
    Image RedLayer = GetRedImage(QRE2ImgComp[0]);
    QRE2ImgComp[0] = RedLayer;
    Image GreenLayer = GetGreenImage(QRE2ImgComp[1]);
    QRE2ImgComp[1] = GreenLayer;
    Image BlueLayer = GetBlueImage(QRE2ImgComp[2]);
    QRE2ImgComp[2] = BlueLayer;
    Bitmap composite = new Bitmap(QRE2ImgComp[0].Height, QRE2ImgComp[0].Width);
    Color Rlayer, Glayer, Blayer;
    byte R, G, B;
    for (int y = 0; y < composite.Height; y++)
    {
        for (int x = 0; x < composite.Width; x++)
        {
            //pixelColorAlpha = composite.GetPixel(x, y);
            Bitmap Rcomp = new Bitmap(QRE2ImgComp[0]);
            Bitmap Gcomp = new Bitmap(QRE2ImgComp[1]);
            Bitmap Bcomp = new Bitmap(QRE2ImgComp[2]);
            Rlayer = Rcomp.GetPixel(x, y);
            Glayer = Gcomp.GetPixel(x, y);
            Blayer = Bcomp.GetPixel(x, y);
            R = (byte)(Rlayer.R);
            G = (byte)(Glayer.G);
            B = (byte)(Blayer.B);
            composite.SetPixel(x, y, Color.FromArgb((int)R, (int)G, (int)B));
        }
    }
    Globals.updProgress = "Saving frame...";
    QRMProgressUpd(EventArgs.Empty);
    Image tosave = composite;
    Globals.QRFrame = tosave;
    tosave.Save("C:\\QRItest\\E" + k + ".png", ImageFormat.Png);
    k++;
}
For reference, here is the red channel filter method; the blue and green ones are essentially the same:
public Image GetRedImage(Image sourceImage)
{
    Bitmap bmp = new Bitmap(sourceImage);
    Bitmap redBmp = new Bitmap(sourceImage.Width, sourceImage.Height);
    for (int x = 0; x < bmp.Width; x++)
    {
        for (int y = 0; y < bmp.Height; y++)
        {
            Color pxl = bmp.GetPixel(x, y);
            Color redPxl = Color.FromArgb((int)pxl.R, 0, 0);
            redBmp.SetPixel(x, y, redPxl);
        }
    }
    Image tout = (Image)redBmp;
    return tout;
}
Move these
Bitmap Rcomp = new Bitmap(QRE2ImgComp[0]);
Bitmap Gcomp = new Bitmap(QRE2ImgComp[1]);
Bitmap Bcomp = new Bitmap(QRE2ImgComp[2]);
outside the for-loops!
Other very important points:
Avoid using GetPixel - it is VERY SLOW!
Check out LockBits etc. - this is how pixel-level access is usually done in .NET.
Consider using a 3rd-party library (free or commercial)... several have optimized methods built in to do what you are trying to achieve...
I totally agree with the points Yahia listed in his answer to improve performance. I'd like to add one more point regarding performance. You could use the Parallel class of the .Net Framework to parallelize the execution of your for loops. The following example makes use of the LockBits method and the Parallel class to improve performance (assuming 32 bits per pixel (PixelFormat.Format32bppArgb)):
public unsafe static Bitmap GetBlueImagePerf(Image sourceImage)
{
    int width = sourceImage.Width;
    int height = sourceImage.Height;
    Bitmap bmp = new Bitmap(sourceImage);
    Bitmap redBmp = new Bitmap(width, height, bmp.PixelFormat);
    BitmapData bd = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, PixelFormat.Format32bppRgb);
    BitmapData bd2 = redBmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.WriteOnly, PixelFormat.Format32bppRgb);
    byte* source = (byte*)bd.Scan0.ToPointer();
    byte* target = (byte*)bd2.Scan0.ToPointer();
    int stride = bd.Stride;
    Parallel.For(0, height, (y1) =>
    {
        byte* s = source + (y1 * stride);
        byte* t = target + (y1 * stride);
        for (int x = 0; x < width; x++)
        {
            // use t[1], s[1] to access green channel
            // use t[2], s[2] to access red channel
            t[0] = s[0];
            t += 4; // Add bytes per pixel to current position.
            s += 4; // For other pixel formats this value is different.
        }
    });
    bmp.UnlockBits(bd);
    redBmp.UnlockBits(bd2);
    return redBmp;
}
public unsafe static void DoImageConversion()
{
    Bitmap RedLayer = GetRedImagePerf(Image.FromFile("image_path1"));
    Bitmap GreenLayer = GetGreenImagePerf(Image.FromFile("image_path2"));
    Bitmap BlueLayer = GetBlueImagePerf(Image.FromFile("image_path3"));
    Bitmap composite =
        new Bitmap(RedLayer.Width, RedLayer.Height, RedLayer.PixelFormat);
    BitmapData bd = composite.LockBits(new Rectangle(0, 0, RedLayer.Width, RedLayer.Height), ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
    byte* comp = (byte*)bd.Scan0.ToPointer();
    BitmapData bdRed = RedLayer.LockBits(new Rectangle(0, 0, RedLayer.Width, RedLayer.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData bdGreen = GreenLayer.LockBits(new Rectangle(0, 0, RedLayer.Width, RedLayer.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData bdBlue = BlueLayer.LockBits(new Rectangle(0, 0, RedLayer.Width, RedLayer.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    byte* red = (byte*)bdRed.Scan0.ToPointer();
    byte* green = (byte*)bdGreen.Scan0.ToPointer();
    byte* blue = (byte*)bdBlue.Scan0.ToPointer();
    int stride = bdRed.Stride;
    Parallel.For(0, bdRed.Height, (y1) =>
    {
        byte* r = red + (y1 * stride);
        byte* g = green + (y1 * stride);
        byte* b = blue + (y1 * stride);
        byte* c = comp + (y1 * stride);
        for (int x = 0; x < bdRed.Width; x++)
        {
            c[0] = b[0];
            c[1] = g[1];
            c[2] = r[2];
            r += 4; // Add bytes per pixel to current position.
            g += 4; // For other pixel formats this value is different.
            b += 4; // Use Image.GetPixelFormatSize to get the number of bits per pixel.
            c += 4;
        }
    });
    composite.Save("save_image_path", ImageFormat.Jpeg);
}
Hope this answer gives you a starting point for improving your code.
