I am trying to create a 16-bit BMP/JPEG file using the following code:
public static void CreateBitmap_Rgb48(int width, int height, double dpiX, double dpiY, string fn)
{
int bytesperpixel = 6; // 2 bytes per channel, 3 channels (R, G, B for PixelFormats.Rgb48)
int channelCount = 3;
int stride = width * bytesperpixel;
byte[] imgdata = new byte[width * height * bytesperpixel];
int rectDim = 40;
ushort[] intPixelData = new ushort[width * height * channelCount];
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width * channelCount; col += channelCount)
{
if (((col / channelCount / rectDim) % 2) != ((row / rectDim) % 2))
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0x0000;
intPixelData[row * width * channelCount + col + 2] = 0xffff;
}
else
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0xffff;
intPixelData[row * width * channelCount + col + 2] = 0x0000;
}
}
}
Buffer.BlockCopy(intPixelData, 0, imgdata, 0, imgdata.Length);
// compose the BitmapImage
var image = BitmapSource.Create(width, height, dpiX, dpiY, PixelFormats.Rgb48, null, imgdata, stride);
BmpBitmapEncoder encoder = new BmpBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create(image));
using (var fileStream = new FileStream(fn, FileMode.Create))
{
encoder.Save(fileStream);
}
}
Similarly, for jpeg, I used:
JpegBitmapEncoder encoder = new JpegBitmapEncoder();
But the generated file size is wrong, and BmpBitmapDecoder shows the format as Default for the BMP and Rgb24 for the JPEG; neither is Rgb48. What is wrong?
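For comparison, here is a minimal sketch (the helper name is mine) that saves the same BitmapSource with the PNG and TIFF encoders, both of which can carry 16 bits per channel, so the Rgb48 data survives a save; the BMP and JPEG encoders appear to fall back to 8-bit formats, which is consistent with the Default/Rgb24 results above:
public static void SaveRgb48(BitmapSource image, string pngFile, string tiffFile)
{
    // PNG supports 16 bits per channel, so Rgb48 is preserved.
    var pngEncoder = new PngBitmapEncoder();
    pngEncoder.Frames.Add(BitmapFrame.Create(image));
    using (var fs = new FileStream(pngFile, FileMode.Create))
    {
        pngEncoder.Save(fs);
    }
    // TIFF also stores 16 bits per channel.
    var tiffEncoder = new TiffBitmapEncoder { Compression = TiffCompressOption.None };
    tiffEncoder.Frames.Add(BitmapFrame.Create(image));
    using (var fs = new FileStream(tiffFile, FileMode.Create))
    {
        tiffEncoder.Save(fs);
    }
}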
I have this piece of code, which converts a bitmap with the 32bppArgb pixel format to a 1D byte[] array:
using (var bitmap = new Bitmap(width, height, PixelFormat.Format32bppArgb))
{
var boundsRect = new Rectangle(0, 0, width, height);
// Copy pixels from screen capture Texture to GDI bitmap
var mapDest = bitmap.LockBits(boundsRect, ImageLockMode.ReadOnly, bitmap.PixelFormat);
var sourcePtr = mapSource.DataPointer;
var destPtr = mapDest.Scan0;
for (int y = 0; y < height; y++)
{
// Copy a single line
Utilities.CopyMemory(destPtr, sourcePtr, width * 4);
// Advance pointers
sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
destPtr = IntPtr.Add(destPtr, mapDest.Stride);
}
// Release source and dest locks
bitmap.UnlockBits(mapDest);
device.ImmediateContext.UnmapSubresource(screenTexture, 0);
using (var ms = new MemoryStream())
{
bitmap.Save(ms, ImageFormat.Bmp);
ScreenRefreshed?.Invoke(this, ms.ToArray());
_init = true;
}
}
I call my function ReplacePixels() to read and replace the RGBA values like this:
data = ReplacePixels(data);
data is the byte[] array produced by the code above.
The function I use, so far without success:
private byte[] ReplacePixels (byte[] data)
{
int width = Screen.PrimaryScreen.Bounds.Width;
int height = Screen.PrimaryScreen.Bounds.Height;
Int32 curRowOffs = 0;
Int32 stride = 4 * (width * 4 + 31) / 32;
try
{
for (uint y = 0; y < height; y++)
{
// Set offset to start of current row
Int32 curOffs = curRowOffs;
for (uint x = 0; x < width; x++)
{
// ARGB = bytes [B,G,R,A]
var b = data[curOffs];
var g = data[curOffs + 1];
var r = data[curOffs + 2];
var a = data[curOffs + 3];
//bgra changes here..
//apply bgra values
data[curOffs] = Convert.ToByte(b);
data[curOffs + 1] = Convert.ToByte(g);
data[curOffs + 2] = Convert.ToByte(r);
data[curOffs + 3] = Convert.ToByte(a);
// Increase offset to next colour
curOffs += 4;
}
// Increase row offset
curRowOffs += stride;
}
}
catch (System.Exception e)
{
Debug.WriteLine(e);
}
return data;
}
The question is: how can I read and replace the ARGB values in this array?
Edit: this is the solution I found:
public byte[] ReplacePixels(byte[] data)
{
int width = Screen.PrimaryScreen.Bounds.Width;
int height = Screen.PrimaryScreen.Bounds.Height;
Int32 stride = width * 4;
Int32 curRowOffs = (((width * height * 4) + 54) - 1) - stride;
for (uint y = 0; y < height; y++)
{
uint index = (uint)curRowOffs;
for (uint x = 0; x < width; x++)
{
// ARGB = bytes [B,G,R,A]
if (index >= 0)
{
//var rgba = GetRGB(data, index);
var b = data[index];
var g = data[index + 1];
var r = data[index + 2];
var a = data[index + 3];
//bgra changes here...
data[index] = b;
data[index + 1] = g;
data[index + 2] = r;
data[index + 3] = a;
}
index += 4;
}
curRowOffs -= stride;
}
return data;
}
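To make the arithmetic in that solution easier to follow: the byte[] produced by bitmap.Save(ms, ImageFormat.Bmp) is a complete BMP file, so the pixel data starts after the 54-byte header and the rows are stored bottom-up. A small hedged helper (the name and layout assumptions are mine) that maps image coordinates to an index in that buffer:
// Sketch only: index of the blue channel of pixel (x, y) inside a 32bpp BMP
// file buffer (54-byte header, bottom-up row order, no row padding at 32bpp).
private static int GetPixelIndex(int x, int y, int width, int height)
{
    const int headerSize = 54;      // BITMAPFILEHEADER (14) + BITMAPINFOHEADER (40)
    int stride = width * 4;         // 4 bytes per pixel: B, G, R, A
    int row = height - 1 - y;       // rows are stored bottom-up
    return headerSize + row * stride + x * 4;
}
Usage: data[GetPixelIndex(x, y, width, height) + 2] = 255; would set the red channel of pixel (x, y).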
I'm trying to implement a webcam capture app which should take still frames, display them on the screen and save them to disk.
Since I'm already using SharpDX to capture the screen, I thought it would be nice to use that library here too. I was not sure whether SharpDX had any video capture capabilities, so I started searching and found parts of what looks like a webcam capture prototype:
var attributes = new MediaAttributes(1);
attributes.Set<Guid>(CaptureDeviceAttributeKeys.SourceType, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var activates = MediaFactory.EnumDeviceSources(attributes);
var dic = new Dictionary<string, Activate>();
foreach (var activate in activates)
{
var uid = activate.Get(CaptureDeviceAttributeKeys.SourceTypeVidcapSymbolicLink);
dic.Add(uid, activate);
}
var camera = dic.First().Value;
It outputs a camera with a strange uid; I'm not sure whether it's correct.
What am I supposed to do after this?
Edit
I got this code kind of working. I still don't understand why the output is strange.
var attributes = new MediaAttributes(1);
attributes.Set(CaptureDeviceAttributeKeys.SourceType.Guid, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var mediaSource = MediaFactory.EnumDeviceSources(attributes)[0].ActivateObject<MediaSource>();
mediaSource.CreatePresentationDescriptor(out var presentationDescriptor);
var reader = new SourceReader(mediaSource);
var mediaTypeIndex = 0;
int width, height;
using (var mt = reader.GetNativeMediaType(0, mediaTypeIndex))
{
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameSize), out width, out height);
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameRate), out var frameRateNumerator, out var frameRateDenominator);
UnpackLong(mt.Get(MediaTypeAttributeKeys.PixelAspectRatio), out var aspectRatioNumerator, out var aspectRatioDenominator);
}
var sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out var readStreamIndex, out var readFlags, out var timestamp);
if (sample == null)
sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out readStreamIndex, out readFlags, out timestamp);
var sourceBuffer = sample.GetBufferByIndex(0); // sample.ConvertToContiguousBuffer();
var sourcePointer = sourceBuffer.Lock(out var maxLength, out var currentLength);
var data = new byte[sample.TotalLength];
Marshal.Copy(sourcePointer, data, 0, sample.TotalLength);
var newData = new byte[width * 4 * height];
var partWidth = width / 4;
var partHeight = height / 3;
for (var i = 0; i < sample.TotalLength; i += 4)
{
//X8R8B8G8 -> BGRA = 4
newData[i] = data[i + 3];
newData[i + 1] = data[i + 2];
newData[i + 2] = data[i + 1];
newData[i + 3] = 255; //data[i];
}
//var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, data, ((width * 24 + 31) / 32) * 4);
var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, newData, width * 4);
sourceBuffer.Unlock();
sourceBuffer.Dispose();
The output image is this (I was showing a color spectrum to my webcam):
The image is repeated four times; each part has a grayscale image and a color version at half the height.
Two thirds of the image is transparent.
Your output is NV12; here is some sample code to convert NV12 to RGB:
unsafe private static void TransformImage_NV12(IntPtr pDest, int lDestStride, IntPtr pSrc, int lSrcStride, int dwWidthInPixels, int dwHeightInPixels)
{
uint imageWidth = (uint)dwWidthInPixels;
uint widthHalf = imageWidth / 2;
uint imageHeight = (uint)dwHeightInPixels;
byte* nv12Data = (byte*)pSrc;
byte* rgbData = (byte*)pDest;
uint dataSize = imageWidth * imageHeight * 3;
for (uint y = 0; y < imageHeight; y++)
{
for (uint x = 0; x < imageWidth; x++)
{
uint xEven = x & 0xFFFFFFFE;
uint yEven = y & 0xFFFFFFFE;
uint yIndex = y * imageWidth + x;
uint cIndex = imageWidth * imageHeight + yEven * widthHalf + xEven;
byte yy = nv12Data[yIndex];
byte cr = nv12Data[cIndex + 0];
byte cb = nv12Data[cIndex + 1];
uint outputIndex = (dataSize - (y * imageWidth + x) * 3) - 3;
rgbData[outputIndex + 0] = (byte)Math.Min(Math.Max((yy + 1.402 * (cr - 128)), 0), 255);
rgbData[outputIndex + 1] = (byte)Math.Min(Math.Max((yy - 0.344 * (cb - 128) - 0.714 * (cr - 128)), 0), 255);
rgbData[outputIndex + 2] = (byte)Math.Min(Math.Max((yy + 1.772 * (cb - 128)), 0), 255);
}
}
}
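To connect the converter back to the question's code, a hedged usage sketch (only the rgb/rgbSource names are mine; sourcePointer, width and height come from the code above), assuming the NV12 rows are tightly packed so the source stride equals the width. Note that the routine writes its output back to front, so the result comes out rotated 180 degrees:
unsafe
{
    // 3 bytes per pixel (R, G, B) in a tightly packed buffer.
    var rgb = new byte[width * height * 3];
    fixed (byte* dest = rgb)
    {
        TransformImage_NV12((IntPtr)dest, width * 3, sourcePointer, width, width, height);
    }
    var rgbSource = BitmapSource.Create(width, height, 96, 96, PixelFormats.Rgb24, null, rgb, width * 3);
}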
I am using the code below to read the pixel data of an image into a byte[]. This solution works for me, but it takes a long time and I need to optimize it. Is there another way to do this?
private byte[] GetPixelBytes(string fileName)
{
if (string.IsNullOrEmpty(fileName))
throw new ArgumentException("FileName cannot be null or blank");
var bitmapImageSource = new BitmapImage(new System.Uri(fileName));
System.Drawing.Bitmap imgo = new System.Drawing.Bitmap(fileName);
var height = imgo.Height;
var width = imgo.Width;
//System.Drawing.Image.GetPixelFormatSize(imgo.PixelFormat);
var bitsPerPixel = bitmapImageSource.Format.BitsPerPixel;
var bytesPerPixel = (bitmapImageSource.Format.BitsPerPixel + 7) / 8;
int size = (int)(bitmapImageSource.Width * bitmapImageSource.Height * bytesPerPixel);
byte[] pixels = new byte[size];
for (int i = 0; i < imgo.Width; i++)
{
for (int j = 0; j < imgo.Height; j++)
{
System.Drawing.Color pixel = imgo.GetPixel(i, j);
//int offset = y * imgo.Width * 4 + x * 4; (x is the column and y is the row)
int offset = j * imgo.Width * bytesPerPixel + i * bytesPerPixel;
switch (bytesPerPixel)
{
case 4:
pixels[offset + 0] = pixel.B;
pixels[offset + 1] = pixel.G;
pixels[offset + 2] = pixel.R;
pixels[offset + 3] = pixel.A;
break;
case 3:
pixels[offset + 0] = pixel.B;
pixels[offset + 1] = pixel.G;
pixels[offset + 2] = pixel.R;
break;
default:
throw new InvalidCastException("Only 32 and 24 BPP images are supported");
break;
}
}
}
return pixels;
}
I have tried the option below to optimize it:
var bitmapData = imgo.LockBits(new System.Drawing.Rectangle(0,0,imgo.Width,imgo.Height),ImageLockMode.ReadOnly,imgo.PixelFormat);
var length = bitmapData.Stride * bitmapData.Height;
byte[] bytes = new byte[length];
System.Runtime.InteropServices.Marshal.Copy(bitmapData.Scan0, bytes, 0, length);
imgo.UnlockBits(bitmapData);
But in this case the copied bytes contain different values than what my original function returns. I am using a color image with the following properties:
Dimensions: 3072 x 3072
Width: 3072 Pixel
Height: 3072 Pixel
Resolution: 96 dpi (Both)
Bit depth: 24
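One likely reason for the mismatch (an assumption on my part): LockBits returns rows padded out to Stride in the bitmap's native B, G, R order, while the GetPixel loop builds a tightly packed buffer whose bytesPerPixel comes from the WPF decoder, which may report a different format (e.g. Bgr32) than the 24-bit GDI+ bitmap. A hedged sketch that strips the padding row by row so the LockBits copy becomes a tightly packed width * 3 buffer:
// Sketch only: copy a 24bpp bitmap into a tightly packed width * 3 buffer,
// dropping any per-row padding that LockBits exposes via Stride.
private static byte[] GetPackedPixelBytes(System.Drawing.Bitmap bmp)
{
    var rect = new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height);
    var data = bmp.LockBits(rect, ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    try
    {
        int packedStride = bmp.Width * 3;
        var packed = new byte[packedStride * bmp.Height];
        for (int y = 0; y < bmp.Height; y++)
        {
            // Copy one row and skip the padding bytes at the end of each stride.
            System.Runtime.InteropServices.Marshal.Copy(
                IntPtr.Add(data.Scan0, y * data.Stride), packed, y * packedStride, packedStride);
        }
        return packed;
    }
    finally
    {
        bmp.UnlockBits(data);
    }
}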
Resolved with the code below:
private byte[] ReadByte()
{
System.Drawing.Bitmap imgo= new System.Drawing.Bitmap(filename);
var bitmapData = imgo.LockBits(new System.Drawing.Rectangle(0,0,imgo.Width,imgo.Height),ImageLockMode.ReadOnly,imgo.PixelFormat);
var length = bitmapData.Stride * bitmapData.Height;
byte[] bytes = new byte[length];
System.Runtime.InteropServices.Marshal.Copy(bitmapData.Scan0, bytes, 0, length);
imgo.UnlockBits(bitmapData);
return bytes;
}
private static BitmapSource ByteToImage(byte[] buffer, int width, int height, PixelFormat pixelFormat, string fileName, int bytesPerPixel = 0)
{
var stride = ((width * pixelFormat.BitsPerPixel + 31) / 32) * 4;
switch (bytesPerPixel)
{
case 1:
pixelFormat = PixelFormats.Gray8;
break;
case 3:
pixelFormat = PixelFormats.Rgb24;
break;
case 4:
pixelFormat = PixelFormats.Bgr32;
break;
}
var imago = new WriteableBitmap(width, height, 96, 96, pixelFormat, null);
imago.WritePixels(new Int32Rect(0, 0, width, height), buffer, width * bytesPerPixel, 0);
return imago;
}
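For completeness, a hedged usage sketch tying the two together for the 3072 x 3072, 24-bit image described above (3 bytes per pixel). This only works without further handling because 3072 * 3 is already a multiple of 4, so LockBits adds no row padding; for other widths the padding would have to be stripped first, as in the earlier sketch:
var packed = ReadByte();   // stride == width * 3 here, so the buffer is already tightly packed
var bitmapSource = ByteToImage(packed, 3072, 3072, PixelFormats.Bgr24, filename, bytesPerPixel: 3);
Note that ByteToImage overrides the passed pixel format based on bytesPerPixel; since GDI+ stores 24bpp pixels in B, G, R order, PixelFormats.Bgr24 is arguably a closer match than the Rgb24 chosen in the 3-byte case.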
I'm working on a simple drawing application where I can 'paint' on top of an existing image. I've made a little headway, but I've noticed something odd about the alpha channel of the bitmap I'm displaying. My draw brush function looks like this:
public unsafe void BitmapDrawBrush(double _x, double _y, double _radius, double _falloff, double _strength)
{
if (DisplayBmp == null)
{
DisplayBmp = new Bitmap(ImageWidth, ImageHeight, PixelFormat.Format32bppArgb);
}
const int pixelSize = 4; // 32 bits per pixel
Bitmap target = new Bitmap(DisplayBmp.Width, DisplayBmp.Height, PixelFormat.Format32bppArgb);
BitmapData sourceData = null, targetData = null;
try
{
sourceData = DisplayBmp.LockBits(new Rectangle(0, 0, DisplayBmp.Width, DisplayBmp.Height),ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
targetData = target.LockBits(new Rectangle(0, 0, target.Width, target.Height),ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
for (int y = 0; y < DisplayBmp.Height; ++y)
{
byte* sourceRow = (byte*)sourceData.Scan0 + (y * sourceData.Stride);
byte* targetRow = (byte*)targetData.Scan0 + (y * targetData.Stride);
for (int x = 0; x < DisplayBmp.Width; ++x)
{
byte b = sourceRow[x * pixelSize + 0];
byte g = sourceRow[x * pixelSize + 1];
byte r = sourceRow[x * pixelSize + 2];
byte a = sourceRow[x * pixelSize + 3];
double nx = x / (double)ImageWidth;
double ny = y/(double)ImageHeight;
double xDist = nx - _x;
double yDist = ny - _y;
if ((xDist * xDist) + (yDist * yDist) <= (_radius * _radius))
{
double pxDist = 1.0 - (((xDist * xDist) + (yDist * yDist)) / (_radius * _radius));
r = (byte)(255 * pxDist);
g = (byte)(255 * pxDist);
b = (byte)(255 * pxDist);
a = (byte)(255 * pxDist * _strength); // <-the alpha channel value
}
targetRow[x * pixelSize + 0] = b;
targetRow[x * pixelSize + 1] = g;
targetRow[x * pixelSize + 2] = r;
targetRow[x * pixelSize + 3] = a;
}
}
}
finally
{
if (sourceData != null)
DisplayBmp.UnlockBits(sourceData);
if (targetData != null)
target.UnlockBits(targetData);
}
DisplayBmp = target;
UpdateBitmap();
}
Yet when I run my application and place a few brush strokes (the draw brush function is currently only called on mouse down, so I'm really only drawing dots), the alpha channel doesn't seem to fade toward the edge. What I would expect is that as the dot gets blacker toward the edge, the alpha value diminishes with it. See below. Any ideas as to why this is happening?
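One way to check whether the alpha values themselves are correct, independently of how the bitmap is later displayed or composited, is to flatten the ARGB bitmap over a known backdrop; a hedged diagnostic sketch (the helper name is mine):
// Sketch only: draw the ARGB bitmap over a solid background so the alpha fade
// becomes directly visible. If the fade shows up here, the alpha values are being
// written correctly and the issue lies in how the bitmap is displayed.
public Bitmap FlattenOverBackground(Bitmap argbSource, Color background)
{
    var result = new Bitmap(argbSource.Width, argbSource.Height, PixelFormat.Format32bppArgb);
    using (var g = Graphics.FromImage(result))
    {
        g.Clear(background);              // known backdrop, e.g. Color.White
        g.DrawImage(argbSource, 0, 0);    // DrawImage respects the source alpha channel
    }
    return result;
}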
I'm having a problem with writing to bitmaps using LockBits. I'm working on edge detection software which produces a strange distortion effect with most images. I've tried to isolate the problem, and it seems fairly random. It is not associated with the file format; oddly, the only images that seem to work are pictures made for desktop wallpapers, and I don't really know why. I only switched to writing with LockBits recently, so I am sure the problem is there (there were no problems when I was reading with LockBits and writing with SetPixel). Here's a screenshot of the effect:
As you can see, the edge detection works, but the image is distorted horizontally, making the image into a parallelogram.
Here's a code snippet of the method that handles all this (in C#):
private void analyze()
{
//When the analyze button is pressed
percentageInt = float.Parse(textBox1.Text);
float scale = 1;
if (comboBox1.SelectedItem == "Auto")
{
scale = pic.Width / pictureBox1.Width;
}
else if (comboBox1.SelectedItem == "1/2")
{
scale = 2;
}
else if (comboBox1.SelectedItem == "1/4")
{
scale = 4;
}
else if (comboBox1.SelectedItem == "Original")
{
scale = 1;
}
else
{
scale = pic.Width / pictureBox1.Width;
}
int tempWidth = 1;
int tempHeight = 1;
if (scale >= 1)
{
tempWidth = (int)Math.Floor(pic.Width / scale);
tempHeight = (int)Math.Floor(pic.Height / scale);
}
else
{
tempWidth = pic.Width;
tempHeight = pic.Height;
}
width = pic.Width;
height = pic.Height;
edgeData = new Boolean[pic.Width, pic.Height];
img = (Bitmap)resizeImage(pic, new Size(tempWidth, tempHeight));
pic2 = new Bitmap(tempWidth, tempHeight);
Bitmap img2 = (Bitmap)pic2;
Color[] pixels = null;
BitmapData data = img.LockBits(new Rectangle(0, 0, img.Width, img.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
int scaledPercent = (int)(Math.Round(percentageInt * 255));
Debug.WriteLine("percent " + scaledPercent);
unsafe
{
Debug.WriteLine("Woah there, unsafe stuff");
byte* prevLine = (byte*)data.Scan0;
byte* currLine = prevLine + data.Stride;
byte* nextLine = currLine + data.Stride;
for (int y = 1; y < img.Height - 1; y++)
{
byte* pp = prevLine + 3;
byte* cp = currLine + 3;
byte* np = nextLine + 3;
for (int x = 1; x < img.Width - 1; x++)
{
if (IsEdgeOptimized(pp, cp, np, scaledPercent))
{
edgeData[x, y] = true;
//Debug.WriteLine("x " + x + "y " + y);
//img2.SetPixel(x, y, Color.Black);
//bytes[(y * img.Width + x) * 3 + 2] = 255;
}
else
{
bytes[(y * img.Width + x) * 3] = 255;
bytes[(y * img.Width + x) * 3 + 1] = 255;
bytes[(y * img.Width + x) * 3 + 2] = 255;
//img2.SetPixel(x, y, Color.White);
}
pp += 3; cp += 3; np += 3;
}
prevLine = currLine;
currLine = nextLine;
nextLine += data.Stride;
}
}
System.Runtime.InteropServices.Marshal.Copy(bytes, 0, data.Scan0, size);
img.UnlockBits(data);
pictureBox2.Image = img;
} // end analyze
So what is causing the problem, and how can I fix it? If you need more details, feel free to comment.
You're initializing your bytes buffer with stride x height bytes:
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
But then using the width (instead of stride) when you write to it:
bytes[(y * img.Width + x) * 3] = 255;
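A minimal corrected sketch of the write (same 24bpp buffer), indexing rows by the stride so each row starts where the later Marshal.Copy expects it; indexing by the width makes every row land a few bytes early, which accumulates into the horizontal shear seen in the screenshot:
// Index rows by data.Stride, not by the pixel width.
int rowStart = y * Math.Abs(data.Stride);
bytes[rowStart + x * 3] = 255;
bytes[rowStart + x * 3 + 1] = 255;
bytes[rowStart + x * 3 + 2] = 255;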