I'm having a problem writing image data using LockBits. I'm working on edge detection software that produces a strange distortion effect with most images. I've tried to isolate the problem, and it seems fairly random: it isn't tied to the image format, yet the only images that come out correctly are pictures made as desktop wallpapers, and I don't really know why. I only switched to writing via LockBits recently, so I'm fairly sure the problem is there (there were no problems when I was reading with LockBits and writing with SetPixel). Here's a screenshot of the effect:
As you can see, the edge detection works, but the image is distorted horizontally, making the image into a parallelogram.
Here's a code snippet of the method that handles all this (in C#):
private void analyze()
{
//When the analyze button is pressed
percentageInt = float.Parse(textBox1.Text);
float scale = 1;
if (comboBox1.SelectedItem == "Auto")
{
scale = pic.Width / pictureBox1.Width;
}
else if (comboBox1.SelectedItem == "1/2")
{
scale = 2;
}
else if (comboBox1.SelectedItem == "1/4")
{
scale = 4;
}
else if (comboBox1.SelectedItem == "Original")
{
scale = 1;
}
else
{
scale = pic.Width / pictureBox1.Width;
}
int tempWidth = 1;
int tempHeight = 1;
if (scale >= 1)
{
tempWidth = (int)Math.Floor(pic.Width / scale);
tempHeight = (int)Math.Floor(pic.Height / scale);
}
else
{
tempWidth = pic.Width;
tempHeight = pic.Height;
}
width = pic.Width;
height = pic.Height;
edgeData = new Boolean[pic.Width, pic.Height];
img = (Bitmap)resizeImage(pic, new Size(tempWidth, tempHeight));
pic2 = new Bitmap(tempWidth, tempHeight);
Bitmap img2 = (Bitmap)pic2;
Color[] pixels = null;
BitmapData data = img.LockBits(new Rectangle(0, 0, img.Width, img.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
int scaledPercent = (int)(Math.Round(percentageInt * 255));
Debug.WriteLine("percent " + scaledPercent);
unsafe
{
Debug.WriteLine("Woah there, unsafe stuff");
byte* prevLine = (byte*)data.Scan0;
byte* currLine = prevLine + data.Stride;
byte* nextLine = currLine + data.Stride;
for (int y = 1; y < img.Height - 1; y++)
{
byte* pp = prevLine + 3;
byte* cp = currLine + 3;
byte* np = nextLine + 3;
for (int x = 1; x < img.Width - 1; x++)
{
if (IsEdgeOptimized(pp, cp, np, scaledPercent))
{
edgeData[x, y] = true;
//Debug.WriteLine("x " + x + "y " + y);
//img2.SetPixel(x, y, Color.Black);
//bytes[(y * img.Width + x) * 3 + 2] = 255;
}
else
{
bytes[(y * img.Width + x) * 3] = 255;
bytes[(y * img.Width + x) * 3 + 1] = 255;
bytes[(y * img.Width + x) * 3 + 2] = 255;
//img2.SetPixel(x, y, Color.White);
}
pp += 3; cp += 3; np += 3;
}
prevLine = currLine;
currLine = nextLine;
nextLine += data.Stride;
}
}
System.Runtime.InteropServices.Marshal.Copy(bytes, 0, data.Scan0, size);
img.UnlockBits(data);
pictureBox2.Image = img;
} // end analyze
So what is causing the problem, and how can I fix it? If you need more details, feel free to comment.
You're initializing your bytes buffer with stride x height bytes:
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
But then using the width (instead of stride) when you write to it:
bytes[(y * img.Width + x) * 3] = 255;
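A minimal sketch of the fix, reusing the question's variables: compute the row offset from the stride (the padded row length in bytes) rather than from the width.
// Inside the existing x/y loops, the non-edge (else) branch would become:
int offset = y * Math.Abs(data.Stride) + x * 3; // row offset uses the stride, not img.Width * 3
bytes[offset] = 255;      // B
bytes[offset + 1] = 255;  // G
bytes[offset + 2] = 255;  // R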
Related
I'm currently in the development phase of a photo-converter program and am working on a blur filter. In the initial prototyping stage I devised an algorithm that keeps an accumulator for each color channel and adds up all the pixels within a given radius of the target pixel; the program then divides each accumulator by the number of pixels actually read (not counting those off-screen). At first I thought this would be fine, but once it started working, the filter took an hour to render, with this being the result at the lowest setting. So I opted to use parallel processing in C# to make the process much faster. With the boost in speed came the cost of the image becoming badly glitched. Here's the image before, and here's the image afterwards.
This is the code I wrote for the filter:
public static DirectBitmap NewBlur (DirectBitmap image, int radius)
{
int sectorDiam = 128;
DirectBitmap newimage = image;
List<Rectangle> renderSectors = new List<Rectangle>();
Rectangle rect;
for (int x = 0; x < (image.Width / sectorDiam); x++)
{
int xwidth = sectorDiam;
for (int y = 0; y < (image.Height / sectorDiam); y++)
{
int yheight = sectorDiam;
rect = new Rectangle(x * sectorDiam, y * sectorDiam, xwidth, yheight);
renderSectors.Add(rect);
}
}
var Picrect = new Rectangle(0, 0, image.Width, image.Height);
var data = image.Bitmap.LockBits(Picrect, ImageLockMode.ReadWrite, image.Bitmap.PixelFormat);
var depth = Bitmap.GetPixelFormatSize(data.PixelFormat) / 8; //bytes per pixel
var buffer = new byte[data.Width * data.Height * depth];
Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
Parallel.ForEach(renderSectors, sector =>
{
BlurSection(buffer, sector, Picrect, radius, image.Width, image.Height, depth);
}
);
Marshal.Copy(buffer, 0, data.Scan0, buffer.Length);
image.Bitmap.UnlockBits(data);
return image;
}
And here's the method for each section of the image to be blurred.
public static void BlurSection(byte[] buffer, Rectangle blurSector, Rectangle bitmaprect, int radius, int width, int height, int depth)
{
int[] Accum = new int[4];
for (int x = blurSector.X; x < blurSector.Width+ blurSector.X; x++)
{
for (int y = blurSector.Y; y < blurSector.Height + blurSector.Y; y++)
{
Accum[0] = 0;
Accum[1] = 0;
Accum[2] = 0;
Accum[3] = 0;
for (int i = -radius; i <= radius; i++)
{
for (int j = -radius; j <= radius; j++)
{
var offset = 0;
offset = (((y+j) * width) + (x+i)) * depth;
if (bitmaprect.Contains(new Point(x + i, y + j))){
Accum[0] += buffer[offset + 0];
Accum[1] += buffer[offset + 1];
Accum[2] += buffer[offset + 2];
Accum[3]++;
}
}
}
Accum[0] = Accum[0] / Accum[3];
if (Accum[0] > 255)
{
Accum[0] = 255;
}
Accum[1] = Accum[1] / Accum[3];
if (Accum[1] > 255)
{
Accum[1] = 255;
}
Accum[2] = Accum[2] / Accum[3];
if (Accum[2] > 255)
{
Accum[2] = 255;
}
var newoffset = ((y * width) + (x * depth*2));
buffer[newoffset + 0] = (byte)Accum[0];
buffer[newoffset + 1] = (byte)Accum[1];
buffer[newoffset + 2] = (byte)Accum[2];
}
}
}
It's also worth noting that I'm using a helper Bitmap class, the "DirectBitmap" found here, to make access to pixel data much easier: https://stackoverflow.com/a/34801225/15473435. Is there anything I'm missing or not aware of that's causing this algorithm to misbehave?
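Just to make the indexing explicit: for a tightly packed buffer with depth bytes per pixel, reads and writes are normally addressed with the same (y * width + x) * depth formula, which is what the accumulator loop above uses on the read side. A hypothetical version of the final write in BlurSection using that same formula (an illustration of the convention, not the code from the question):
// Write the averaged pixel back with the same offset arithmetic as the reads.
var newoffset = ((y * width) + x) * depth;
buffer[newoffset + 0] = (byte)Accum[0];
buffer[newoffset + 1] = (byte)Accum[1];
buffer[newoffset + 2] = (byte)Accum[2];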
I have this piece of code which converts a bitmap with the 32bppArgb pixel format to a 1D byte[] array:
using (var bitmap = new Bitmap(width, height, PixelFormat.Format32bppArgb))
{
var boundsRect = new Rectangle(0, 0, width, height);
// Copy pixels from screen capture Texture to GDI bitmap
var mapDest = bitmap.LockBits(boundsRect, ImageLockMode.ReadOnly, bitmap.PixelFormat);
var sourcePtr = mapSource.DataPointer;
var destPtr = mapDest.Scan0;
for (int y = 0; y < height; y++)
{
// Copy a single line
Utilities.CopyMemory(destPtr, sourcePtr, width * 4);
// Advance pointers
sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
destPtr = IntPtr.Add(destPtr, mapDest.Stride);
}
// Release source and dest locks
bitmap.UnlockBits(mapDest);
device.ImmediateContext.UnmapSubresource(screenTexture, 0);
using (var ms = new MemoryStream())
{
bitmap.Save(ms, ImageFormat.Bmp);
ScreenRefreshed?.Invoke(this, ms.ToArray());
_init = true;
}
}
I call my function ReplacePixels() to read and replace the RGBA values like this:
data = ReplacePixels(data);
data is the byte[] array produced by the code above.
This is the function I tried, without success:
private byte[] ReplacePixels (byte[] data)
{
int width = Screen.PrimaryScreen.Bounds.Width;
int height = Screen.PrimaryScreen.Bounds.Height;
Int32 curRowOffs = 0;
Int32 stride = 4 * (width * 4 + 31) / 32;
try
{
for (uint y = 0; y < height; y++)
{
// Set offset to start of current row
Int32 curOffs = curRowOffs;
for (uint x = 0; x < width; x++)
{
// ARGB = bytes [B,G,R,A]
var b = data[curOffs];
var g = data[curOffs + 1];
var r = data[curOffs + 2];
var a = data[curOffs + 3];
//bgra changes here..
//apply bgra values
data[curOffs] = b;
data[curOffs + 1] = g;
data[curOffs + 2] = r;
data[curOffs + 3] = a;
// Increase offset to next colour
curOffs += 4;
}
// Increase row offset
curRowOffs += stride;
}
}
catch (System.Exception e)
{
Debug.WriteLine(e);
}
return data;
}
The question is: how can I read and replace the ARGB values in this array?
Edit: this is the solution that I found
public byte[] ReplacePixels(byte[] data)
{
int width = Screen.PrimaryScreen.Bounds.Width;
int height = Screen.PrimaryScreen.Bounds.Height;
Int32 stride = width * 4;
Int32 curRowOffs = (((width * height * 4) + 54) - 1) - stride;
for (uint y = 0; y < height; y++)
{
uint index = (uint)curRowOffs;
for (uint x = 0; x < width; x++)
{
// ARGB = bytes [B,G,R,A]
if (index >= 0)
{
//var rgba = GetRGB(data, index);
var b = data[index];
var g = data[index + 1];
var r = data[index + 2];
var a = data[index + 3];
//bgra changes here...
data[index] = b;
data[index + 1] = g;
data[index + 2] = r;
data[index + 3] = a;
}
index += 4;
}
curRowOffs -= stride;
}
return data;
}
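For what it's worth, the constants in this solution match the layout of a .bmp file: the byte[] comes from bitmap.Save(ms, ImageFormat.Bmp), so it begins with a 54-byte header (14-byte file header plus 40-byte BITMAPINFOHEADER) and the pixel rows are stored bottom-up, which is why the loop starts near the end of the array and walks back by one stride per row. A sketch of addressing a single pixel (x, y) under those assumptions (32bpp, so rows need no padding):
// Assumes a 32bpp BMP produced by Bitmap.Save(ms, ImageFormat.Bmp).
const int headerSize = 54;              // 14-byte file header + 40-byte BITMAPINFOHEADER
int stride = width * 4;                 // 32bpp rows are already 4-byte aligned
int PixelOffset(int x, int y) =>
    headerSize + (height - 1 - y) * stride + x * 4; // bytes at +0..+3 are B, G, R, A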
I'm trying to implement a webcam capture app which should take still frames, display them on the screen, and save them to disk.
Since I'm already using SharpDX to capture the screen, I thought it would be nice to use that library here too. I was not sure whether SharpDX had any video capture capabilities, so I started searching and found parts of what looks like a webcam capture prototype:
var attributes = new MediaAttributes(1);
attributes.Set<Guid>(CaptureDeviceAttributeKeys.SourceType, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var activates = MediaFactory.EnumDeviceSources(attributes);
var dic = new Dictionary<string, Activate>();
foreach (var activate in activates)
{
var uid = activate.Get(CaptureDeviceAttributeKeys.SourceTypeVidcapSymbolicLink);
dic.Add(uid, activate);
}
var camera = dic.First().Value;
This gives me a camera with a strange uid; I'm not sure if it's correct.
What am I supposed to do after this?
Edit
I got this code kind of working. I still don't understand why the output is strange.
var attributes = new MediaAttributes(1);
attributes.Set(CaptureDeviceAttributeKeys.SourceType.Guid, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var mediaSource = MediaFactory.EnumDeviceSources(attributes)[0].ActivateObject<MediaSource>();
mediaSource.CreatePresentationDescriptor(out var presentationDescriptor);
var reader = new SourceReader(mediaSource);
var mediaTypeIndex = 0;
int width, height;
using (var mt = reader.GetNativeMediaType(0, mediaTypeIndex))
{
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameSize), out width, out height);
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameRate), out var frameRateNumerator, out var frameRateDenominator);
UnpackLong(mt.Get(MediaTypeAttributeKeys.PixelAspectRatio), out var aspectRatioNumerator, out var aspectRatioDenominator);
}
var sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out var readStreamIndex, out var readFlags, out var timestamp);
if (sample == null)
sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out readStreamIndex, out readFlags, out timestamp);
var sourceBuffer = sample.GetBufferByIndex(0); // sample.ConvertToContiguousBuffer();
var sourcePointer = sourceBuffer.Lock(out var maxLength, out var currentLength);
var data = new byte[sample.TotalLength];
Marshal.Copy(sourcePointer, data, 0, sample.TotalLength);
var newData = new byte[width * 4 * height];
var partWidth = width / 4;
var partHeight = height / 3;
for (var i = 0; i < sample.TotalLength; i += 4)
{
//X8R8B8G8 -> BGRA = 4
newData[i] = data[i + 3];
newData[i + 1] = data[i + 2];
newData[i + 2] = data[i + 1];
newData[i + 3] = 255; //data[i];
}
//var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, data, ((width * 24 + 31) / 32) * 4);
var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, newData, width * 4);
sourceBuffer.Unlock();
sourceBuffer.Dispose();
The output image is this (I was showing a color spectrum to my webcam):
The image repeats four times; each part has a grayscale image and a color version at half the height.
Two-thirds of the image is transparent.
Your output is NV12; here's some sample code to convert NV12 to RGB:
unsafe private static void TransformImage_NV12(IntPtr pDest, int lDestStride, IntPtr pSrc, int lSrcStride, int dwWidthInPixels, int dwHeightInPixels)
{
uint imageWidth = (uint)dwWidthInPixels;
uint widthHalf = imageWidth / 2;
uint imageHeight = (uint)dwHeightInPixels;
byte* nv12Data = (byte*)pSrc;
byte* rgbData = (byte*)pDest;
uint dataSize = imageWidth * imageHeight * 3;
for (uint y = 0; y < imageHeight; y++)
{
for (uint x = 0; x < imageWidth; x++)
{
uint xEven = x & 0xFFFFFFFE;
uint yEven = y & 0xFFFFFFFE;
uint yIndex = y * imageWidth + x;
uint cIndex = imageWidth * imageHeight + yEven * widthHalf + xEven;
byte yy = nv12Data[yIndex];
byte cr = nv12Data[cIndex + 0];
byte cb = nv12Data[cIndex + 1];
uint outputIndex = (dataSize - (y * imageWidth + x) * 3) - 3;
rgbData[outputIndex + 0] = (byte)Math.Min(Math.Max((yy + 1.402 * (cr - 128)), 0), 255);
rgbData[outputIndex + 1] = (byte)Math.Min(Math.Max((yy - 0.344 * (cb - 128) - 0.714 * (cr - 128)), 0), 255);
rgbData[outputIndex + 2] = (byte)Math.Min(Math.Max((yy + 1.772 * (cb - 128)), 0), 255);
}
}
}
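A hedged sketch of how this conversion might be wired into the SourceReader code from the question; the destination buffer, the strides, and the output pixel format are assumptions, not part of the answer above:
// Convert the locked NV12 sample buffer (sourcePointer, from sourceBuffer.Lock) to 24-bit RGB.
// Assumes the luma plane pitch equals the frame width and a tightly packed RGB destination.
var rgbData = new byte[width * height * 3];
var handle = System.Runtime.InteropServices.GCHandle.Alloc(rgbData, System.Runtime.InteropServices.GCHandleType.Pinned);
try
{
    TransformImage_NV12(handle.AddrOfPinnedObject(), width * 3, sourcePointer, width, width, height);
}
finally
{
    handle.Free();
}
// Note: as written, the converter fills the destination from the end backwards,
// so the frame comes out rotated by 180 degrees.
var rgbSource = BitmapSource.Create(width, height, 96, 96, PixelFormats.Rgb24, null, rgbData, width * 3);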
I'm working on a simple drawing application where I can 'paint' on top of an existing image. I've made a little headway, but I've noticed a weird issue with the alpha channel of the bitmap I'm displaying that doesn't seem quite right. My draw brush function looks like this:
public unsafe void BitmapDrawBrush(double _x, double _y, double _radius, double _falloff, double _strength)
{
if (DisplayBmp == null)
{
DisplayBmp = new Bitmap(ImageWidth, ImageHeight, PixelFormat.Format32bppArgb);
}
const int pixelSize = 4; // 32 bits per pixel
Bitmap target = new Bitmap(DisplayBmp.Width, DisplayBmp.Height, PixelFormat.Format32bppArgb);
BitmapData sourceData = null, targetData = null;
try
{
sourceData = DisplayBmp.LockBits(new Rectangle(0, 0, DisplayBmp.Width, DisplayBmp.Height),ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
targetData = target.LockBits(new Rectangle(0, 0, target.Width, target.Height),ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
for (int y = 0; y < DisplayBmp.Height; ++y)
{
byte* sourceRow = (byte*)sourceData.Scan0 + (y * sourceData.Stride);
byte* targetRow = (byte*)targetData.Scan0 + (y * targetData.Stride);
for (int x = 0; x < DisplayBmp.Width; ++x)
{
byte b = sourceRow[x * pixelSize + 0];
byte g = sourceRow[x * pixelSize + 1];
byte r = sourceRow[x * pixelSize + 2];
byte a = sourceRow[x * pixelSize + 3];
double nx = x / (double)ImageWidth;
double ny = y/(double)ImageHeight;
double xDist = nx - _x;
double yDist = ny - _y;
if ((xDist * xDist) + (yDist * yDist) <= (_radius * _radius))
{
double pxDist = 1.0 - (((xDist * xDist) + (yDist * yDist)) / (_radius * _radius));
r = (byte)(255 * pxDist);
g = (byte)(255 * pxDist);
b = (byte)(255 * pxDist);
a = (byte)(255 * pxDist * _strength); // <-the alpha channel value
}
targetRow[x * pixelSize + 0] = b;
targetRow[x * pixelSize + 1] = g;
targetRow[x * pixelSize + 2] = r;
targetRow[x * pixelSize + 3] = a;
}
}
}
finally
{
if (sourceData != null)
DisplayBmp.UnlockBits(sourceData);
if (targetData != null)
target.UnlockBits(targetData);
}
DisplayBmp = target;
UpdateBitmap();
}
Yet when I run my application and place a few brush strokes (the draw brush function is currently only called on mouse-down, so really I'm just drawing dots), the alpha channel doesn't seem to fade toward the edge. What I would expect is that as the 'dot' gets more black, the alpha value diminishes along with it. See below. Any ideas as to why this is happening?
I'm working on a screen-sharing app that runs a loop and grabs screenshots quickly using GDI methods (example here).
Of course I also use a flood-fill algorithm to find the changed areas between two images (the previous screenshot and the current one).
I use another small trick: I downscale the snapshot resolution by a factor of 10, because constantly processing 1920*1080 = 2,073,600 pixels is not very efficient.
However, when I find a rectangle's bounds, I apply it to the original full-size bitmap by simply multiplying each dimension by 10 (top, left, width, and height).
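In code, that mapping is just a per-component multiplication; a trivial sketch of what is applied to each rectangle found on the 1/10-scale image (CodeImage below does the same thing inline):
// Map a rectangle found on the downscaled (1/10) image back to the full-size frame.
static Rectangle ScaleUp(Rectangle r, int factor) =>
    new Rectangle(r.X * factor, r.Y * factor, r.Width * factor, r.Height * factor);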
This is the scanning code:
unsafe bool ArePixelsEqual(byte* p1, byte* p2, int bytesPerPixel)
{
for (int i = 0; i < bytesPerPixel; ++i)
if (p1[i] != p2[i])
return false;
return true;
}
private unsafe List<Rectangle> CodeImage(Bitmap bmp, Bitmap bmp2)
{
List<Rectangle> rec = new List<Rectangle>();
var bmData1 = bmp.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
var bmData2 = bmp2.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
int bytesPerPixel = 4;
IntPtr scan01 = bmData1.Scan0;
IntPtr scan02 = bmData2.Scan0;
int stride1 = bmData1.Stride;
int stride2 = bmData2.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
bool[] visited = new bool[nWidth * nHeight];
byte* base1 = (byte*)scan01.ToPointer();
byte* base2 = (byte*)scan02.ToPointer();
for (int y = 0; y < nHeight; y++)
{
byte* p1 = base1;
byte* p2 = base2;
for (int x = 0; x < nWidth; ++x)
{
if (!ArePixelsEqual(p1, p2, bytesPerPixel) && !(visited[x + nWidth * y]))
{
// fill the different area
int minX = x;
int maxX = x;
int minY = y;
int maxY = y;
var pt = new Point(x, y);
Stack<Point> toBeProcessed = new Stack<Point>();
visited[x + nWidth * y] = true;
toBeProcessed.Push(pt);
while (toBeProcessed.Count > 0)
{
var process = toBeProcessed.Pop();
var ptr1 = (byte*)scan01.ToPointer() + process.Y * stride1 + process.X * bytesPerPixel;
var ptr2 = (byte*)scan02.ToPointer() + process.Y * stride2 + process.X * bytesPerPixel;
//Check pixel equality
if (ArePixelsEqual(ptr1, ptr2, bytesPerPixel))
continue;
//This pixel is different
//Update the rectangle
if (process.X < minX) minX = process.X;
if (process.X > maxX) maxX = process.X;
if (process.Y < minY) minY = process.Y;
if (process.Y > maxY) maxY = process.Y;
Point n; int idx;
//Put neighbors in stack
if (process.X - 1 >= 0)
{
n = new Point(process.X - 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.X + 1 < nWidth)
{
n = new Point(process.X + 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y - 1 >= 0)
{
n = new Point(process.X, process.Y - 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y + 1 < nHeight)
{
n = new Point(process.X, process.Y + 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
}
//finally set a rectangle.
Rectangle r = new Rectangle(minX * 10, minY * 10, (maxX - minX + 1) * 10, (maxY - minY + 1) * 10);
rec.Add(r);
//got the rectangle; now I'll do whatever I want with it.
//note: I scaled everything by 10 because I want to apply the changes to the original 1920x1080 image.
}
p1 += bytesPerPixel;
p2 += bytesPerPixel;
}
base1 += stride1;
base2 += stride2;
}
bmp.UnlockBits(bmData1);
bmp2.UnlockBits(bmData2);
return rec;
}
This is my call:
private void Start()
{
full1 = GetDesktopImage();//the first,intial screen.
while (true)
{
full2 = GetDesktopImage();
a = new Bitmap(full1, 192, 108);//resizing for faster processing the images.
b = new Bitmap(full2, 192, 108); // resizing for faster processing the images.
CodeImage(a, b);
count++; // counter for the performance.
full1 = full2; // assign old to current bitmap.
}
}
However, even with all these tricks and techniques, the algorithm runs quite slowly. On my machine (Intel i5 4670K, 3.4 GHz) it manages only about 20 iterations per second at best, and it can drop lower. That may sound fast (keep in mind I also have to send each changed area over the network afterwards), but I'm aiming for more processed images per second. I think the main bottleneck is the resizing of the two images, although I assumed resizing would make things faster because there are far fewer pixels to loop through: only 192*108 = 20,736.
I would appreciate any help, any improvement. Thanks.