The following routine is meant to sharpen an 8-bit indexed grayscale image only.
The code seems to have no effect on the input image; whatever goes in comes out unchanged.
If I increase the value of strength, the image gets darker, but it is never actually sharpened.
What could possibly be going wrong?
I am using the following kernel:
double[,] _numericalKernel = new double[,]
{ { -1, -1, -1, },
{ -1, 9, -1, },
{ -1, -1, -1, }, };
The following is my source code for sharpening:
public static Bitmap NonfftSharpen(Bitmap image, double[,] mask, double strength)
{
Bitmap bitmap = (Bitmap)image.Clone();
if (bitmap != null)
{
int width = bitmap.Width;
int height = bitmap.Height;
if (mask.GetLength(0) != mask.GetLength(1))
{
throw new Exception("_numericalKernel dimensions must be same");
}
// Create sharpening filter.
int filterSize = mask.GetLength(0);
double[,] filter = (double[,])mask.Clone();
int channels = sizeof(byte);
double bias = 1.0 - strength;
double factor = strength / 16.0;
int halfOfFilerSize = filterSize / 2;
byte[,] result = new byte[bitmap.Width, bitmap.Height];
// Lock image bits for read/write.
BitmapData bitmapData = bitmap.LockBits(new Rectangle(0, 0, width, height),
ImageLockMode.ReadWrite,
PixelFormat.Format8bppIndexed);
// Declare an array to hold the bytes of the bitmap.
int memorySize = bitmapData.Stride * height;
byte[] memory = new byte[memorySize];
// Copy the RGB values into the local array.
Marshal.Copy(bitmapData.Scan0, memory, 0, memorySize);
int rgb;
// Fill the color array with the new sharpened color values.
for (int y = halfOfFilerSize; y < height - halfOfFilerSize; y++)
{
for (int x = halfOfFilerSize; x < width - halfOfFilerSize; x++)
{
for (int filterY = 0; filterY < filterSize; filterY++)
{
double grayShade = 0.0;
for (int filterX = 0; filterX < filterSize; filterX++)
{
int imageX = (x - halfOfFilerSize + filterX + width) % width;
int imageY = (y - halfOfFilerSize + filterY + height) % height;
rgb = imageY * bitmapData.Stride + channels * imageX;
grayShade += memory[rgb + 0] * filter[filterX, filterY];
}
rgb = y * bitmapData.Stride + channels * x;
int b = Math.Min(Math.Max((int)(factor * grayShade + (bias * memory[rgb + 0])), 0), 255);
result[x, y] = (byte)b;
}
}
}
// Update the image with the sharpened pixels.
for (int x = halfOfFilerSize; x < width - halfOfFilerSize; x++)
{
for (int y = halfOfFilerSize; y < height - halfOfFilerSize; y++)
{
rgb = y * bitmapData.Stride + channels * x;
memory[rgb + 0] = result[x, y];
}
}
// Copy the RGB values back to the bitmap.
Marshal.Copy(memory, 0, bitmapData.Scan0, memorySize);
// Release image bits.
bitmap.UnlockBits(bitmapData);
return bitmap;
}
else
{
throw new Exception("input image can't be null");
}
}
I've changed your function a bit to make it work. The main problem in your version is that the grayShade accumulator is reset, and result[x, y] is written, inside the filterY loop, so only the last row of the kernel ever contributes to the final pixel; combined with factor = strength / 16.0 and bias = 1.0 - strength, a small strength leaves the image practically unchanged and a larger one only darkens it.
Note that the strength parameter has no effect in my function. You can play with the bias and factor values to get different results in brightness and so on.
public static Bitmap NonfftSharpen(Bitmap image, double[,] mask, double strength)
{
Bitmap bitmap = (Bitmap)image.Clone();
if (bitmap != null)
{
int width = bitmap.Width;
int height = bitmap.Height;
if (mask.GetLength(0) != mask.GetLength(1))
{
throw new Exception("_numericalKernel dimensions must be same");
}
// Create sharpening filter.
int filterSize = mask.GetLength(0);
double[,] filter = (double[,])mask.Clone();
int channels = sizeof(byte);
double bias = 0.0; // 1.0 - strength;
double factor = 1.0; // strength / 16.0;
byte[,] result = new byte[bitmap.Width, bitmap.Height];
// Lock image bits for read/write.
BitmapData bitmapData = bitmap.LockBits(new System.Drawing.Rectangle(0, 0, width, height),
ImageLockMode.ReadWrite,
System.Drawing.Imaging.PixelFormat.Format8bppIndexed);
// Declare an array to hold the bytes of the bitmap.
int memorySize = bitmapData.Stride * height;
byte[] memory = new byte[memorySize];
// Copy the RGB values into the local array.
Marshal.Copy(bitmapData.Scan0, memory, 0, memorySize);
int pixel;
// Fill the color array with the new sharpened color values.
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
double grayShade = 0.0;
for (int filterY = 0; filterY < filterSize; filterY++)
{
for (int filterX = 0; filterX < filterSize; filterX++)
{
int imageX = (x - filterSize / 2 + filterX + width) % width;
int imageY = (y - filterSize / 2 + filterY + height) % height;
pixel = imageY * bitmapData.Stride + channels * imageX;
grayShade += memory[pixel] * filter[filterX, filterY];
}
int newPixel = Math.Min(Math.Max((int)(factor * grayShade + bias), 0), 255);
result[x, y] = (byte)newPixel;
}
}
}
// Update the image with the sharpened pixels.
for (int x = 0; x < width; x++)
{
for (int y = 0; y < height; y++)
{
pixel = y * bitmapData.Stride + channels * x;
memory[pixel] = result[x, y];
}
}
// Copy the values back to the bitmap.
Marshal.Copy(memory, 0, bitmapData.Scan0, memorySize);
// Release image bits.
bitmap.UnlockBits(bitmapData);
return bitmap;
}
else
{
throw new Exception("input image can't be null");
}
}
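For reference, a call could look like this; just a sketch, assuming the input really is an 8bpp indexed grayscale bitmap, with purely illustrative file names:
double[,] kernel = new double[,]
{
    { -1, -1, -1 },
    { -1,  9, -1 },
    { -1, -1, -1 },
};
Bitmap input = (Bitmap)Image.FromFile("gray8bpp.png");  // must be Format8bppIndexed
Bitmap sharpened = NonfftSharpen(input, kernel, 1.0);   // strength is ignored in this version
sharpened.Save("sharpened.png", ImageFormat.Png);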
I hope this gets you going :)
Regards
I am trying to convert some Java code into C# for an application. I can't seem to find an exact equivalent of the setPixels method of android.Graphics.Bitmap in C# .NET.
Here is the Java code:
BitMatrix result = some code .....
int w = result.getWidth();
int h = result.getHeight();
int[] pixels = new int[w * h];
for (int y = 0; y < h; y++) {
int offset = y * w;
for (int x = 0; x < w; x++) {
pixels[offset + x] = result.get(x, y) ? BLACK : WHITE;
}
}
Bitmap bitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
bitmap.setPixels(pixels, 0, 480, 0, 0, w, h);
Here is my attempt at converting it into C#:
int w = result.Width;
int h = result.Height;
int[] pixels = new int[w * h];
for (int y = 0; y < h; y++)
{
int offset = y * w;
for (int x = 0; x < w; x++)
{
pixels[offset + x] = result[x, y] ? Color.Black.ToArgb() : Color.White.ToArgb();
}
}
Bitmap bitmap = new Bitmap(w, h);
// how to convert the line below
bitmap.setPixels(pixels, 0, 480, 0, 0, w, h);
How should I go about converting the last line into C#? Any suggestions?
The simplest solution is to replace
pixels[offset + x] = result.get(x, y) ? BLACK : WHITE;
with
bitmap.SetPixel(x, y, result[x, y] ? Color.Black : Color.White);
This will be rather slow, since it will take some locks for each pixel set. You will also need to create the bitmap first.
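Spelled out, that slow-but-simple version could look like this (only a sketch; result is assumed to expose a bool indexer as in your code):
Bitmap bitmap = new Bitmap(w, h, PixelFormat.Format32bppArgb);
for (int y = 0; y < h; y++)
{
    for (int x = 0; x < w; x++)
    {
        // SetPixel takes a lock on the bitmap for every single call, which is why this is slow.
        bitmap.SetPixel(x, y, result[x, y] ? Color.Black : Color.White);
    }
}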
A faster way is to use unsafe code to get a pointer to the bitmap data; see fast work with bitmaps. Assuming a 32-bit ARGB bitmap:
var bData = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadWrite, bitmap.PixelFormat);
byte* scan0 = (byte*)bData.Scan0.ToPointer();
for (int y = 0; y < bData.Height; ++y)
{
    // Stride is already in bytes; treat each row as 32-bit ARGB values.
    int* rowPtr = (int*)(scan0 + y * bData.Stride);
    for (int x = 0; x < bData.Width; ++x)
    {
        rowPtr[x] = result[x, y] ? Color.Black.ToArgb() : Color.White.ToArgb();
    }
}
bitmap.UnlockBits(bData);
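If you would rather avoid compiling with /unsafe, the closest single-call equivalent of setPixels is Marshal.Copy, which writes the whole int[] into a locked bitmap at once. A sketch, assuming a 32bppArgb bitmap (whose stride is exactly w * 4 bytes):
Bitmap bitmap = new Bitmap(w, h, PixelFormat.Format32bppArgb);
BitmapData bData = bitmap.LockBits(new Rectangle(0, 0, w, h), ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
// One copy for the whole image; pixels[] already holds the ARGB values row by row.
Marshal.Copy(pixels, 0, bData.Scan0, pixels.Length);
bitmap.UnlockBits(bData);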
I'm currently in the development phase of a photo-converter program and I'm working on a blur filter. At the initial prototyping stage I devised an algorithm with an accumulator for each color channel that adds up all the pixels within a given radius of the target pixel; the program then divides each accumulator by the number of pixels actually read (not counting those off-screen). At first I thought this would be fine, but once it started to work the filter took an hour to render, with this being the result at the lowest setting.
So I opted to use parallel processing in C# to make the process much easier and faster to run. With the boost in speed came the cost of the image becoming very glitched. Here's the image before, and here's the image afterwards.
This is the code I wrote for the filter
public static DirectBitmap NewBlur (DirectBitmap image, int radius)
{
int sectorDiam = 128;
DirectBitmap newimage = image;
List<Rectangle> renderSectors = new List<Rectangle>();
Rectangle rect;
for (int x = 0; x < (image.Width / sectorDiam); x++)
{
int xwidth = sectorDiam;
for (int y = 0; y < (image.Height / sectorDiam); y++)
{
int yheight = sectorDiam;
rect = new Rectangle(x * sectorDiam, y * sectorDiam, xwidth, yheight);
renderSectors.Add(rect);
}
}
var Picrect = new Rectangle(0, 0, image.Width, image.Height);
var data = image.Bitmap.LockBits(Picrect, ImageLockMode.ReadWrite, image.Bitmap.PixelFormat);
var depth = Bitmap.GetPixelFormatSize(data.PixelFormat) / 8; //bytes per pixel
var buffer = new byte[data.Width * data.Height * depth];
Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
Parallel.ForEach(renderSectors, sector =>
{
BlurSection(buffer, sector, Picrect, radius, image.Width, image.Height, depth);
}
);
Marshal.Copy(buffer, 0, data.Scan0, buffer.Length);
image.Bitmap.UnlockBits(data);
return image;
}
And here's the method for each section of the image to be blurred.
public static void BlurSection(byte[] buffer, Rectangle blurSector, Rectangle bitmaprect, int radius, int width, int height, int depth)
{
int[] Accum = new int[4];
for (int x = blurSector.X; x < blurSector.Width+ blurSector.X; x++)
{
for (int y = blurSector.Y; y < blurSector.Height + blurSector.Y; y++)
{
Accum[0] = 0;
Accum[1] = 0;
Accum[2] = 0;
Accum[3] = 0;
for (int i = -radius; i <= radius; i++)
{
for (int j = -radius; j <= radius; j++)
{
var offset = 0;
offset = (((y+j) * width) + (x+i)) * depth;
if (bitmaprect.Contains(new Point(x + i, y + j))){
Accum[0] += buffer[offset + 0];
Accum[1] += buffer[offset + 1];
Accum[2] += buffer[offset + 2];
Accum[3]++;
}
}
}
Accum[0] = Accum[0] / Accum[3];
if (Accum[0] > 255)
{
Accum[0] = 255;
}
Accum[1] = Accum[1] / Accum[3];
if (Accum[1] > 255)
{
Accum[1] = 255;
}
Accum[2] = Accum[2] / Accum[3];
if (Accum[2] > 255)
{
Accum[2] = 255;
}
var newoffset = ((y * width) + (x * depth*2));
buffer[newoffset + 0] = (byte)Accum[0];
buffer[newoffset + 1] = (byte)Accum[1];
buffer[newoffset + 2] = (byte)Accum[2];
}
}
}
It's also worth noting that I'm using a wrapper class to make access to pixel data much easier, the "DirectBitmap" you can find here: https://stackoverflow.com/a/34801225/15473435. Is there anything I'm missing or not aware of that's causing this algorithm to misbehave?
I am trying to get the color of a specific area in an image.
Assume this is the image, and I want to get the color inside it (the result should be the red of the image above). This color may be at a different position in each image. Because I don't know the exact position where the color starts, I can't get an exact result.
Until now I have cropped the image by manually giving an x and y position, and then taken the average color of the cropped image. But I know this is not the exact color.
What I tried:
private RgbDto GetRGBvalueCroppedImage(Image croppedImage)
{
var avgRgb = new RgbDto();
var bm = new Bitmap(croppedImage);
BitmapData srcData = bm.LockBits(
new Rectangle(0, 0, bm.Width, bm.Height),
ImageLockMode.ReadOnly,
PixelFormat.Format32bppArgb);
int stride = srcData.Stride;
IntPtr Scan0 = srcData.Scan0;
long[] totals = new long[] { 0, 0, 0 };
int width = bm.Width;
int height = bm.Height;
unsafe
{
byte* p = (byte*)(void*)Scan0;
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
for (int color = 0; color < 3; color++)
{
int idx = (y * stride) + x * 4 + color;
totals[color] += p[idx];
}
}
}
}
avgRgb.avgB = (int)totals[0] / (width * height);
avgRgb.avgG = (int)totals[1] / (width * height);
avgRgb.avgR = (int)totals[2] / (width * height);
return avgRgb;
}
How can I get the exact position to crop? Maybe I can convert the image to a byte array, find the different color, take its position and then crop, but I have no clue how to do this.
You can use something like this extension method to get the dominant color in a region of an image, in case the pixels there are not all the same:
public static Color GetDominantColor(this Bitmap bitmap, int startX, int startY, int width, int height) {
var maxWidth = bitmap.Width;
var maxHeight = bitmap.Height;
//TODO: validate the region being requested
//Used for tally
int r = 0;
int g = 0;
int b = 0;
int totalPixels = 0;
for (int x = startX; x < (startX + width); x++) {
for (int y = startY; y < (startY + height); y++) {
Color c = bitmap.GetPixel(x, y);
r += Convert.ToInt32(c.R);
g += Convert.ToInt32(c.G);
b += Convert.ToInt32(c.B);
totalPixels++;
}
}
r /= totalPixels;
g /= totalPixels;
b /= totalPixels;
Color color = Color.FromArgb(255, (byte)r, (byte)g, (byte)b);
return color;
}
You can then use it like
Color pixelColor = myBitmap.GetDominantColor(xPixel, yPixel, 5, 5);
There is room for improvement, like using a Point and Size, or even a Rectangle:
public static Color GetDominantColor(this Bitmap bitmap, Rectangle area) {
return bitmap.GetDominantColor(area.X, area.Y, area.Width, area.Height);
}
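For example (hypothetical coordinates):
Color dominant = myBitmap.GetDominantColor(new Rectangle(xPixel, yPixel, 5, 5));
Remember that extension methods have to live in a static class.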
Also, have a look at this link:
https://www.c-sharpcorner.com/UploadFile/0f68f2/color-detecting-in-an-image-in-C-Sharp/
If you want to get the image colors, you don't need to do any cropping at all. Just loop over the image pixels and find the two different colors (assuming you already know the image will have exactly two colors, as you said in the comments). I've written a small function that will do that. However, I didn't test it in an IDE, so expect some small mistakes:
private static Color[] GetColors(Image image)
{
var bmp = new Bitmap(image);
var colors = new Color[2];
colors[0] = bmp.GetPixel(0, 0);
for (int i = 0; i < bmp.Width; i++)
{
for (int j = 0; j < bmp.Height; j++)
{
Color c = bmp.GetPixel(i, j);
if (c == colors[0]) continue;
colors[1] = c;
return colors;
}
}
return colors;
}
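A quick way to try it (hypothetical file name, and assuming the picture really contains exactly two colors):
Color[] found = GetColors(Image.FromFile("sample.png"));
Console.WriteLine($"First color: {found[0]}, second color: {found[1]}");
If the loop never finds a second color, colors[1] simply keeps its default value, so you may want to check for that.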
I'm using the following code to remove whitespace around an image.
static Bitmap TrimBitmap(Bitmap source)
{
Rectangle srcRect = default(Rectangle);
BitmapData data = null;
try
{
data = source.LockBits(new Rectangle(0, 0, source.Width, source.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
byte[] buffer = new byte[data.Height * data.Stride];
Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
int xMin = int.MaxValue,
xMax = int.MinValue,
yMin = int.MaxValue,
yMax = int.MinValue;
bool foundPixel = false;
// Find xMin
for (int x = 0; x < data.Width; x++)
{
bool stop = false;
for (int y = 0; y < data.Height; y++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
xMin = x;
stop = true;
foundPixel = true;
break;
}
}
if (stop)
break;
}
// Image is empty...
if (!foundPixel)
return null;
// Find yMin
for (int y = 0; y < data.Height; y++)
{
bool stop = false;
for (int x = xMin; x < data.Width; x++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
yMin = y;
stop = true;
break;
}
}
if (stop)
break;
}
// Find xMax
for (int x = data.Width - 1; x >= xMin; x--)
{
bool stop = false;
for (int y = yMin; y < data.Height; y++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
xMax = x;
stop = true;
break;
}
}
if (stop)
break;
}
// Find yMax
for (int y = data.Height - 1; y >= yMin; y--)
{
bool stop = false;
for (int x = xMin; x <= xMax; x++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
yMax = y;
stop = true;
break;
}
}
if (stop)
break;
}
srcRect = Rectangle.FromLTRB(xMin, yMin, xMax , yMax);
}
finally
{
if (data != null)
source.UnlockBits(data);
}
Bitmap dest = new Bitmap(srcRect.Width, srcRect.Height);
Rectangle destRect = new Rectangle(0, 0, srcRect.Width, srcRect.Height);
using (Graphics graphics = Graphics.FromImage(dest))
{
graphics.DrawImage(source, destRect, srcRect, GraphicsUnit.Pixel);
}
return dest;
}
I'm trying to trim a Bitmap with text drawn on it. The proper image should look like this after trimming.
But after trimming I get the following result, with the bottom portion clipped off.
What am I doing wrong? Please advise.
This is actually a problem with Rectangle.FromLTRB!
Looking closer at the images, you will find that you have actually lost only one row of pixels. (The strong magnification had me fooled for a while.)
The algorithm that determines the Height (and Width) of the rectangle is basically right, but off by one.
If you use this
srcRect = Rectangle.FromLTRB(xMin, yMin, xMax + 1 , yMax + 1);
or this:
srcRect = new Rectangle(xMin, yMin, xMax - xMin + 1 , yMax - yMin + 1);
it should work.
You can check it with pen and paper: say the first pixel row that contains color is row 4 and the last one on a 10-pixel square is row 8; that makes 5 rows of actual data (4, 5, 6, 7, 8), not 8 - 4 = 4.
Do note that this issue is inherent in FromLTRB:
Rectangle myRectangle = Rectangle.FromLTRB(0, 0, 10, 10);
..results in a Rectangle with Height = 10, even though 0..10 should cover 11 pixel rows. So the right/bottom coordinate is actually excluded from the result!
I think the whole issue of rectangles being off by one stems from the legacy way a Pen is applied with its alignment; when the same rectangles are filled with a Brush, everything works as expected.
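To see that Pen versus Brush difference in isolation, here is a little sketch (just an illustration, assuming bmp is any Bitmap at least 31 pixels wide and 11 pixels tall):
using (var g = Graphics.FromImage(bmp))
{
    // With a 1-pixel pen, DrawRectangle touches columns/rows 0..10, i.e. 11 pixels per side,
    // while FillRectangle fills exactly 10 x 10 pixels starting at (20, 0).
    g.DrawRectangle(Pens.Black, new Rectangle(0, 0, 10, 10));
    g.FillRectangle(Brushes.Black, new Rectangle(20, 0, 10, 10));
}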
I'm working on a screen-sharing app, which runs a loop and grabs fast screenshots using GDI methods. Example here.
Of course I also use a flood-fill algorithm to find the changed areas between two images (the previous screenshot and the current one).
I use another small trick: I downscale the snapshot resolution by a factor of 10, because constantly processing 1920 * 1080 = 2,073,600 pixels is not very efficient.
When I find the rectangle bounds, I apply them to the original full-size bitmap by simply multiplying every dimension by 10 (top, left, width and height).
This is the scanning code:
unsafe bool ArePixelsEqual(byte* p1, byte* p2, int bytesPerPixel)
{
for (int i = 0; i < bytesPerPixel; ++i)
if (p1[i] != p2[i])
return false;
return true;
}
private unsafe List<Rectangle> CodeImage(Bitmap bmp, Bitmap bmp2)
{
List<Rectangle> rec = new List<Rectangle>();
var bmData1 = bmp.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
var bmData2 = bmp2.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
int bytesPerPixel = 4;
IntPtr scan01 = bmData1.Scan0;
IntPtr scan02 = bmData2.Scan0;
int stride1 = bmData1.Stride;
int stride2 = bmData2.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
bool[] visited = new bool[nWidth * nHeight];
byte* base1 = (byte*)scan01.ToPointer();
byte* base2 = (byte*)scan02.ToPointer();
for (int y = 0; y < nHeight; y ++)
{
byte* p1 = base1;
byte* p2 = base2;
for (int x = 0; x < nWidth; ++x)
{
if (!ArePixelsEqual(p1, p2, bytesPerPixel) && !(visited[x + nWidth * y]))
{
// fill the different area
int minX = x;
int maxX = x;
int minY = y;
int maxY = y;
var pt = new Point(x, y);
Stack<Point> toBeProcessed = new Stack<Point>();
visited[x + nWidth * y] = true;
toBeProcessed.Push(pt);
while (toBeProcessed.Count > 0)
{
var process = toBeProcessed.Pop();
var ptr1 = (byte*)scan01.ToPointer() + process.Y * stride1 + process.X * bytesPerPixel;
var ptr2 = (byte*)scan02.ToPointer() + process.Y * stride2 + process.X * bytesPerPixel;
//Check pixel equality
if (ArePixelsEqual(ptr1, ptr2, bytesPerPixel))
continue;
//This pixel is different
//Update the rectangle
if (process.X < minX) minX = process.X;
if (process.X > maxX) maxX = process.X;
if (process.Y < minY) minY = process.Y;
if (process.Y > maxY) maxY = process.Y;
Point n; int idx;
//Put neighbors in stack
if (process.X - 1 >= 0)
{
n = new Point(process.X - 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.X + 1 < nWidth)
{
n = new Point(process.X + 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y - 1 >= 0)
{
n = new Point(process.X, process.Y - 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y + 1 < nHeight)
{
n = new Point(process.X, process.Y + 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
}
//Finally, set the rectangle.
Rectangle r = new Rectangle(minX * 10, minY * 10, (maxX - minX + 1) * 10, (maxY - minY + 1) * 10);
rec.Add(r);
//Got the rectangle; now I'll do whatever I want with it.
//Note that I scaled everything by 10 because I want to apply the changes to the original 1920x1080 image.
}
p1 += bytesPerPixel;
p2 += bytesPerPixel;
}
base1 += stride1;
base2 += stride2;
}
bmp.UnlockBits(bmData1);
bmp2.UnlockBits(bmData2);
return rec;
}
This is my call:
private void Start()
{
full1 = GetDesktopImage(); // the first, initial screen.
while (true)
{
full2 = GetDesktopImage();
a = new Bitmap(full1, 192, 108);//resizing for faster processing the images.
b = new Bitmap(full2, 192, 108); // resizing for faster processing the images.
CodeImage(a, b);
count++; // counter for the performance.
full1 = full2; // the current screenshot becomes the previous one.
}
}
However, after all the tricks and techniques I used, the algorithm still runs quite slowly... on my machine, an Intel i5 4670K at 3.4 GHz, it manages only about 20 iterations per second at best (and it can drop lower). That may sound fast (don't forget I also have to send each changed area over the network afterwards), but I'm aiming for more processed images per second. I think the main bottleneck is the resizing of the two images, yet I assumed it would be faster after resizing, because the loop has to visit far fewer pixels: only 192 * 108 = 20,736.
I would appreciate any help, any improvement. Thanks.