Perform Daugman's Method (Rubber Sheet Model) in Emgu CV - C#

I have a problem with how to build the rubber sheet model from a circle in Emgu CV. This is my code in C#:
// looking for the iris
CircleF[] circles = cannyEdges.HoughCircles(
    cannyThreshold,
    circleAccumulatorThreshold,
    3.6,                   // resolution of the accumulator used to detect centers of the circles
    cannyEdges.Height / 2, // min distance
    2,                     // min radius
    0                      // max radius
    )[0];                  // get the circles from the first channel

var img = myImage.Clone();
var img2 = myImage.Clone();
foreach (CircleF circle in circles)
    img.Draw(circle, new Bgr(Color.Brown), 10);

pictureBox3.SizeMode = PictureBoxSizeMode.StretchImage;
pictureBox3.Image = img.ToBitmap();

I solved it with my own code. This code maps the input image onto a 116 x 360 pixel sheet model.
// Function to unwrap the donut (iris ring) into a rectangular sheet
public Image<Gray, Byte> dougman(Image<Gray, Byte> cit, Double radiris)
{
    double xP, yP, r, theta;
    Image<Gray, Byte> grayT = new Image<Gray, Byte>(360, 116);
    for (int i = 0; i < 116; i++)
    {
        for (int j = 0; j < 360; j++)
        {
            r = i;
            theta = 2.0 * Math.PI * j / 360;
            xP = r * Math.Cos(theta);
            yP = r * Math.Sin(theta);
            xP = xP + radiris + 10; // roughly 115
            yP = yP + radiris + 10;
            grayT[116 - 1 - i, j] = cit[(int)xP, (int)yP];
        }
    }
    return grayT;
}
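For reference, a minimal usage sketch (not from the original post): it feeds the radius of the first detected Hough circle into dougman(). The names grayImage and pictureBox4 are hypothetical, and the function samples around the fixed point (radiris + 10, radiris + 10), so the grayscale input may need to be cropped around the detected circle first.

// Usage sketch only; grayImage and pictureBox4 are made-up names
CircleF iris = circles[0]; // first detected circle, assumed to be the iris
Image<Gray, Byte> grayImage = myImage.Convert<Gray, Byte>();
Image<Gray, Byte> sheet = dougman(grayImage, iris.Radius);
pictureBox4.SizeMode = PictureBoxSizeMode.StretchImage;
pictureBox4.Image = sheet.ToBitmap();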

Related

Why does my procedural grid mesh not triangulate properly for grids bigger than 256x256?

I've been making procedural terrain height maps with the diamond-square algorithm, and building the mesh with the triangulation method below:
public Map GenerateMap()
{
    Mesh mapMesh = new();
    vertices = new Vector3[(Resolution + 1) * (Resolution + 1)];
    Vector2[] uv1 = new Vector2[vertices.Length];
    Vector2[] uv2 = new Vector2[vertices.Length];
    Vector2[] uv3 = new Vector2[vertices.Length];
    DiamondSquare diamondSquare = new(Resolution, Roughness, Seed, HeightLevels);
    float[,] heightFloatMap = diamondSquare.DoDiamondSquare();
    tex = new Texture2D(Resolution, Resolution);
    for (int y = 0, i = 0; y <= Resolution; y++)
    {
        for (int x = 0; x <= Resolution; x++, i++)
        {
            //float height = heightMap.GetPixel(x,y).r;
            float height = heightFloatMap[x, y];
            vertices[i] = new Vector3(x * CellSize.x, height * CellSize.y, y * CellSize.z);
            tex.SetPixel(x, y, new Color(height, height, height, 1));
            if (height == 0)
                uv1[i] = new Vector2(vertices[i].x, vertices[i].z);
            else if (height < 0.4)
                uv2[i] = new Vector2(vertices[i].x, vertices[i].z);
            else if (height < 0.4)
                uv3[i] = new Vector2(vertices[i].x, vertices[i].z);
        }
    }
    mapMesh.vertices = vertices;
    mapMesh.uv = uv1;
    mapMesh.uv2 = uv2;
    int[] triangles = new int[Resolution * Resolution * 6];
    Cell[,] cellMap = new Cell[Resolution / 4, Resolution / 4];
    for (int ti = 0, vi = 0, y = 0; y < Resolution; y++, vi++)
    {
        for (int x = 0; x < Resolution; x++, ti += 6, vi++)
        {
            triangles[ti] = vi;
            triangles[ti + 3] = triangles[ti + 2] = vi + 1;
            triangles[ti + 4] = triangles[ti + 1] = vi + Resolution + 1;
            triangles[ti + 5] = vi + Resolution + 2;
            Vector3[] cellVerts = new Vector3[]
            {
                vertices[vi], vertices[vi + 1], vertices[vi + Resolution + 1], vertices[vi + Resolution + 2]
            };
            Cell cell = new(new Vector2Int(x, y), cellVerts, CalculateCellGeometry(cellVerts));
            cellMap[x / 4, y / 4] = cell;
        }
    }
    mapMesh.triangles = triangles;
    mapMesh.RecalculateNormals();
    mapMesh.RecalculateTangents();
    mapMesh.RecalculateBounds();
    Map map = new(mapMesh, cellMap, heightFloatMap, vertices);
    return map;
}
This works fine with grid sizes 16x16, 32x32 ... 256x256, but it breaks when I try it on 512x512 or above.
256x256: the mesh is perfect.
512x512: it triangulates successfully up until the rows starting at y=128, and on the underside of the terrain there are these bars.
I've mapped out the vertices generated from 512x512 and above resolutions and they are all good, so I'm 99% sure it's down to the triangulation.
I'm new to procedural meshes and am stumped by this issue; any help would be greatly appreciated.
Turns out it wasn't the triangulation: the vertex limit was being reached because my mesh was set to use a 16-bit index buffer (which caps a mesh at 65,535 vertices).
I added this line
mapMesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
and the issue is fixed. An annoying oversight on my part, but that's part of the learning process!
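For anyone hitting the same limit, a minimal sketch of where that line belongs in GenerateMap above (same variable names assumed); the format should be switched before the triangle data is assigned:

// Minimal sketch, assuming the GenerateMap structure above: use a 32-bit index
// buffer so grids above 256x256 (more than 65,535 vertices) index correctly.
Mesh mapMesh = new();
mapMesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
// ... build vertices, uvs and triangles as before ...
mapMesh.vertices = vertices;
mapMesh.triangles = triangles;
mapMesh.RecalculateNormals();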

How to implement unsharp masking on emgucv c#

I am trying to implement the unsharp masking method in Emgu CV using C#.
The Python code I have now is (ref):
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask."""
    # For details on unsharp masking, see:
    # https://en.wikipedia.org/wiki/Unsharp_masking
    # https://homepages.inf.ed.ac.uk/rbf/HIPR2/unsharp.htm
    blurred = cv.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
The C# code I have now does not do the job the code above does. Does anyone know how to implement it in Emgu CV using C#?
public static void GetMat(Image<Gray, byte> srcimg, Image<Gray, byte> imgBlurred, ref Mat dst, int nAmount = 200)
{
    float amount = nAmount / 100f;
    using (Image<Gray, byte> dst_temp = new Image<Gray, byte>(srcimg.Width, srcimg.Height))
    {
        for (int v = 0; v < srcimg.Height; v++)
        {
            for (int u = 0; u < srcimg.Width; u++)
            {
                byte a = srcimg.Data[v, u, 0]; // Get Pixel Color | fast way
                byte b = imgBlurred.Data[v, u, 0];
                int c = (int)(a * (1 + amount) - (amount * b));
                if (c < 0) c = 0;
                if (c > 255) c = 255;
                dst_temp.Data[v, u, 0] = (byte)c;
            }
        }
        dst = dst_temp.Mat.Clone();
    }
}

public static void getSharpenImage(Mat src, ref Mat dst, int nAmount = 200, double sigma = 3, int threshold = 0)
{
    float amount = nAmount / 100.0F;
    using (Mat imgBlurred = new Mat())
    {
        CvInvoke.GaussianBlur(src, imgBlurred, new System.Drawing.Size(0, 0), sigma, sigma);
        using (Mat mask_temp = new Mat())
        {
            CvInvoke.AbsDiff(src, imgBlurred, mask_temp);
            using (Mat lowcontrastmask = new Mat())
            {
                CvInvoke.Threshold(mask_temp, lowcontrastmask, threshold, 255, ThresholdType.BinaryInv);
                GetMat(src.ToImage<Gray, byte>(), imgBlurred.ToImage<Gray, byte>(), ref dst);
                src.CopyTo(dst, lowcontrastmask);
            }
        }
    }
}
https://www.idtools.com.au/unsharp-masking-python-opencv/ has a Python solution.
The following works in C#:
Mat blurredImage = new Mat();
Mat lapImage = new Mat();
CvInvoke.MedianBlur(grayImage, blurredImage, 1);
CvInvoke.Laplacian(blurredImage, lapImage, blurredImage.Depth);
blurredImage -= (0.9*lapImage);
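For completeness, a minimal sketch (my own, not from the answers above) that mirrors the Python reference more directly, letting CvInvoke.AddWeighted do the saturated weighted difference instead of per-pixel loops; it assumes an 8-bit, single-channel Mat:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;

// Sketch of the Python unsharp_mask above, assuming an 8-bit grayscale Mat
public static Mat UnsharpMask(Mat src, int kernelSize = 5, double sigma = 1.0,
                              double amount = 1.0, int threshold = 0)
{
    Mat blurred = new Mat();
    CvInvoke.GaussianBlur(src, blurred, new Size(kernelSize, kernelSize), sigma);

    // sharpened = (amount + 1) * src - amount * blurred, clamped to [0, 255]
    Mat sharpened = new Mat();
    CvInvoke.AddWeighted(src, amount + 1.0, blurred, -amount, 0, sharpened);

    if (threshold > 0)
    {
        // Keep the original pixels where local contrast is below the threshold
        using (Mat diff = new Mat())
        using (Mat lowContrastMask = new Mat())
        {
            CvInvoke.AbsDiff(src, blurred, diff);
            CvInvoke.Threshold(diff, lowContrastMask, threshold, 255, ThresholdType.BinaryInv);
            src.CopyTo(sharpened, lowContrastMask);
        }
    }
    blurred.Dispose();
    return sharpened;
}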

Detect passing of rectangle over yellow pixel

I have a query regarding the best approach to detect when a moving and potentially rotated rectangle passes over a yellow pixel of a Panel's background image.
I have a method which accepts an Image and a Point, and returns true if that point is that of a yellow pixel. I require this colour detection for the function of my game, which resets the car (player) if it drives over the yellow borders of the track. This method is shown below:
private Boolean isYellow(Image image, Point point)
{
    Bitmap bitmap = new Bitmap(image);
    Color color = bitmap.GetPixel(point.X, point.Y);
    return (color.R > 220 && color.G > 220 && color.B < 200);
}
Previously, to detect if the player rectangle passes over yellow, I checked against the location of the rectangle, as provided by the X and Y values of the object. The issue with this is that the location is the top left corner of a horizontal rectangle, meaning the car can drive almost entirely off the track without detection occurring.
I'd like to fix this by checking all points covered by the rectangle. This is not as simple as it may seem as the rectangle is likely to be rotated. My drawing and movement logic is shown below:
public void draw(Graphics g)
{
    int dx = rectangle.X + (rectangle.Height / 2);
    int dy = rectangle.Y + (rectangle.Width / 2);
    g.ScaleTransform(xScale, yScale);
    g.TranslateTransform(dx, dy);
    g.RotateTransform((float)((180 * angle) / Math.PI));
    g.TranslateTransform(-dx, -dy);
    g.DrawImage(image, rectangle.X, rectangle.Y);
    g.ResetTransform();
}

public void move(uRaceGame game, Panel panel)
{
    double cos = Math.Cos(angle), sin = Math.Sin(angle);
    int xLocation = 200;
    int yLocation = 200;
    xLocation = (int)Math.Floor(rectangle.X + (cos * game.moveDir * 60));
    yLocation = (int)Math.Floor(rectangle.Y + (sin * game.moveDir * 60));
    angle = (angle + (game.rotateDir * (Math.PI / 128))) % (Math.PI * 2);
    if (xLocation * xScale > panel.Width - (rectangle.Width * cos) || yLocation * yScale > panel.Height - (rectangle.Width * sin) - 5 || xLocation * xScale < 0 || yLocation * yScale < 5) return;
    rectangle.Location = new Point(xLocation, yLocation);
}
I tried to create a method which translates the coordinates of the corner and works out the middle of the rectangle, but it does not work, and the yellow detection fires in very obscure places:
public Point getCentre()
{
    int cX = (int)(rectangle.X + ((rectangle.Width / 2) / xScale)), cY = (int)(rectangle.Y + ((rectangle.Height / 2) / yScale));
    float tempX = (rectangle.X - cX), tempY = (rectangle.Y - cY);
    double rX = (tempX * Math.Cos(angle)) - (tempY * Math.Sin(angle));
    double rY = (tempX * Math.Sin(angle)) - (tempY * Math.Cos(angle));
    return new Point((int)((rX + cX) * xScale), (int)((rY + cY) * yScale));
}
I'd really appreciate any suggestions on how to tackle this. I included the translation and yellow detection code in case I'm miles off in my attempt and someone else has a better idea.
Thank you very much.
There are two approaches that come to my mind:
You can create loops that go along the tilted sides of the car rectangle
Or you can copy the car to an untilted bitmap and loop over it normally.
Here is an example of the second approach.
It uses a LockBits method that applies your yellow test to a Bitmap,
and it prepares that bitmap by copying it from the original BackgroundImage, un-rotated.
Here is the result, including a control Panel that shows the untilted Rectangle:
Here is the yellow finder function. It uses LockBits for speed:
using System.Runtime.InteropServices;
using System.Drawing.Imaging;

public bool testForYellowBitmap(Bitmap bmp)
{
    Size s1 = bmp.Size;
    PixelFormat fmt = new PixelFormat();
    fmt = bmp.PixelFormat;
    Rectangle rect = new Rectangle(0, 0, s1.Width, s1.Height);
    BitmapData bmp1Data = bmp.LockBits(rect, ImageLockMode.ReadOnly, fmt);
    byte bpp1 = 4;
    if (fmt == PixelFormat.Format24bppRgb) bpp1 = 3;
    else if (fmt == PixelFormat.Format32bppArgb) bpp1 = 4;
    else return false; // throw!!
    int size1 = bmp1Data.Stride * bmp1Data.Height;
    byte[] data1 = new byte[size1];
    System.Runtime.InteropServices.Marshal.Copy(bmp1Data.Scan0, data1, 0, size1);
    for (int y = 0; y < s1.Height; y++)
    {
        for (int x = 0; x < s1.Width; x++)
        {
            Color c1;
            int index1 = y * bmp1Data.Stride + x * bpp1;
            if (bpp1 == 4)
                c1 = Color.FromArgb(data1[index1 + 3], data1[index1 + 2],
                                    data1[index1 + 1], data1[index1 + 0]);
            else
                c1 = Color.FromArgb(255, data1[index1 + 2],
                                    data1[index1 + 1], data1[index1 + 0]);
            if (c1.R > 220 && c1.G > 220 && c1.B < 200)
            {
                bmp.UnlockBits(bmp1Data);
                return true;
            }
        }
    }
    bmp.UnlockBits(bmp1Data);
    return false;
}
I prepare the Bitmap to compare in the MouseMove. The variables w, h, w2, h2 hold the car's width and height and their halves. The source bitmap is in drawPanel1.BackgroundImage. The current angle is in a TrackBar, trb_a.Value. For further control I also display the rotated car rectangle in White.
private void drawPanel1_MouseMove(object sender, MouseEventArgs e)
{
    if (e.Button.HasFlag(MouseButtons.Left))
    {
        Size sz = drawPanel1.BackgroundImage.Size;
        Rectangle rectSrc = new Rectangle(e.X - w2, e.Y - h2, w, h);
        Rectangle rectTgt = new Rectangle(e.X - w, e.Y - h, 2 * w, 2 * h);
        using (Graphics g = drawPanel1.CreateGraphics()) // start optional
        {
            g.TranslateTransform(e.X, e.Y);
            g.RotateTransform(trb_a.Value);
            g.TranslateTransform(-e.X, -e.Y);
            drawPanel1.Refresh();
            g.DrawRectangle(Pens.White, rectSrc);
        }
        using (Graphics g = drawPanel2.CreateGraphics())
        { // end optional
            using (Bitmap bmp = new Bitmap(sz.Width, sz.Height))
            using (Graphics g2 = Graphics.FromImage(bmp))
            {
                g2.TranslateTransform(e.X, e.Y);
                g2.RotateTransform(-trb_a.Value);
                g2.TranslateTransform(-e.X, -e.Y);
                g2.DrawImage(drawPanel1.BackgroundImage, rectTgt, rectTgt,
                    GraphicsUnit.Pixel);
                drawPanel2.Refresh();
                g.DrawImage(bmp, rectSrc, rectSrc, GraphicsUnit.Pixel);
                Text = testForYellowBitmap(bmp) ? "!!YELLOW!!" : "";
            }
        }
    }
}
The first approach would use a similar LockBits method, but with loops inside that go along the rotated sides of the car rectangle, using floats with the loop variables to calculate the x-coordinates. Those data should be prepared on each change of car size or angle. The code is a little longer but should be a bit faster, too.
The advantage of the second approach is that, by using a ClippingRegion on the Graphics object, one could check an arbitrary shape, while the first method can easily be modified for concave polygons but not for curved shapes.
Here is the adapted version of the checking code for the first approach:
public bool testForYellowBitmapTilt(Bitmap bmp, List<int> leftPts,
                                    List<int> rightPts, Point topLeft)
{
    Size s1 = bmp.Size;
    PixelFormat fmt = new PixelFormat();
    fmt = bmp.PixelFormat;
    Rectangle rect = new Rectangle(0, 0, s1.Width, s1.Height);
    BitmapData bmp1Data = bmp.LockBits(rect, ImageLockMode.ReadOnly, fmt);
    byte bpp1 = 4;
    if (fmt == PixelFormat.Format24bppRgb) bpp1 = 3;
    else if (fmt == PixelFormat.Format32bppArgb) bpp1 = 4;
    else return false; // or throw!!
    if (leftPts.Count != rightPts.Count) return false; // or throw!!
    int size1 = bmp1Data.Stride * bmp1Data.Height;
    byte[] data1 = new byte[size1];
    System.Runtime.InteropServices.Marshal.Copy(bmp1Data.Scan0, data1, 0, size1);
    for (int y = 0; y < (leftPts.Count); y++)
    {
        for (int x = leftPts[y] + topLeft.X; x < rightPts[y] + topLeft.X; x++)
        {
            Color c1;
            int index1 = (y + topLeft.Y) * bmp1Data.Stride + x * bpp1;
            if (index1 > 0)
            {
                if (bpp1 == 4)
                    c1 = Color.FromArgb(data1[index1 + 3], data1[index1 + 2],
                                        data1[index1 + 1], data1[index1 + 0]);
                else
                    c1 = Color.FromArgb(255, data1[index1 + 2],
                                        data1[index1 + 1], data1[index1 + 0]);
                if (c1.R > 220 && c1.G > 220 && c1.B < 200)
                {
                    bmp.UnlockBits(bmp1Data);
                    return true;
                }
            }
        }
    }
    bmp.UnlockBits(bmp1Data);
    return false;
}
The left- and rightside coordinates are stored here:
List<int> leftPts = new List<int>();
List<int> rightPts = new List<int>();
Point top = Point.Empty;

void getOuterPoints(List<PointF> corners, out List<int> leftPts,
                    out List<int> rightPts, out Point top)
{
    leftPts = new List<int>();
    rightPts = new List<int>();
    PointF left = corners.Select(x => x).OrderBy(x => x.X).First();
    PointF right = corners.Select(x => x).OrderByDescending(x => x.X).First();
    top = Point.Round(corners.Select(x => x).OrderBy(x => x.Y).First());
    PointF bottom = corners.Select(x => x).OrderByDescending(x => x.Y).First();
    int w1 = -(int)(top.X - left.X);
    int w2 = -(int)(left.X - bottom.X);
    int h1 = (int)(left.Y - top.Y);
    int h2 = (int)(bottom.Y - left.Y);
    float d1 = 1f * w1 / h1;
    float d2 = 1f * w2 / h2;
    for (int y = 0; y < h1; y++) leftPts.Add((int)(y * d1));
    for (int y = 0; y < h2; y++) leftPts.Add((int)(y * d2 + w1));
    for (int y = 0; y < h2; y++) rightPts.Add((int)(y * d2));
    for (int y = 0; y < h1; y++) rightPts.Add((int)(y * d1 + w2));
}
You need to feed in the four corners as a List<PointF> in any order; the top can be anything, as it will be set in the method. The coordinates are relative to the car, so they don't change when the car moves.
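As a side note, a minimal sketch (not from the answer) of how those four corners could be produced from the question's axis-aligned rectangle and angle, assuming rotation around the rectangle's geometric centre (the question's draw() uses a slightly different pivot):

using System;
using System.Collections.Generic;
using System.Drawing;

// Sketch: rotate the four corners of "rectangle" around its centre by "angle" (radians)
List<PointF> GetRotatedCorners(Rectangle rectangle, double angle)
{
    PointF centre = new PointF(rectangle.X + rectangle.Width / 2f,
                               rectangle.Y + rectangle.Height / 2f);
    PointF[] corners =
    {
        new PointF(rectangle.Left,  rectangle.Top),
        new PointF(rectangle.Right, rectangle.Top),
        new PointF(rectangle.Right, rectangle.Bottom),
        new PointF(rectangle.Left,  rectangle.Bottom)
    };
    double cos = Math.Cos(angle), sin = Math.Sin(angle);
    var result = new List<PointF>();
    foreach (PointF p in corners)
    {
        float dx = p.X - centre.X, dy = p.Y - centre.Y;
        result.Add(new PointF(
            (float)(centre.X + dx * cos - dy * sin),
            (float)(centre.Y + dx * sin + dy * cos)));
    }
    return result;
}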

How to control the mouse pointer with head movement using Emgu CV in C#?

I am developing a system like Camera Mouse and other face-controlled mouse tools. I have implemented all the functionality and the mouse pointer moves, but I want to make the movement smooth, just like a real mouse controls the pointer. The code I am using is:
if (startButton == true)
{
    try
    {
        cap = new Capture();
        pictureBox1.Image = cap.QueryFrame().ToImage<Bgr, Byte>().Bitmap;
    }
    catch (Exception exp)
    {
        MessageBox.Show("Error:" + exp);
    }
    _cascadeClassifier = new CascadeClassifier(Application.StartupPath + "/haarcascade_frontalface_default.xml");
    eye_cascadeClassifier = new CascadeClassifier(Application.StartupPath + "/haarcascade_eye.xml");
    timer1.Start();
}

private void timer1_Tick(object sender, EventArgs e)
{
    using (var imageFrame = cap.QueryFrame().ToImage<Bgr, Byte>().Flip(FlipType.Horizontal))
    {
        if (imageFrame != null)
        {
            var grayframe = imageFrame.Convert<Gray, byte>();
            var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.1, 10, Size.Empty); // the actual face detection happens here
            foreach (var face in faces)
            {
                if (Configure.FaceBoxCheck == true)
                    imageFrame.Draw(face, new Bgr(Color.LightGreen), 2); // the detected face(s) is highlighted here using a box that is drawn around it/them
                Int32 yCoordStartSearchEyes = face.Top + (face.Height * 3 / 11);
                Point startingPointSearchEyes = new Point(face.X, yCoordStartSearchEyes);
                Size searchEyesAreaSize = new Size(face.Width, (face.Height * 3 / 11));
                Rectangle possibleROI_eyes = new Rectangle(startingPointSearchEyes, searchEyesAreaSize);
                int widthNav = (imageFrame.Width / 11 * 3);
                int heightNav = (imageFrame.Height / 11 * 3);
                Rectangle nav = new Rectangle(new Point(imageFrame.Width / 2 - widthNav / 2, imageFrame.Height / 2 - heightNav / 2), new Size(widthNav, heightNav));
                imageFrame.Draw(nav, new Bgr(Color.Lavender), 3);
                Point cursor = new Point(face.X + searchEyesAreaSize.Width / 2, yCoordStartSearchEyes + searchEyesAreaSize.Height / 2);
                grayframe.ROI = possibleROI_eyes;
                var eyes = eye_cascadeClassifier.DetectMultiScale(grayframe, 2.15, 3, Size.Empty);
                foreach (var eye in eyes)
                {
                    //imageFrame.Draw(eye, new Bgr(Color.Red), 2);
                    if (Configure.EyeBoxCheck == true)
                        imageFrame.Draw(possibleROI_eyes, new Bgr(Color.DarkGreen), 2);
                    if (nav.Left < cursor.X && cursor.X < (nav.Left + nav.Width) && nav.Top < cursor.Y && cursor.Y < nav.Top + nav.Height)
                    {
                        LineSegment2D CursorDraw = new LineSegment2D(cursor, new Point(cursor.X, cursor.Y + 1));
                        imageFrame.Draw(CursorDraw, new Bgr(Color.White), 3);
                        //we compute new cursor coordinate using a simple scale based on frame width and height
                        int xCoord = (imageFrame.Width * (cursor.X - nav.Left)) / nav.Width;
                        int yCoord = (imageFrame.Height * (cursor.Y - nav.Top)) / nav.Height;
                        //We set our new cursor position
                        Cursor.Position = new Point(xCoord * 2, yCoord * 2);
                    }
                }
            }
OK, I'm sure there are a lot of better ways, but this is a quick-and-dirty way of moving the cursor position in a "smooth" way from point a to point b. Of course this implementation can and should be optimized to use a different thread instead of Application.DoEvents() so it doesn't block the UI thread, but I hope this gets you on track. First, how you should use it. Instead of:
Cursor.Position = new Point(xCoord * 2, yCoord *2);
You should do this:
MoveCursorSmooth(Cursor.Position, new Point(xCoord * 2, yCoord *2));
Now, the implementation of MoveCursorSmooth:
private void MoveCursorSmooth(Point a, Point b)
{
    var step = 5;
    var left = Math.Min(a.X, b.X);
    var right = Math.Max(a.X, b.X);
    int width = right - left;
    var top = a.Y;
    var bottom = b.Y;
    int height = bottom - top;
    if (width > height)
    {
        double slope = (double)height / (double)width;
        if (a.X <= b.X)
            for (int x = 1; x < width; ++x)
            {
                Cursor.Position = new Point((left + x), (a.Y + ((int)(slope * x + 0.5))));
                System.Threading.Thread.Sleep(step);
                Application.DoEvents();
            }
        else
            for (int x = 1; x < width; ++x) // xOffset
            {
                Cursor.Position = new Point((right - x), (a.Y + ((int)(slope * x + 0.5))));
                System.Threading.Thread.Sleep(step);
                Application.DoEvents();
            }
    }
    else
    {
        double slope = (double)width / (double)height;
        if (a.X <= b.X)
        {
            for (int y = 1; y < height; ++y)
            {
                Cursor.Position = new Point((a.X + ((int)(slope * y + 0.5))), (top + y));
                System.Threading.Thread.Sleep(step);
                Application.DoEvents();
            }
        }
        else
        {
            for (int y = 1; y < height; ++y)
            {
                Cursor.Position = new Point((b.X + ((int)(slope * y + 0.5))), (bottom - y));
                System.Threading.Thread.Sleep(step);
                Application.DoEvents();
            }
        }
    }
}
This method is based on this answer
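If blocking the UI thread with Thread.Sleep/Application.DoEvents is a concern, here is a small alternative sketch (my own, not from the answer above): keep an exponentially smoothed position and nudge the cursor toward the latest head-tracking target on every timer tick. The field, method name and the smoothing constant are made up.

// Non-blocking smoothing sketch; call it from timer1_Tick instead of
// assigning Cursor.Position directly.
private Point smoothedPos = Cursor.Position;

private void MoveCursorSmoothed(Point target, double smoothing = 0.2)
{
    // Move a fraction of the remaining distance each tick (exponential smoothing)
    smoothedPos = new Point(
        (int)Math.Round(smoothedPos.X + (target.X - smoothedPos.X) * smoothing),
        (int)Math.Round(smoothedPos.Y + (target.Y - smoothedPos.Y) * smoothing));
    Cursor.Position = smoothedPos;
}

// Usage inside timer1_Tick, replacing the direct assignment:
// MoveCursorSmoothed(new Point(xCoord * 2, yCoord * 2));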

Circular Fisheye Image dewarp to flat image

UPDATE as of 12 Nov 2015
I used the PanoTools plugin with Photoshop and Hugin and played with all those parameters. I ended up finding the parameters for projection, HFOV and output image size that fulfil my minimum requirement.
Parameters:
Processed Output:
My question, then, is: how can I convert all these parameters and values into a C# algorithm, so that when I provide the original image I get the corrected output image?
Thanks a lot.
I have a square image captured from a circular fisheye camera. The size is 2650 * 2650 pixels.
Now, I need to programmatically dewarp the image into a flat panorama image using C#.
I have looked around the internet at different algorithm examples (the Link for the code below, Link1 and Link2) but just can't make it work. My maths sincerely sucks and can't help me with this. Hopefully someone is able to guide me through it.
Thanks a lot.
Example of image output from the camera:
-- Image grabbed from the Wikipedia Fisheye lens article, with the size modified to fit my sample's pixels.
The code I tried to dewarp it with, but with no luck:
Bitmap sourceImage = (Bitmap)Bitmap.FromFile("circularfisheye.jpg");
double factor = 0.5;
Boolean autoCrop = false;
Color backgroundColor = Color.White;
Bitmap StartImage = null;
BitmapData srcBitmapData = null;
Byte[] srcPixels = null;
Byte[] dstPixels = null;
Bitmap NewImage = null;
BitmapData dstBitmapData = null;
try
{
// Checks whether bpp (Bits Per Pixel) is 8, 24, or 32
int Depth = System.Drawing.Bitmap.GetPixelFormatSize(sourceImage.PixelFormat);
if (Depth != 8 && Depth != 24 && Depth != 32)
{
throw new ArgumentException("Only 8, 24 and 32 bpp images are supported.");
}
// Retrieves the count of the color components
int cCount = Depth / 8;
Size baseSize = new Size(sourceImage.Width, sourceImage.Height);
// check if a low image resize and need to improve the quality
// and not generate image aliasing
Int32 maxSize = Math.Max(sourceImage.Width, sourceImage.Height);
if (maxSize < 3000)
{
float percent = 3000F / (float)maxSize;
baseSize = new Size((Int32)((float)sourceImage.Width * percent), (Int32)((float)sourceImage.Height * percent));
}
StartImage = new Bitmap(baseSize.Width, baseSize.Height, sourceImage.PixelFormat);
StartImage.SetResolution(sourceImage.HorizontalResolution, sourceImage.VerticalResolution);
// Create the drawing object and white background
Graphics g = Graphics.FromImage(StartImage);
g.SmoothingMode = SmoothingMode.AntiAlias;
g.InterpolationMode = InterpolationMode.HighQualityBicubic;
g.PixelOffsetMode = PixelOffsetMode.HighQuality;
g.DrawImage(sourceImage, new Rectangle(-1, -1, baseSize.Width + 1, baseSize.Height + 1), 0, 0, sourceImage.Width, sourceImage.Height, GraphicsUnit.Pixel);
g.Dispose();
// Locks the source image and copies it to the byte array and releases the source image
srcBitmapData = StartImage.LockBits(new Rectangle(0, 0, StartImage.Width, StartImage.Height), ImageLockMode.ReadOnly, StartImage.PixelFormat);
srcPixels = new byte[StartImage.Width * StartImage.Height * (Depth / 8)];
Marshal.Copy(srcBitmapData.Scan0, srcPixels, 0, srcPixels.Length);
StartImage.UnlockBits(srcBitmapData);
srcBitmapData = null;
// Create the target image byte array
dstPixels = new Byte[srcPixels.Length];
// Fill the entire frame with the selected background color
Int32 index = ((1 * StartImage.Width) + 1) * cCount; //index = ((Y * Width) + X) * cCount
do
{
if (Depth == 32) //For 32 bpp defines Red , Green, Blue and Alpha
{
dstPixels[index++] = backgroundColor.B;
dstPixels[index++] = backgroundColor.G;
dstPixels[index++] = backgroundColor.R;
dstPixels[index++] = backgroundColor.A; // a
}
if (Depth == 24) //For 24 bpp defines Red , Green and Blue
{
dstPixels[index++] = backgroundColor.B;
dstPixels[index++] = backgroundColor.G;
dstPixels[index++] = backgroundColor.R;
}
if (Depth == 8)
// For 8 bpp defines the value of color ( Red , Green and Blue to be the same thing)
{
dstPixels[index++] = backgroundColor.B;
}
} while (index < srcPixels.Length);
// Calculate the maximum possible extent for the image and multiply by the desired factor
double amp = 0;
double ang = Math.PI * 0.5;
for (Int32 a = 0; a < StartImage.Height; a++)
{
int y = (int)((StartImage.Height / 2) - amp * Math.Sin(ang));
if ((y < 0) || (y > StartImage.Height))
break;
amp = a;
}
amp = (amp - 2) * (factor < -1 ? -1 : (factor > 1 ? 1 : factor));
// Define variables that calculates the cutoff points (if any)
Int32 x1, y1, x2, y2;
x1 = StartImage.Width;
y1 = StartImage.Height;
x2 = 0;
y2 = 0;
// Copy pixel by pixel for the new positions
index = ((1 * StartImage.Width) + 1) * cCount;
do
{
Int32 y = (Int32)((index / cCount) / StartImage.Width);
Int32 x = (index / cCount) - (y * StartImage.Width);
Point pt = NewPoint(new Point(x, y), StartImage.Width, StartImage.Height, amp, factor < 0);
//Values for crop
if (factor >= 0)
{
if (x == StartImage.Width / 2)
{
if (pt.Y < y1)
y1 = pt.Y;
if (pt.Y > y2)
y2 = pt.Y;
}
if (y == StartImage.Height / 2)
{
if (pt.X < x1)
x1 = pt.X;
if (pt.X > x2)
x2 = pt.X;
}
}
else
{
if ((x == 1) && (y == 1))
{
y1 = pt.Y;
x1 = pt.X;
}
if ((x == StartImage.Width - 1) && (y == StartImage.Height - 1))
{
y2 = pt.Y;
x2 = pt.X;
}
}
//Bytes Index which will apply the pixel
Int32 dstIndex = ((pt.Y * StartImage.Width) + pt.X) * cCount;
if (Depth == 32)
{
dstPixels[dstIndex] = srcPixels[index++];
dstPixels[dstIndex + 1] = srcPixels[index++];
dstPixels[dstIndex + 2] = srcPixels[index++];
dstPixels[dstIndex + 3] = srcPixels[index++]; // a
}
if (Depth == 24)
{
dstPixels[dstIndex] = srcPixels[index++];
dstPixels[dstIndex + 1] = srcPixels[index++];
dstPixels[dstIndex + 2] = srcPixels[index++];
}
if (Depth == 8)
{
dstPixels[dstIndex] = srcPixels[index++];
}
} while (index < srcPixels.Length);
//Creates a new image based on the byte array previously created
NewImage = new Bitmap(StartImage.Width, StartImage.Height, StartImage.PixelFormat);
NewImage.SetResolution(StartImage.HorizontalResolution, StartImage.VerticalResolution);
dstBitmapData = NewImage.LockBits(new Rectangle(0, 0, StartImage.Width, StartImage.Height), ImageLockMode.WriteOnly, StartImage.PixelFormat);
Marshal.Copy(dstPixels, 0, dstBitmapData.Scan0, dstPixels.Length);
NewImage.UnlockBits(dstBitmapData);
//Generates the final image to crop or resize the real coo
Bitmap FinalImage = new Bitmap(sourceImage.Width + 1, sourceImage.Height, StartImage.PixelFormat);
NewImage.SetResolution(StartImage.HorizontalResolution, StartImage.VerticalResolution);
Graphics g1 = Graphics.FromImage(FinalImage);
g1.SmoothingMode = SmoothingMode.AntiAlias;
g1.InterpolationMode = InterpolationMode.HighQualityBicubic;
g1.PixelOffsetMode = PixelOffsetMode.HighQuality;
//Performs the cut if enabled automatic cutting and there is need to cut
if ((autoCrop) && ((x1 > 0) || (y1 > 0) || (x2 < NewImage.Height) || (y2 < NewImage.Height)))
{
Rectangle cropRect = new Rectangle(x1, y1, x2 - x1, y2 - y1);
g1.DrawImage(NewImage, new Rectangle(-1, -1, FinalImage.Width + 1, FinalImage.Height + 1), cropRect.X, cropRect.Y, cropRect.Width, cropRect.Height, GraphicsUnit.Pixel);
}
else
{
g1.DrawImage(NewImage, new Rectangle(-1, -1, FinalImage.Width + 1, FinalImage.Height + 1), 0, 0, NewImage.Width, NewImage.Height, GraphicsUnit.Pixel);
}
g1.Dispose();
g1 = null;
NewImage = null;
FinalImage.Save("output.jpg");
FinalImage.Dispose();
}
finally
{
srcBitmapData = null;
srcPixels = null;
dstPixels = null;
dstBitmapData = null;
}
Such a distortion has a symmetry of revolution.
In polar coordinates, with the pole at the center of the image, it is expressed as
r' = f(r)
Θ' = Θ
where the prime indicates the distorted coordinates. The function f is unknown and should be measured empirically, by calibration (looking at a regular target).
To correct the image, you need to invert the function f and apply the reverse transform to the image. In fact, it is easier to measure the inverse g = f⁻¹ directly by calibration. As a starting approximation, a simple model like
r = r' + a·r'³
can do.
Most probably you don't have a picture of a grid taken with the same lens. Your last resort is to implement the undistortion function with adjustable parameters, and optimize these by trial and error.
It should also be possible to derive the calibration curve by looking at the deformation of straight lines, but this is more "technical".
In Cartesian coordinates, you can express the correction transform as
x = g(r')·x'/r'
y = g(r')·y'/r'
where r' = √(x'² + y'²).
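To make the last formula concrete, here is a minimal sketch (my own, not the answerer's code) that applies a simple radial model as a reverse map with Emgu CV's CvInvoke.Remap. Instead of inverting r = r' + a·r'³ per pixel, the inverse mapping is parameterised directly as r' = r·(1 + k·r²); k is an adjustable coefficient to be tuned by trial and error as suggested above (very small when radii are in pixels).

using Emgu.CV;
using Emgu.CV.CvEnum;

// Sketch: radial undistortion via a reverse map and CvInvoke.Remap
static Mat UndistortRadial(Mat src, double k)
{
    int w = src.Width, h = src.Height;
    double cx = w / 2.0, cy = h / 2.0;

    // For every corrected pixel (x, y) at squared radius r² from the centre,
    // sample the distorted image at radius r' = r * (1 + k * r²).
    Matrix<float> mapX = new Matrix<float>(h, w);
    Matrix<float> mapY = new Matrix<float>(h, w);
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            double dx = x - cx, dy = y - cy;
            double r2 = dx * dx + dy * dy;
            double scale = 1 + k * r2; // r'/r for this pixel
            mapX.Data[y, x] = (float)(cx + dx * scale);
            mapY.Data[y, x] = (float)(cy + dy * scale);
        }
    }

    Mat dst = new Mat();
    CvInvoke.Remap(src, dst, mapX, mapY, Inter.Linear, BorderType.Constant);
    return dst;
}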
Use the algorithm from here:
http://www.helviojunior.com.br/fotografia/barrel-and-pincushion-distortion/
It worked for me
I've made some revamps to HelvioJunior's library (that was linked by @Tarek.Mh); I think this may suit your need:
Below, the code:
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using static System.Math;
namespace HelvioJunior
{
//https://www.helviojunior.com.br/fotografia/barrel-and-pincushion-distortion/
public class Program
{
private static void Main(string[] args)
{
Bitmap source = (Bitmap)Image.FromFile(@"JpwX0.png");
Bitmap bmp = BarrelDistortion(source, 4/10f, true);
bmp.Save(@"test.png");
bmp.Dispose();
source.Dispose();
}
static public Bitmap BarrelDistortion(Bitmap sourceImage, double factor = 0, bool autoCrop = true, uint previewRectangleWidth = 0, Color? fillerColor = null)
{
int sourceRight = sourceImage.Width - 1, sourceBottom = sourceImage.Height - 1;
// Vertical amplitude is half the height times factor
// Horizontal amplitude is missing ; vertical amplitude's applied to both directions
double amp = sourceBottom / 2f * factor;
// Inner shrinking area points
RePoint[] lPts;
bool inverse = factor < 0;
// Shrinking area coordinates (center point is considered always available)
double x1 = sourceRight / 2f,
y1 = sourceBottom / 2f,
x2 = sourceRight / 2f,
y2 = sourceBottom / 2f;
if (inverse)
{
lPts = new RePoint[]
{
new RePoint(0, 0),
new RePoint(0, sourceBottom),
new RePoint(sourceRight, sourceBottom),
new RePoint(sourceRight, 0)
};
}
else
{
lPts = new RePoint[]
{
new RePoint(sourceRight * 1 / 2f, 0),
new RePoint(0, sourceBottom * 1 / 2f),
new RePoint(sourceRight, sourceBottom * 1 / 2f),
new RePoint(sourceRight * 1 / 2f, sourceBottom)
};
}
foreach (var pN in lPts.Select(pt => NewPoint(pt, sourceImage.Width, sourceImage.Height, amp, inverse)))
{
if (pN.Y < y1) y1 = pN.Y;
if (pN.Y > y2) y2 = pN.Y;
if (pN.X < x1) x1 = pN.X;
if (pN.X > x2) x2 = pN.X;
}
// Bytes per color from bit per pixel (bpp) format
int bpcCount = Image.GetPixelFormatSize(sourceImage.PixelFormat) / 8;
Rectangle sourceRectangle = new Rectangle(0, 0, sourceImage.Width, sourceImage.Height);
int srcLength = sourceImage.Width * sourceImage.Height * bpcCount;
// Gets sourceImage byte array as srcpixels
BitmapData srcBitmapData = sourceImage.LockBits(sourceRectangle, ImageLockMode.ReadOnly, sourceImage.PixelFormat);
byte[] srcPixels = new byte[srcLength];
Marshal.Copy(srcBitmapData.Scan0, srcPixels, 0, srcLength);
sourceImage.UnlockBits(srcBitmapData);
srcBitmapData = null;
// Destination byte array preparation as dstPixels
byte[] dstPixels = new byte[srcLength];
int dstIndex = 0;
// Filler color preparation
Color fillColor = fillerColor ?? Color.Transparent;
if (!autoCrop)
{
if (bpcCount <= 4) // Depth > 32bpp may not work as expected, filler color's not applied for bit safety reason
do
{
dstPixels[dstIndex++] = fillColor.B;
if (bpcCount > 1)
{
dstPixels[dstIndex++] = fillColor.G;
dstPixels[dstIndex++] = fillColor.R;
if (bpcCount > 3)
dstPixels[dstIndex++] = fillColor.A; // a
}
} while (dstIndex < srcLength);
}
// Byte-to-byte copy (incl. Point transformation)
int index = 0, srcBpcLength = srcLength - bpcCount;
do
{
int comp = index / bpcCount; // comp yields the current "pixel" position
int y = comp / sourceImage.Width; // Each line is sourceImage.Width bytes wide
int x = comp - (y * sourceImage.Width); // Remaining (comp - lines) bytes is target column (ranges from 0 to width - 1)
// Destination "pixel"
RePoint pt = NewPoint(new RePoint(x, y), sourceImage.Width, sourceImage.Height, amp, inverse);
dstIndex = (((int)pt.Y * sourceImage.Width) + (int)pt.X) * bpcCount; // dstIndex++ overflows when |amp| >= 2
if (dstIndex >= 0 && dstIndex <= srcBpcLength)
for (int i = 0; i++ < bpcCount;)
dstPixels[dstIndex++] = srcPixels[index++];
else
index += bpcCount;
} while (index < srcLength);
srcPixels = null;
// Destination bytes application
BitmapData dstBitmapData = sourceImage.LockBits(sourceRectangle, ImageLockMode.WriteOnly, sourceImage.PixelFormat);
Marshal.Copy(dstPixels, 0, dstBitmapData.Scan0, srcLength);
sourceImage.UnlockBits(dstBitmapData);
dstBitmapData = null;
dstPixels = null;
// Final Image area
Rectangle cropRect = new Rectangle((int)Ceiling(x1), (int)Ceiling(y1), (int)Ceiling(x2 - x1), (int)Ceiling(y2 - y1));
Rectangle destRectangle = autoCrop ? cropRect : sourceRectangle;
// Final image preparation
Bitmap FinalImage = new Bitmap(destRectangle.Width, destRectangle.Height, sourceImage.PixelFormat);
FinalImage.SetResolution(sourceImage.HorizontalResolution, sourceImage.VerticalResolution);
Graphics g1 = Graphics.FromImage(FinalImage);
g1.DrawImage(sourceImage, -destRectangle.X, -destRectangle.Y);
// Previsualization rectangle
if (previewRectangleWidth > 0)
g1.DrawRectangle(new Pen(Color.Red, previewRectangleWidth), cropRect.X - 1, cropRect.Y - 1, cropRect.Width + previewRectangleWidth, cropRect.Height + previewRectangleWidth);
g1.Dispose();
g1 = null;
return FinalImage;
}
private static RePoint NewPoint(RePoint aP, double Width, double Height, double Amplitude, bool inverse)
{
double h = aP.Y / (Height - 1);
double w = aP.X / (Width - 1);
// Works ok for [0/2] to [1/2]
// Floating point error(s) here, in the range of ]1/2] to [2/2] (No workaround found)
double sinX = Round(Sin(PI * w), 15); // Range of [0] to [1] * PI ; result ranges from 0 (far from center) to 1 (at center)
double sinY = Round(Sin(PI * h), 15);
double caX = Amplitude * (1 - 2 * w);
double caY = Amplitude * (1 - 2 * h);
double aY = 0, aX = 0;
if (inverse)
{
aX = -caX;
aY = -caY;
}
double pY = aP.Y + aY + caY * sinX;
double pX = aP.X + aX + caX * sinY;
return new RePoint(pX, pY);
}
private struct RePoint
{
public double X;
public double Y;
public RePoint(double x, double y)
{
X = x;
Y = y;
}
}
}
}
