Issue with EmguCV Capture in C#

I am having an issue with a System.StackOverflowException from the Capture class. Here is the code; please ignore the loadScript function, but take a look at the constructor and the DetectFace function:
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Data;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
using System.Runtime.InteropServices;
using PAD_SCRIPT;
using Emgu.CV.GPU;
using Emgu.CV.UI;
namespace PAD_CORE_ENGINE
{
class VisionCore
{
private Capture capture;
private HaarCascade haarCascade;
double[,] faceData = new double[100, 5];
double[,] eyeData = new double[100, 10];
double[,] circleData = new double[100, 5];
int numberOfFaces;
private Image<Bgr, Byte> image;
private System.Windows.Forms.Timer myTimer = new System.Windows.Forms.Timer();
public VisionCore()
{
capture = new Capture(0);
capture.ImageGrabbed += updateFaceDetect;
capture.Start();
//haarCascade = new HaarCascade(@"haarcascade_frontalface_default.xml");
}
public double[,] getDetectFaceInfo()
{
return faceData;
}
public double[,] getEyeInfo()
{
return eyeData;
}
public double[,] getCircleData()
{
return circleData;
}
public double getDetectFaceX(int index)
{
return faceData[index, 0];
}
public double getDetectFaceY(int index)
{
return faceData[index, 1];
}
public double getDetectFaceWidth(int index)
{
return faceData[index, 2];
}
public double getDetectFaceHeight(int index)
{
return faceData[index, 3];
}
public double getEyeX(int index)
{
return eyeData[index, 0];
}
public double getEyeY(int index)
{
return eyeData[index, 1];
}
public double getEyeWidth(int index)
{
return eyeData[index, 2];
}
public double getEyeHeight(int index)
{
return eyeData[index, 3];
}
private void DetectFace(Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
Stopwatch watch;
if (GpuInvoke.HasCuda)
{
using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
{
watch = Stopwatch.StartNew();
using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
{
Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
faces.AddRange(faceRegion);
foreach (Rectangle f in faceRegion)
{
using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
{
//For some reason a clone is required.
//Might be a bug of GpuCascadeClassifier in opencv
using (GpuImage<Gray, Byte> clone = faceImg.Clone(null))
{
Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
foreach (Rectangle e in eyeRegion)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
}
}
watch.Stop();
}
}
else
{
//Read the HaarCascade objects
using (CascadeClassifier face = new CascadeClassifier(faceFileName))
using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
{
watch = Stopwatch.StartNew();
using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
{
//normalizes brightness and increases contrast of the image
gray._EqualizeHist();
//Detect the faces from the gray scale image and store the locations as rectangle
//The first dimensional is the channel
//The second dimension is the index of the rectangle in the specific channel
Rectangle[] facesDetected = face.DetectMultiScale(
gray,
1.1,
10,
new Size(20, 20),
Size.Empty);
faces.AddRange(facesDetected);
foreach (Rectangle f in facesDetected)
{
//Set the region of interest on the faces
gray.ROI = f;
Rectangle[] eyesDetected = eye.DetectMultiScale(
gray,
1.1,
10,
new Size(20, 20),
Size.Empty);
gray.ROI = Rectangle.Empty;
foreach (Rectangle e in eyesDetected)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
watch.Stop();
}
}
detectionTime = watch.ElapsedMilliseconds;
capture.QueryFrame();
}
protected void updateFaceDetect(object sender, EventArgs e)
{
//Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime
Image<Bgr, Byte> image = capture.RetrieveBgrFrame();
string faceFileName = "haarcascade_frontalface_default.xml";
string eyeFileName = "haarcascade_eye.xml";
List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
long detectionTime = 0;
DetectFace(image, faceFileName, eyeFileName, faces, eyes, out detectionTime);
DisplayImage(image);
}
public Image<Bgr, Byte> getImage()
{
return image;
}
public void DisplayImage(Image<Bgr, Byte> img)
{
try
{
//show the frame that was passed in; the 'image' field is never assigned
ImageViewer.Show(img, String.Format(
"Completed face and eye detection using {0}",
GpuInvoke.HasCuda ? "GPU" : "CPU"
));
}
catch (Exception i)
{
Console.WriteLine(i.Message);
}
}
public Image<Bgr, Byte> processFaces(Image<Bgr, Byte> img, List<Rectangle> faces)
{
foreach (Rectangle face in faces)
img.Draw(face, new Bgr(Color.Red), 2);
return img;
}
public Image<Bgr, Byte> processEyes(Image<Bgr, Byte> img, List<Rectangle> eyes)
{
foreach (Rectangle eye in eyes)
img.Draw(eye, new Bgr(Color.Blue), 2);
return img;
}
public void testVision()
{
DisplayImage(capture.RetrieveBgrFrame());
}
public CircleF[] detectCircles(Image<Bgr, Byte> img)
{
Image<Gray, Byte> gray = img.Convert<Gray, Byte>().PyrDown().PyrUp();
Stopwatch watch = Stopwatch.StartNew();
double cannyThreshold = 180.0;
double circleAccumulatorThreshold = 120;
CircleF[] circles = gray.HoughCircles(new Gray(cannyThreshold), new Gray(circleAccumulatorThreshold), 2.0, 20.0, 5, 0)[0];
watch.Stop();
return circles;
}
public Image<Bgr, Byte> ProcessCircles(Image<Bgr, Byte> img, CircleF[] circles)
{
foreach (CircleF circle in circles)
{
img.Draw(circle, new Bgr(Color.Brown), 2);
}
return img;
}
//display the image
public int getNumOfFaces()
{
return numberOfFaces;
}
public PADScript loadScript(PADScript l)
{
l.addLuaCommand("getNumOfFaces", this);
l.addLuaCommand("getDetectFaceInfo", this);
l.addLuaCommand("getImage", this);
l.addLuaCommand("getDetectFaceInfo", this);
l.addLuaCommand("getEyeInfo", this);
l.addLuaCommand("getDetectFaceX", this);
l.addLuaCommand("getDetectFaceY", this);
l.addLuaCommand("getDetectFaceWidth", this);
l.addLuaCommand("getDetectFaceHeight", this);
l.addLuaCommand("getEyeX", this);
l.addLuaCommand("getEyeY", this);
l.addLuaCommand("getEyeWidth", this);
l.addLuaCommand("getEyeHeight", this);
l.addLuaCommand("testVision", this);
return l;
}
}
}
I am thinking that the capture is querying too many frames at once, but I also get the error on the line using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName)). I am not really sure where this error is coming from. Thank you in advance for your help!
Edit: the exception is: An unhandled exception of type 'System.StackOverflowException' occurred in Emgu.CV.GPU.dll
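One hedged reading of the "querying too many captures" hypothesis, as a minimal sketch against the same Emgu CV 2.x API used above: with ImageGrabbed wired up and Start() called, the Capture object already pumps frames, so the extra capture.QueryFrame() at the end of DetectFace is redundant, and the GpuCascadeClassifier instances could be created once rather than on every grabbed frame. The file names are the same ones used in updateFaceDetect.
// Sketch: hoist classifier construction out of the per-frame path and drop the
// extra QueryFrame() call; ImageGrabbed already delivers frames once Start() runs.
private GpuCascadeClassifier gpuFace;
private GpuCascadeClassifier gpuEye;

public VisionCore()
{
    capture = new Capture(0);
    if (GpuInvoke.HasCuda)
    {
        gpuFace = new GpuCascadeClassifier("haarcascade_frontalface_default.xml");
        gpuEye = new GpuCascadeClassifier("haarcascade_eye.xml");
    }
    capture.ImageGrabbed += updateFaceDetect;
    capture.Start();
}
// ...and in DetectFace, remove the trailing capture.QueryFrame() line.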


Use SharpDX to capture screenshot of rotated monitor

The code below successfully takes screenshots of the monitors attached to my Windows 10 laptop computer, so long as the monitors are not "flipped". When the monitors are flipped to any orientation except "landscape", the captured images are all black pixels (r,g,b,a = 0,0,0,255).
How can I modify the code below so that it will also work with flipped monitors?
Target Framework is:
Net 4.8
Referenced packages are:
SharpDX 4.2.0
SharpDX.Direct2D1 4.2.0
SharpDX.Direct3D11 4.2.0
SharpDX.DXGI 4.2.0
using System;
using System.IO;
using System.Runtime.ExceptionServices;
using SharpDX;
namespace ScreenCast {
internal static class Program {
[STAThread]
private static void Main() {
var captureCount = 0;
using var factory = new SharpDX.DXGI.Factory4();
foreach (var adapter in factory.Adapters1) {
using var device = new SharpDX.Direct3D11.Device(adapter);
foreach (var output in adapter.Outputs) {
if (output.Description.IsAttachedToDesktop) {
var description = output.Description;
using var output1 = output.QueryInterface<SharpDX.DXGI.Output1>();
Capture($"{captureCount++}.bmp", device, output1);
}
output.Dispose();
}
adapter.Dispose();
}
}
private static void Capture(string outputFileName, SharpDX.Direct3D11.Device device, SharpDX.DXGI.Output1 output1) {
int width = output1.Description.DesktopBounds.Right - output1.Description.DesktopBounds.Left;
int height = output1.Description.DesktopBounds.Bottom - output1.Description.DesktopBounds.Top;
using var stagingScreenTexture = new SharpDX.Direct3D11.Texture2D(device, new SharpDX.Direct3D11.Texture2DDescription {
Width = width,
Height = height,
CpuAccessFlags = SharpDX.Direct3D11.CpuAccessFlags.Read,
BindFlags = SharpDX.Direct3D11.BindFlags.None,
Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
OptionFlags = SharpDX.Direct3D11.ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = SharpDX.Direct3D11.ResourceUsage.Staging
});
using var duplicatedOutput = output1.DuplicateOutput(device);
SharpDX.DXGI.Resource screenResource = null;
SharpDX.DXGI.OutputDuplicateFrameInformation duplicateFrameInformation;
AcquireFrame(duplicatedOutput, out duplicateFrameInformation, out screenResource);
duplicatedOutput.ReleaseFrame();
AcquireFrame(duplicatedOutput, out duplicateFrameInformation, out screenResource);
// copy resource into memory that can be accessed by the CPU
using var screenTexture = screenResource.QueryInterface<SharpDX.Direct3D11.Texture2D>();
device.ImmediateContext.CopyResource(screenTexture, stagingScreenTexture);
// Get the desktop capture texture
var mapSource = device.ImmediateContext.MapSubresource(stagingScreenTexture, 0, SharpDX.Direct3D11.MapMode.Read, SharpDX.Direct3D11.MapFlags.None);
using var bmp = new System.Drawing.Bitmap(width, height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
var bmpBounds = new System.Drawing.Rectangle(0, 0, width, height);
var bmpData = bmp.LockBits(bmpBounds, System.Drawing.Imaging.ImageLockMode.WriteOnly, bmp.PixelFormat);
var src = mapSource.DataPointer;
var dest = bmpData.Scan0;
for (var y = 0; y < height; y++) {
SharpDX.Utilities.CopyMemory(dest, src, width * 4);
src += mapSource.RowPitch;
dest += bmpData.Stride;
}
bmp.UnlockBits(bmpData);
bmp.Save(outputFileName);
device.ImmediateContext.UnmapSubresource(stagingScreenTexture, 0);
screenResource.Dispose();
duplicatedOutput.ReleaseFrame();
// Display the texture using system associated viewer
System.Diagnostics.Process.Start(Path.GetFullPath(Path.Combine(Environment.CurrentDirectory, outputFileName)));
}
static void AcquireFrame(SharpDX.DXGI.OutputDuplication duplication, out SharpDX.DXGI.OutputDuplicateFrameInformation info, out SharpDX.DXGI.Resource resource) {
while (true) {
try {
duplication.AcquireNextFrame(100, out info, out resource);
return;
} catch (SharpDXException x) {
if (x.ResultCode.Code != SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code)
ExceptionDispatchInfo.Capture(x).Throw();
}
}
}
}
}
First, I have only used SharpDX for a few days, so I'm by no means an expert, but I ran into a similar problem when capturing from a rotated monitor, and from what I've been able to deduce, the captured frame is not rotated.
e.g. your monitor is rotated 90 degrees to portrait (width x height 1080x1920), so you'd expect the captured frame to be portrait as well, right? Nope, you get a 1920x1080 landscape bitmap, so your screen width = bitmap height and vice versa.
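If all you need is an upright image rather than coordinate mapping, one option is to rotate the captured bitmap back afterwards with GDI+. A sketch, assuming rotation is read from output1.Description.Rotation as in the code below (the direction convention may need verifying on your setup):
// Sketch: undo the monitor rotation on the captured landscape bitmap.
switch (output1.Description.Rotation)
{
    case SharpDX.DXGI.DisplayModeRotation.Rotate90:
        bmp.RotateFlip(System.Drawing.RotateFlipType.Rotate90FlipNone);
        break;
    case SharpDX.DXGI.DisplayModeRotation.Rotate180:
        bmp.RotateFlip(System.Drawing.RotateFlipType.Rotate180FlipNone);
        break;
    case SharpDX.DXGI.DisplayModeRotation.Rotate270:
        bmp.RotateFlip(System.Drawing.RotateFlipType.Rotate270FlipNone);
        break;
}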
Here is the code I used in my capture class, still work in progress:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Mathematics.Interop;
using Device = SharpDX.Direct3D11.Device;
using MapFlags = SharpDX.Direct3D11.MapFlags;
namespace EXM.ExampleCapture
{
public class DXScreenCaptureUtil {
private static ImageCodecInfo jpegCodec = ImageCodecInfo.GetImageEncoders()
.First(c => c.FormatID == ImageFormat.Jpeg.Guid);
private static EncoderParameters jpegParams = new EncoderParameters() { Param = new[] { new EncoderParameter(Encoder.Quality, 60L) } };
//Cache objects
private static Factory1 factory = new Factory1();
private static Adapter adapter;
private static Device device;
/// <summary>
/// Gets target device (Display) based on the rectangle we want to capture
/// </summary>
/// <param name="sourceRect">Rectangle we want to capture</param>
/// <returns>Screen which contains the area we want to capture or null if no device contains our area of interest</returns>
private static Screen GetTargetScreen(Rectangle sourceRect) {
foreach (var scr in Screen.AllScreens)
{
if (sourceRect.X >= scr.Bounds.X && sourceRect.Y >= scr.Bounds.Y
&& sourceRect.Right <= scr.Bounds.Width + scr.Bounds.X
&& sourceRect.Bottom <= scr.Bounds.Height + scr.Bounds.Y
)
{
return scr;
}
}
return null;
}
public static (byte[], int) Capture(Rectangle sourceRect, int jpegQuality) {
Screen targetScreen = GetTargetScreen(sourceRect);
if (targetScreen == null) {
throw new Exception($"Could not find target screen for capture rectangle {sourceRect}");
}
//This instructs the client receiving the image to rotate it; it seems reasonable to offload that to the client and save a bit of CPU time on the server
int rotation = 0;
byte[] imageBytes = null;
// Width/Height of desktop to capture
int width = targetScreen.Bounds.Width;
int height = targetScreen.Bounds.Height;
Rectangle cropRect = new Rectangle(sourceRect.X - targetScreen.Bounds.X, sourceRect.Y - targetScreen.Bounds.Y, sourceRect.Width, sourceRect.Height);
// Create DXGI Factory1
if (adapter == null) { adapter = factory.Adapters.Where(x => x.Outputs.Any(o => o.Description.DeviceName == targetScreen.DeviceName)).FirstOrDefault(); }
// Create device from Adapter
if (device == null) { device = new Device(adapter); }
//using (var output = adapter.Outputs.Where(o => o.Description.DeviceName == targetScreen.DeviceName).FirstOrDefault()) //This creates a memory leak!
Output output = null;
//I'm open to suggestions here:
for (int i = 0; i < adapter.GetOutputCount(); i++) {
output = adapter.GetOutput(i);
if (output.Description.DeviceName == targetScreen.DeviceName) {
break;
}
else {
output.Dispose();
}
}
using (var output1 = output.QueryInterface<Output1>()) {
if (output1.Description.Rotation == DisplayModeRotation.Rotate90) {
width = targetScreen.Bounds.Height;
height = targetScreen.Bounds.Width;
int offsetX = targetScreen.Bounds.X - sourceRect.X;
cropRect = new Rectangle(
sourceRect.Y - targetScreen.Bounds.Y,
targetScreen.Bounds.Width - (sourceRect.Width + offsetX),
sourceRect.Height, sourceRect.Width);
rotation = 90;
}
else if (output1.Description.Rotation == DisplayModeRotation.Rotate270) {
width = targetScreen.Bounds.Height;
height = targetScreen.Bounds.Width;
int offsetY = targetScreen.Bounds.Y - sourceRect.Y;
cropRect = new Rectangle(
targetScreen.Bounds.Height - (sourceRect.Height + offsetY),
targetScreen.Bounds.X - sourceRect.X,
sourceRect.Height, sourceRect.Width);
rotation = 270;
}
else if (output1.Description.Rotation == DisplayModeRotation.Rotate180) {
rotation = 180;
}
// Create Staging texture CPU-accessible
var textureDesc = new Texture2DDescription {
CpuAccessFlags = CpuAccessFlags.Read,
BindFlags = BindFlags.None,
Format = Format.B8G8R8A8_UNorm,
Width = width,
Height = height,
OptionFlags = ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = ResourceUsage.Staging
};
using (var screenTexture = new Texture2D(device, textureDesc))
//Duplicate the output
using (var duplicatedOutput = output1.DuplicateOutput(device)) {
bool captureDone = false;
SharpDX.DXGI.Resource screenResource = null;
OutputDuplicateFrameInformation duplicateFrameInformation;
for (int i = 0; !captureDone; i++) {
try {
//Try to get duplicated frame within given time
duplicatedOutput.AcquireNextFrame(1000, out duplicateFrameInformation, out screenResource);
//Ignore first call, this always seems to return a black frame
if (i == 0) {
screenResource.Dispose();
continue;
}
//copy resource into memory that can be accessed by the CPU
using (var screenTexture2D = screenResource.QueryInterface<Texture2D>()) {
device.ImmediateContext.CopyResource(screenTexture2D, screenTexture);
}
//Get the desktop capture texture
var mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, MapMode.Read, MapFlags.None);
var boundsRect = new System.Drawing.Rectangle(0, 0, width, height);
//Create Drawing.Bitmap
using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppArgb)) {
//Copy pixels from screen capture Texture to GDI bitmap
var bitmapData = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
var sourcePtr = mapSource.DataPointer;
var destinationPtr = bitmapData.Scan0;
for (int y = 0; y < height; y++) {
//Copy a single line
Utilities.CopyMemory(destinationPtr, sourcePtr, width * 4);
//Advance pointers
sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
destinationPtr = IntPtr.Add(destinationPtr, bitmapData.Stride);
}
//Release source and dest locks
bitmap.UnlockBits(bitmapData);
device.ImmediateContext.UnmapSubresource(screenTexture, 0);
//Save the output
imageBytes = CropBitmapToJPEGBytes(bitmap, cropRect, jpegQuality);
}
//Capture done
captureDone = true;
}
catch (SharpDXException e) {
if (e.ResultCode.Code != SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code) {
throw;
}
}
finally {
//Dispose manually
if (screenResource != null) {
screenResource.Dispose();
}
duplicatedOutput.ReleaseFrame();
}
}
}
}
output.Dispose();
return (imageBytes, rotation);
}
/// <summary>
/// Crop bitmap
/// </summary>
/// <param name="orig">Original bitmap</param>
/// <param name="cropRect">Crop rectangle</param>
/// <returns>Cropped bitmap</returns>
static byte[] CropBitmapToJPEGBytes(Bitmap orig, Rectangle cropRect, int jpegQuality) {
EncoderParameter qualityParam = new EncoderParameter(Encoder.Quality, (long)jpegQuality);
jpegParams.Param[0] = qualityParam;
byte[] imageBytes;
using (Bitmap nb = new Bitmap(cropRect.Width, cropRect.Height)) {
using (Graphics g = Graphics.FromImage(nb)) {
g.DrawImage(orig, -cropRect.X, -cropRect.Y);
using (MemoryStream s = new MemoryStream()) {
nb.Save(s, jpegCodec, jpegParams);
imageBytes = s.ToArray();
}
}
}
return imageBytes;
}
}
}
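For context, a hypothetical call site (the source rectangle is in virtual-desktop coordinates, and the returned rotation tells the receiving client how far to rotate the JPEG):
// Hypothetical usage of the capture utility above.
(byte[] jpegBytes, int rotation) = DXScreenCaptureUtil.Capture(new Rectangle(0, 0, 800, 600), 60);
File.WriteAllBytes("capture.jpg", jpegBytes);
Console.WriteLine($"Rotate the received image by {rotation} degrees.");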

AccessViolationException in EmguCV

I'm currently scanning a few images and storing ROIs in a List. Then I loop through the list and check whether each ROI has a barcode in it. It works for a few images but throws an AccessViolationException when converting the Mat to an Image for others, and the exception is not caught in the catch block.
The code below is used to check whether a barcode exists.
List<Mat> subMats = new List<Mat>();
Mat mat = new Mat(image.Mat, enlargeROI(image.Mat,NEWRECTANGLE,PADDING));
subMats.Add(mat);
using (var mImage = new Image<Bgr, byte>(image.Width, image.Height))
{
var barcodeReader = new BarcodeReader();
foreach (var matt in subMats)
{
//TODO : check if the submat is a barcode
try
{
Image<Bgr, byte> img = matt.ToImage<Bgr, byte>(); // EXCEPTION THROWN AT THIS LINE
Bitmap bmp = img.ToBitmap();
if (barcodeReader.Decode(bmp) != null)
{
CvInvoke.Rectangle(image, NEWRECTANGLE, new MCvScalar(0.0, 0.0, 0.0), -1);
}
}
catch (Exception e)
{
Console.WriteLine("**" + e.Message);
}
}
}
The Exception says
"Attempted to read or write protected memory. This is often an indication that other memory is corrupt."
I also tried to read it using Image ROIs as below, but it wouldn't detect the barcodes properly.
public static List<Rectangle> rois = new List<Rectangle>();
rois.Add(NEWRECTANGLE);
using (var mImage = image)
{
var barcodeReader = new BarcodeReader();
int count = 0;
foreach (var roi in rois)
{
//TODO : check if the submat is a barcode
try
{
mImage.ROI = roi;
Bitmap bmp = mImage.ToBitmap();
if (barcodeReader.Decode(bmp) != null)
{
CvInvoke.Rectangle(image, NEWRECTANGLE, new MCvScalar(0.0, 0.0, 0.0), -1);
}
}
catch (Exception e)
{
Console.WriteLine("**" + e.Message);
}
}
}
What could go wrong? Any help would be appreciated.
Thanks.
EDIT
I have identified all my ROIs by finding contours. Looping through the contours, I find the rectangles and store them in a List.
CvInvoke.FindContours(fullGrad, contours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);
for (int i = 0; i < contours.Size; i++)
{
if (CvInvoke.ContourArea(contours[i]) < 2000) {
continue;
}
RotatedRect RECT = CvInvoke.MinAreaRect(contours[i]);
PointF[] VERTIXES = RECT.GetVertices();
int X = (int)VERTIXES[1].X;
int Y = (int)VERTIXES[2].Y;
//OBTAIN RECTANGLE THAT SURROUNDS THE DETECTED CONTOUR
int WIDTH = (int)(VERTIXES[3].X - VERTIXES[1].X);
int HEIGHT = (int)(VERTIXES[0].Y - VERTIXES[2].Y);
Rectangle NEWRECTANGLE = new Rectangle(X, Y, WIDTH, HEIGHT);
Mat mat = new Mat(image.Mat, enlargeROI(image.Mat,NEWRECTANGLE,PADDING));
subMats.Add(mat);
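A related thing worth checking in the EDIT above (an observation, not a confirmed diagnosis): the width and height computed from fixed vertex indices can come out negative or run past the image edge, because MinAreaRect does not guarantee a vertex ordering, and a Mat built over such a rectangle is a prime candidate for the access violation. A sketch that derives the axis-aligned box from all four vertices and clamps it to the image (assumes using System.Linq):
// Sketch: robust axis-aligned bounding box for the rotated rect, clamped to the image.
PointF[] v = RECT.GetVertices();
int x0 = (int)Math.Floor(v.Min(p => p.X));
int y0 = (int)Math.Floor(v.Min(p => p.Y));
int x1 = (int)Math.Ceiling(v.Max(p => p.X));
int y1 = (int)Math.Ceiling(v.Max(p => p.Y));
Rectangle safeRect = Rectangle.Intersect(
    new Rectangle(x0, y0, x1 - x0, y1 - y0),
    new Rectangle(0, 0, image.Width, image.Height));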

Find coordinates of barcodes in an image in C#

I have some images which contain 3-4 barcodes. I want to mark all the barcodes irrespective of their position. I'm trying to get all the rectangles in the images using the code below, but it returns empty results or does not mark the barcodes. Am I missing something? Any pointers would be greatly appreciated.
I also tried to follow this tutorial and port it to EmguCV, but I was not sure what to pass for the missing parameters of certain functions. The commented parts are the ones I'm not sure about. Please guide me in the right direction.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Structure;
using System.IO;
using static System.Net.Mime.MediaTypeNames;
using Emgu.CV.CvEnum;
using Emgu.CV.Util;
using System.Windows.Forms;
namespace ConsoleApplication4
{
class Program
{
//public static Mat mat = new Mat();
// public static Mat kernel = new Mat();
// private static Image<Bgr, byte> gradX = mat.ToImage<Bgr,byte>();
// private static Image<Bgr, byte> gradY = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> gradient = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> blur = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> thresh = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> closed = mat.ToImage<Bgr, byte>();
static void Main(string[] args)
{
Image<Bgr, byte> gambar = new Image<Bgr, byte>("source.jpg");
Image<Bgr, byte> kotak = detectBarcode(gambar);
kotak.ToBitmap().Save("destination.jpg");
Console.ReadKey();
}
private static Image<Bgr, byte> detectBarcode(Image<Bgr, byte> image)
{
try
{
Image<Gray, byte> imageGrey = image.Convert<Gray, byte>();
//CvInvoke.Sobel(imageGrey, gradX, DepthType.Cv32F, 1, 0, -1);
//CvInvoke.Sobel(imageGrey, gradY, DepthType.Cv32F, 0, 1, -1);
//CvInvoke.Subtract(gradX, gradY, gradient);
//CvInvoke.ConvertScaleAbs(gradient, gradient, 0, 0);
//CvInvoke.Blur(gradient, blur, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.Threshold(blur, thresh, 255, 255, ThresholdType.Binary);
//kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.MorphologyEx(thresh,closed,MorphOp.Close,kernel,);
//CvInvoke.Erode(closed,closed, new System.Drawing.Point(0, 0),4,BorderType.Default,);
//CvInvoke.Dilate(closed, closed, new System.Drawing.Point(0, 0), 4, BorderType.Default,);
List<RotatedRect> boxList = new List<RotatedRect>();
UMat cannyEdges = new UMat();
double cannyThreshold = 180.0;
double cannyThresholdLinking = 120.0;
CvInvoke.Canny(imageGrey, cannyEdges, cannyThreshold, cannyThresholdLinking);
using (VectorOfVectorOfPoint countours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, countours, null, RetrType.List,
ChainApproxMethod.ChainApproxSimple);
int count = countours.Size;
for (int i = 0; i < count; i++)
{
using (VectorOfPoint kontur = countours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(kontur, approxContour, CvInvoke.ArcLength(kontur, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250
{
if (approxContour.Size == 4) //rectangle
{
//determine if all the angles in the contour are within [80, 100] degrees
bool isRectangle = true;
System.Drawing.Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = Emgu.CV.PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double angle = Math.Abs(
edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j])); //note: (j + 1) to compare adjacent edges, not (j + i)
if (angle < 80 || angle > 100)
{
isRectangle = false;
break;
}
}
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
Image<Bgr, byte> triRectImage = image.Copy();
foreach (RotatedRect box in boxList)
triRectImage.Draw(box, new Bgr(0, 0, 0), 5);
return triRectImage;
}
catch (Exception e) {
Console.WriteLine(e.StackTrace);
return null;
}
}
}
}
I find myself referring you to the documentation; for example:
public static void Sobel(IInputArray src, IOutputArray dst,
DepthType ddepth, int xorder, int yorder, int kSize = 3,
double scale = 1, double delta = 0, BorderType borderType = BorderType.Reflect101)
There follows a detailed list of the parameters and what they mean. If you don't actually understand any of this, then I would suggest you read the tutorials thoroughly, because otherwise you will need an Emgu CV expert to tell you how to write your program, which isn't exactly the point of this site.
I don't wish to sound unkind, but you at least need to have a stab at whatever it is you are trying to do.
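To make that concrete, here is a rough sketch of how the commented-out steps might be filled in, assuming Emgu CV 3.x signatures and a using System.Drawing; directive. Treat the parameter choices (taken from the tutorial being ported) as starting points to verify against the documentation, not as a tested port:
// Rough port of the tutorial's gradient/threshold/morphology steps (unverified sketch).
Mat gradX = new Mat(), gradY = new Mat(), gradient = new Mat();
CvInvoke.Sobel(imageGrey, gradX, DepthType.Cv32F, 1, 0, -1); // kSize = -1 selects the Scharr kernel
CvInvoke.Sobel(imageGrey, gradY, DepthType.Cv32F, 0, 1, -1);
CvInvoke.Subtract(gradX, gradY, gradient);
CvInvoke.ConvertScaleAbs(gradient, gradient, 1.0, 0.0);
Mat blur = new Mat(), thresh = new Mat(), closed = new Mat();
CvInvoke.Blur(gradient, blur, new Size(9, 9), new Point(-1, -1));
CvInvoke.Threshold(blur, thresh, 225, 255, ThresholdType.Binary);
Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(21, 7), new Point(-1, -1));
CvInvoke.MorphologyEx(thresh, closed, MorphOp.Close, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar());
CvInvoke.Erode(closed, closed, null, new Point(-1, -1), 4, BorderType.Default, new MCvScalar());
CvInvoke.Dilate(closed, closed, null, new Point(-1, -1), 4, BorderType.Default, new MCvScalar());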

Emgu CV EigenObjectRecognizer not working

I've tried to code a face recognition program and need some help from the community.
The code posted below compiles with no errors, but the recognizer seems not to be working.
Basically, target.jpg contains one person cropped out of pic1.jpg (which has 3 people in it), so the recognizer should be able to detect that person fairly easily.
The code below runs with no errors, but all 3 people in pic1.jpg get boxed, and GetEigenDistances returns 0 for all 3 faces. By rights, only the person from target.jpg should be boxed in pic1.jpg.
Any idea where I have gone wrong? Thanks in advance.
I'm using Emgu CV 2.4 with C# 2010 Express.
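A note on the symptom, hedged since it depends on the Emgu 2.4 internals: with only one training image the eigenspace is degenerate (there are no meaningful eigenvectors), so every probe can project to the same point, GetEigenDistances can come out as 0 for everything, and the constructor overload without a distance threshold then "recognizes" every face. A sketch of the thresholded overload (trainingLabels is a hypothetical list of names parallel to trainingImages):
// Sketch: train on several images and use the overload with labels and an
// eigen-distance threshold, so non-matching faces are rejected instead of
// always mapping to the nearest (only) neighbour.
MCvTermCriteria termCrit = new MCvTermCriteria(trainingImages.Count, 0.001);
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
    trainingImages.ToArray(),
    trainingLabels.ToArray(), // hypothetical: one label per training image
    2000,                     // threshold to tune; 0 would accept everything
    ref termCrit);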
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.Util;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
namespace FaceReco
{
public partial class Form1 : Form
{
private HaarCascade haar;
List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
Image<Gray, byte> TrainedFace, UnknownFace = null;
MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
// adjust path to find your XML file
haar = new HaarCascade("haarcascade_frontalface_alt_tree.xml");
//Read an target image
Image TargetImg = Image.FromFile(Environment.CurrentDirectory + "\\target\\target.jpg");
Image<Bgr, byte> TargetFrame = new Image<Bgr, byte>(new Bitmap(TargetImg));
//FACE DETECTION FOR TARGET FACE
if (TargetImg != null) // confirm that image is valid
{
//convert the image to gray scale
Image<Gray, byte> grayframe = TargetFrame.Convert<Gray, byte>();
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(25, 25))[0];
foreach (var face in faces)
{
//add into training array
TrainedFace = TargetFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
trainingImages.Add(TrainedFace);
break;
}
TargetImageBox.Image = TrainedFace;
}
//Read an unknown image
Image UnknownImg = Image.FromFile(Environment.CurrentDirectory + "\\img\\pic1.jpg");
Image<Bgr, byte> UnknownFrame = new Image<Bgr, byte>(new Bitmap(UnknownImg));
//FACE DETECTION PROCESS
if (UnknownFrame != null) // confirm that image is valid
{
//convert the image to gray scale
Image<Gray, byte> grayframe = UnknownFrame.Convert<Gray, byte>();
//Detect faces from the gray-scale image and store into an array of type 'var',i.e 'MCvAvgComp[]'
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(25, 25))[0];
//draw a green rectangle on each detected face in image
foreach (var face in faces)
{
UnknownFace = UnknownFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
MCvTermCriteria termCrit = new MCvTermCriteria(16, 0.001);
//Eigen face recognizer
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), ref termCrit);
// if recognise face, draw green box
if (recognizer.Recognize(UnknownFace) != null)
{
UnknownFrame.Draw(face.rect, new Bgr(Color.Green), 3);
}
float f = recognizer.GetEigenDistances(UnknownFace)[0];
// display threshold
UnknownFrame.Draw(f.ToString("R"), ref font, new Point(face.rect.X - 3, face.rect.Y - 3), new Bgr(Color.Red));
}
//Display the image
CamImageBox.Image = UnknownFrame;
}
}
}
}
This area is not yet my specialty, but if I can help I will try. This is what I am using, and it's working quite nicely.
Try to do all your work on the GPU; it's a lot faster than the CPU for this kind of work!
List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
RightCameraImage = RightCameraImageCapture.QueryFrame().Resize(480, 360, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC); //Read the files as an 8-bit Bgr image
//Emgu.CV.GPU.GpuInvoke.HasCuda
if (GpuInvoke.HasCuda)
{
Video.DetectFace.UsingGPU(RightCameraImage, Main.FaceGpuCascadeClassifier, Main.EyeGpuCascadeClassifier, faces, eyes, out detectionTime);
}
else
{
Video.DetectFace.UsingCPU(RightCameraImage, Main.FaceCascadeClassifier, Main.EyeCascadeClassifier, faces, eyes, out detectionTime);
}
string PersonsName = string.Empty;
Image<Gray, byte> GreyScaleFaceImage;
foreach (Rectangle face in faces)
{
RightCameraImage.Draw(face, new Bgr(Color.Red), 2);
GreyScaleFaceImage = RightCameraImage.Copy(face).Convert<Gray, byte>().Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
if (KnownFacesList.Count > 0)
{
// MCvTermCriteria for face recognition...
MCvTermCriteria mCvTermCriteria = new MCvTermCriteria(KnownFacesList.Count, 0.001);
// Recognize Known Faces with Eigen Object Recognizer...
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(KnownFacesList.ToArray(), KnownNamesList.ToArray(), eigenDistanceThreshold, ref mCvTermCriteria);
EigenObjectRecognizer.RecognitionResult recognitionResult = recognizer.Recognize(GreyScaleFaceImage);
if (recognitionResult != null)
{
// Set the Persons Name...
PersonsName = recognitionResult.Label;
// Draw the label for each face detected and recognized...
RightCameraImage.Draw(PersonsName, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
}
else
{
// Draw the label for each face NOT Detected...
RightCameraImage.Draw(FaceUnknown, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
}
}
}
My code in the Video.DetectFace class:
using System;
using Emgu.CV;
using Emgu.CV.GPU;
using System.Drawing;
using Emgu.CV.Structure;
using System.Diagnostics;
using System.Collections.Generic;
namespace Video
{
//-----------------------------------------------------------------------------------
// Copyright (C) 2004-2012 by EMGU. All rights reserved. Modified by Chris Sykes.
//-----------------------------------------------------------------------------------
public static class DetectFace
{
// Use me like this:
/*
//Emgu.CV.GPU.GpuInvoke.HasCuda
if (GpuInvoke.HasCuda)
{
DetectUsingGPU(...);
}
else
{
DetectUsingCPU(...);
}
*/
private static Stopwatch watch;
public static void UsingGPU(Image<Bgr, Byte> image, GpuCascadeClassifier face, GpuCascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
watch = Stopwatch.StartNew();
using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
{
Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
faces.AddRange(faceRegion);
foreach (Rectangle f in faceRegion)
{
using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
{
//For some reason a clone is required.
//Might be a bug of GpuCascadeClassifier in opencv
using (GpuImage<Gray, Byte> clone = faceImg.Clone())
{
Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
foreach (Rectangle e in eyeRegion)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
}
}
watch.Stop();
detectionTime = watch.ElapsedMilliseconds;
}
public static void UsingCPU(Image<Bgr, Byte> image, CascadeClassifier face, CascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
watch = Stopwatch.StartNew();
using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
{
//normalizes brightness and increases contrast of the image
gray._EqualizeHist();
//Detect the faces from the gray scale image and store the locations as rectangle
//The first dimensional is the channel
//The second dimension is the index of the rectangle in the specific channel
Rectangle[] facesDetected = face.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
faces.AddRange(facesDetected);
foreach (Rectangle f in facesDetected)
{
//Set the region of interest on the faces
gray.ROI = f;
Rectangle[] eyesDetected = eye.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
gray.ROI = Rectangle.Empty;
foreach (Rectangle e in eyesDetected)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
watch.Stop();
detectionTime = watch.ElapsedMilliseconds;
}
} // END of CLASS...
}// END of NAMESPACE...

Recognize objects in image [closed]

Hello, I am in the process of doing a school project where we have a robot driving on the ground between Flamingo plates. We need to create an algorithm that can identify the locations of these plates, so we can create paths around them (we are using A* for that).
So far we have worked with the AForge library and have created the following class. The only problem with it is that when it creates the rectangles, it does not take into account that the plates are not always parallel with the camera border, and in that case it just creates a rectangle that covers the whole plate.
So we need some way to find the rotation of the object, or another way to identify this.
I have created an image that might help explain this.
Image that describes the problem: http://img683.imageshack.us/img683/9835/imagerectangle.png
Any help on how I can do this would be greatly appreciated.
Any other information or ideas are always welcome.
public class PasteMap
{
private Bitmap image;
private Bitmap processedImage;
private Rectangle[] rectangels;
public void initialize(Bitmap image)
{
this.image = image;
}
public void process()
{
processedImage = image;
processedImage = applyFilters(processedImage);
processedImage = filterWhite(processedImage);
rectangels = extractRectangles(processedImage);
//rectangels = filterRectangles(rectangels);
processedImage = drawRectangelsToImage(processedImage, rectangels);
}
public Bitmap getProcessedImage
{
get
{
return processedImage;
}
}
public Rectangle[] getRectangles
{
get
{
return rectangels;
}
}
private Bitmap applyFilters(Bitmap image)
{
image = new ContrastCorrection(2).Apply(image);
image = new GaussianBlur(10, 10).Apply(image);
return image;
}
private Bitmap filterWhite(Bitmap image)
{
Bitmap test = new Bitmap(image.Width, image.Height);
for (int width = 0; width < image.Width; width++)
{
for (int height = 0; height < image.Height; height++)
{
if (image.GetPixel(width, height).R > 200 &&
image.GetPixel(width, height).G > 200 &&
image.GetPixel(width, height).B > 200)
{
test.SetPixel(width, height, Color.White);
}
else
test.SetPixel(width, height, Color.Black);
}
}
return test;
}
private Rectangle[] extractRectangles(Bitmap image)
{
BlobCounter bc = new BlobCounter();
bc.FilterBlobs = true;
bc.MinWidth = 5;
bc.MinHeight = 5;
// process binary image
bc.ProcessImage( image );
Blob[] blobs = bc.GetObjects(image, false);
// process blobs
List<Rectangle> rects = new List<Rectangle>();
foreach (Blob blob in blobs)
{
if (blob.Area > 1000)
{
rects.Add(blob.Rectangle);
}
}
return rects.ToArray();
}
private Rectangle[] filterRectangles(Rectangle[] rects)
{
List<Rectangle> Rectangles = new List<Rectangle>();
foreach (Rectangle rect in rects)
{
if (rect.Width > 75 && rect.Height > 75)
Rectangles.Add(rect);
}
return Rectangles.ToArray();
}
private Bitmap drawRectangelsToImage(Bitmap image, Rectangle[] rects)
{
BitmapData data = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
foreach (Rectangle rect in rects)
Drawing.FillRectangle(data, rect, Color.Red);
image.UnlockBits(data);
return image;
}
}
You need to analyse the blobs a bit more to find the corners, as @kigurai has said. The AForge library allows you to do this; see the section "Finding convex hull" on this page for more info. The page includes a screenshot showing a small sample of what a convex hull looks like (source: aforgenet.com).
You want to take a look at the GetBlobsLeftAndRightEdges function and the GrahamConvexHull class.
If anyone is interested, this is the way I did it.
Blobsprocessing:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class Blobsprocessing
{
Bitmap image;
BlobCounter BlobCounter;
Blob[] blobs;
List<Polygon> hulls;
public Blobsprocessing(Bitmap image)
{
this.image = image;
}
public void Process()
{
BlobCounter = new BlobCounter();
processBlobs();
extractConvexHull();
}
public List<Polygon> getHulls()
{
return hulls;
}
private void processBlobs()
{
BlobCounter.FilterBlobs = true;
BlobCounter.MinWidth = 5;
BlobCounter.MinHeight = 5;
// set ordering options
BlobCounter.ObjectsOrder = ObjectsOrder.Size;
// process binary image
BlobCounter.ProcessImage(image);
blobs = BlobCounter.GetObjectsInformation();
}
private void extractConvexHull()
{
GrahamConvexHull hullFinder = new GrahamConvexHull();
// process each blob
hulls = new List<Polygon>();
foreach (Blob blob in blobs)
{
List<IntPoint> leftPoints, rightPoints, edgePoints;
edgePoints = new List<IntPoint>();
// get blob's edge points
BlobCounter.GetBlobsLeftAndRightEdges(blob,
out leftPoints, out rightPoints);
edgePoints.AddRange(leftPoints);
edgePoints.AddRange(rightPoints);
// blob's convex hull
List<IntPoint> hull = hullFinder.FindHull(edgePoints);
hulls.Add(new Polygon(hull));
}
}
}
}
MapFilters:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class MapFilters
{
private Bitmap image;
private Bitmap processedImage;
private Rectangle[] rectangels;
public void initialize(Bitmap image)
{
this.image = image;
}
public void process()
{
processedImage = image;
processedImage = applyFilters(processedImage);
processedImage = filterWhite(processedImage);
}
public Bitmap getProcessedImage
{
get
{
return processedImage;
}
}
private Bitmap applyFilters(Bitmap image)
{
image = new ContrastCorrection(2).Apply(image);
image = new GaussianBlur(10, 10).Apply(image);
return image;
}
private Bitmap filterWhite(Bitmap image)
{
Bitmap test = new Bitmap(image.Width, image.Height);
for (int width = 0; width < image.Width; width++)
{
for (int height = 0; height < image.Height; height++)
{
if (image.GetPixel(width, height).R > 200 &&
image.GetPixel(width, height).G > 200 &&
image.GetPixel(width, height).B > 200)
{
test.SetPixel(width, height, Color.White);
}
else
test.SetPixel(width, height, Color.Black);
}
}
return test;
}
}
}
Polygon:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using System.Threading;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class Polygon
{
List<IntPoint> hull;
public Polygon(List<IntPoint> hull)
{
this.hull = hull;
}
public bool inPoly(int x, int y)
{
int i, j = hull.Count - 1;
bool oddNodes = false;
for (i = 0; i < hull.Count; i++)
{
if (hull[i].Y < y && hull[j].Y >= y
|| hull[j].Y < y && hull[i].Y >= y)
{
//crossing-number test: does edge (i, j) cross the horizontal ray at y, left of x?
//(no divide-by-zero is possible here: the branch above guarantees the two
//endpoints lie on opposite sides of y, so their Y values differ)
if (hull[i].X + (double)(y - hull[i].Y) / (hull[j].Y - hull[i].Y) * (hull[j].X - hull[i].X) < x)
{
oddNodes = !oddNodes;
}
}
j = i;
}
return oddNodes;
}
public Rectangle getRectangle()
{
int x = -1, y = -1, width = -1, height = -1;
foreach (IntPoint item in hull)
{
if (item.X < x || x == -1)
x = item.X;
if (item.Y < y || y == -1)
y = item.Y;
if (item.X > width || width == -1)
width = item.X;
if (item.Y > height || height == -1)
height = item.Y;
}
return new Rectangle(x, y, width-x, height-y);
}
public Bitmap drawRectangle(Bitmap image)
{
Rectangle rect = getRectangle();
Bitmap clonimage = (Bitmap)image.Clone();
BitmapData data = clonimage.LockBits(new Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadWrite, image.PixelFormat);
Drawing.FillRectangle (data, rect, getRandomColor());
clonimage.UnlockBits(data);
return clonimage;
}
public Point[] getMap()
{
List<Point> points = new List<Point>();
Rectangle rect = getRectangle();
for (int x = rect.X; x <= rect.X + rect.Width; x++)
{
for (int y = rect.Y; y <= rect.Y + rect.Height; y++)
{
if (inPoly(x, y))
points.Add(new Point(x, y));
}
}
return points.ToArray();
}
public float calculateArea()
{
List<IntPoint> list = new List<IntPoint>();
list.AddRange(hull);
list.Add(hull[0]);
float area = 0.0f;
for (int i = 0; i < hull.Count; i++)
{
area += list[i].X * list[i + 1].Y - list[i].Y * list[i + 1].X;
}
area = area / 2;
if (area < 0)
area = area * -1;
return area;
}
public Bitmap draw(Bitmap image)
{
Bitmap clonimage = (Bitmap)image.Clone();
BitmapData data = clonimage.LockBits(new Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadWrite, image.PixelFormat);
Drawing.Polygon(data, hull, Color.Red);
clonimage.UnlockBits(data);
return clonimage;
}
static Random random = new Random();
int Color1, Color2, Color3;
public Color getRandomColor()
{
Color1 = random.Next(0, 255);
Color2 = random.Next(0, 255);
Color3 = random.Next(0, 255);
Color color = Color.FromArgb(Color1, Color2, Color3);
Console.WriteLine("R: " + Color1 + " G: " + Color2 + " B: " + Color3 + " = " + color.Name);
return color;
}
}
}
The most straightforward solution is probably to find the corners of each detected blob and then geometrically calculate which point pairs make up the different sides of the squares.
This assumes that the camera is looking straight down, so that a square is actually a square in the image (no perspective distortion).
I am, however, a bit curious why you need to know the rotation of the rectangles. In all the example images, the rectangles are more or less aligned with the image borders, so a bounding box for a rectangle blob would be very close to what you are trying to find. At least it should be good enough for path finding.
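If the rotation does turn out to matter, one simple geometric sketch, building on the convex hull points from the answer above, is to take the hull's longest edge and measure its angle against the image's x-axis:
// Sketch: estimate a blob's rotation from the longest edge of its convex hull.
// Assumes `hull` is the List<IntPoint> produced by GrahamConvexHull above.
double EstimateAngleDegrees(List<IntPoint> hull)
{
    double bestLen2 = -1, angle = 0;
    for (int i = 0; i < hull.Count; i++)
    {
        IntPoint a = hull[i], b = hull[(i + 1) % hull.Count];
        double dx = b.X - a.X, dy = b.Y - a.Y, len2 = dx * dx + dy * dy;
        if (len2 > bestLen2)
        {
            bestLen2 = len2;
            angle = Math.Atan2(dy, dx) * 180.0 / Math.PI; // degrees, CCW from x-axis
        }
    }
    return angle;
}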
You should be using neural networks.
See: http://en.wikipedia.org/wiki/Neural_network
