Emgu CV EigenObjectRecognizer not working - c#

I've tried to code a face recognition program and need some help from the community.
The code posted below compiled with no error but the recognizer seems to be not working?
Basically target.jpg contain a person crop out of the pic1.jpg(3 person inside) so the recognizer should be able to detect it more easily.
The code below run with no errors but all 3 person in pic1.jpg is boxed, and the GetEigenDistances for all 3 faces is 0. By right only the person in pic1.jpg(person in target.jpg) should be boxed.
Any idea on where have i gone wrong? Thanks in advance.
I'm using emgu cv 2.4 with c# 2010 express
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.Util;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
namespace FaceReco
{
public partial class Form1 : Form
{
private HaarCascade haar;
List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
Image<Gray, byte> TrainedFace, UnknownFace = null;
MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
// adjust path to find your XML file
haar = new HaarCascade("haarcascade_frontalface_alt_tree.xml");
//Read an target image
Image TargetImg = Image.FromFile(Environment.CurrentDirectory + "\\target\\target.jpg");
Image<Bgr, byte> TargetFrame = new Image<Bgr, byte>(new Bitmap(TargetImg));
//FACE DETECTION FOR TARGET FACE
if (TargetImg != null) // confirm that image is valid
{
//convert the image to gray scale
Image<Gray, byte> grayframe = TargetFrame.Convert<Gray, byte>();
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(25, 25))[0];
foreach (var face in faces)
{
//add into training array
TrainedFace = TargetFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
trainingImages.Add(TrainedFace);
break;
}
TargetImageBox.Image = TrainedFace;
}
//Read an unknown image
Image UnknownImg = Image.FromFile(Environment.CurrentDirectory + "\\img\\pic1.jpg");
Image<Bgr, byte> UnknownFrame = new Image<Bgr, byte>(new Bitmap(UnknownImg));
//FACE DETECTION PROCESS
if (UnknownFrame != null) // confirm that image is valid
{
//convert the image to gray scale
Image<Gray, byte> grayframe = UnknownFrame.Convert<Gray, byte>();
//Detect faces from the gray-scale image and store into an array of type 'var',i.e 'MCvAvgComp[]'
var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(25, 25))[0];
//draw a green rectangle on each detected face in image
foreach (var face in faces)
{
UnknownFace = UnknownFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
MCvTermCriteria termCrit = new MCvTermCriteria(16, 0.001);
//Eigen face recognizer
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), ref termCrit);
// if recognise face, draw green box
if (recognizer.Recognize(UnknownFace) != null)
{
UnknownFrame.Draw(face.rect, new Bgr(Color.Green), 3);
}
float f = recognizer.GetEigenDistances(UnknownFace)[0];
// display threshold
UnknownFrame.Draw(f.ToString("R"), ref font, new Point(face.rect.X - 3, face.rect.Y - 3), new Bgr(Color.Red));
}
//Display the image
CamImageBox.Image = UnknownFrame;
}
}
}
}

This area is not yet my specialty, but if I can help I will try. This is what I am using and its working quite nicely.
Try to do all your work with the GPU, its a lot faster than the CPU for doing this stuff!
List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
RightCameraImage = RightCameraImageCapture.QueryFrame().Resize(480, 360, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC); //Read the files as an 8-bit Bgr image
//Emgu.CV.GPU.GpuInvoke.HasCuda
if (GpuInvoke.HasCuda)
{
Video.DetectFace.UsingGPU(RightCameraImage, Main.FaceGpuCascadeClassifier, Main.EyeGpuCascadeClassifier, faces, eyes, out detectionTime);
}
else
{
Video.DetectFace.UsingCPU(RightCameraImage, Main.FaceCascadeClassifier, Main.EyeCascadeClassifier, faces, eyes, out detectionTime);
}
string PersonsName = string.Empty;
Image<Gray, byte> GreyScaleFaceImage;
foreach (Rectangle face in faces)
{
RightCameraImage.Draw(face, new Bgr(Color.Red), 2);
GreyScaleFaceImage = RightCameraImage.Copy(face).Convert<Gray, byte>().Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
if (KnownFacesList.Count > 0)
{
// MCvTermCriteria for face recognition...
MCvTermCriteria mCvTermCriteria = new MCvTermCriteria(KnownFacesList.Count, 0.001);
// Recognize Known Faces with Eigen Object Recognizer...
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(KnownFacesList.ToArray(), KnownNamesList.ToArray(), eigenDistanceThreashhold, ref mCvTermCriteria);
EigenObjectRecognizer.RecognitionResult recognitionResult = recognizer.Recognize(GreyScaleFaceImage);
if (recognitionResult != null)
{
// Set the Persons Name...
PersonsName = recognitionResult.Label;
// Draw the label for each face detected and recognized...
RightCameraImage.Draw(PersonsName, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
}
else
{
// Draw the label for each face NOT Detected...
RightCameraImage.Draw(FaceUnknown, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
}
}
}
My Code in the Class: Video.DetectFace:
using System;
using Emgu.CV;
using Emgu.CV.GPU;
using System.Drawing;
using Emgu.CV.Structure;
using System.Diagnostics;
using System.Collections.Generic;
namespace Video
{
//-----------------------------------------------------------------------------------
// Copyright (C) 2004-2012 by EMGU. All rights reserved. Modified by Chris Sykes.
//-----------------------------------------------------------------------------------
public static class DetectFace
{
// Use me like this:
/*
//Emgu.CV.GPU.GpuInvoke.HasCuda
if (GpuInvoke.HasCuda)
{
DetectUsingGPU(...);
}
else
{
DetectUsingCPU(...);
}
*/
private static Stopwatch watch;
public static void UsingGPU(Image<Bgr, Byte> image, GpuCascadeClassifier face, GpuCascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
watch = Stopwatch.StartNew();
using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
{
Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
faces.AddRange(faceRegion);
foreach (Rectangle f in faceRegion)
{
using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
{
//For some reason a clone is required.
//Might be a bug of GpuCascadeClassifier in opencv
using (GpuImage<Gray, Byte> clone = faceImg.Clone())
{
Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
foreach (Rectangle e in eyeRegion)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
}
}
watch.Stop();
detectionTime = watch.ElapsedMilliseconds;
}
public static void UsingCPU(Image<Bgr, Byte> image, CascadeClassifier face, CascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
watch = Stopwatch.StartNew();
using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
{
//normalizes brightness and increases contrast of the image
gray._EqualizeHist();
//Detect the faces from the gray scale image and store the locations as rectangle
//The first dimensional is the channel
//The second dimension is the index of the rectangle in the specific channel
Rectangle[] facesDetected = face.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
faces.AddRange(facesDetected);
foreach (Rectangle f in facesDetected)
{
//Set the region of interest on the faces
gray.ROI = f;
Rectangle[] eyesDetected = eye.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
gray.ROI = Rectangle.Empty;
foreach (Rectangle e in eyesDetected)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
watch.Stop();
detectionTime = watch.ElapsedMilliseconds;
}
} // END of CLASS...
}// END of NAMESPACE...

Related

Eyes' positions in an image of a human face?

How can I get the eye(s)' position(s) in an image of a human face ?
For instance, my program searches for eyes and then their positions could be stored in 2D vectors like :
Vector2 leftEye = new Vector2(56, 50);
I heard about Emgu but I really don't understand how it works with XMLs...
Here is an example using Emgu 3.4.1. The training data xml is available on GitHub, and you load that into a CascadeClassifier class which can then perform the detection.
using Emgu.CV;
using Emgu.CV.Structure;
using System.Diagnostics;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using System.Net;
public class Program
{
private const string EYE_DETECTION_XML = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_eye.xml";
private const string SAMPLE_IMAGE = "https://upload.wikimedia.org/wikipedia/commons/thumb/1/18/Lewis_Hamilton_2016_Malaysia_2.jpg/330px-Lewis_Hamilton_2016_Malaysia_2.jpg";
static void Main()
{
// download sample photo
WebClient client = new WebClient();
Bitmap image = null;
using (MemoryStream ms = new MemoryStream(client.DownloadData(SAMPLE_IMAGE)))
image = new Bitmap(Image.FromStream(ms));
// convert to Emgu image, convert to grayscale and increase brightness/contrast
Emgu.CV.Image<Bgr, byte> emguImage = new Emgu.CV.Image<Bgr, byte>(image);
var grayScaleImage = emguImage.Convert<Gray, byte>();
grayScaleImage._EqualizeHist();
// load eye classifier data
string eye_classifier_local_xml = #"c:\temp\haarcascade_eye.xml";
client.DownloadFile(#EYE_DETECTION_XML, eye_classifier_local_xml);
CascadeClassifier eyeClassifier = new CascadeClassifier(eye_classifier_local_xml);
// perform detection which will return rectangles of eye positions
var eyes = eyeClassifier.DetectMultiScale(grayScaleImage, 1.1, 4);
// draw those rectangles on original image
foreach (Rectangle eye in eyes)
emguImage.Draw(eye, new Bgr(255, 0, 0), 3);
// save image and show it
string output_image_location = #"c:\temp\output.png";
emguImage.ToBitmap().Save(output_image_location, ImageFormat.Png);
Process.Start(output_image_location);
}
}

How to use Facemarker in EMGUCV?

I'm trying to follow this OpenCV tutorial but I have not managed to create the FaceInvoke.FaceDetectNative function, I tried to use this function but the application stops working.
static bool MyDetector(IntPtr input, IntPtr output)
{
CascadeClassifier faceDetector = new CascadeClassifier(#"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
Image<Gray, byte> grayImage = (new Image<Bgr, byte>(CvInvoke.cvGetSize(input))).Convert<Gray, byte>();
grayImage._EqualizeHist();
Rectangle[] faces = faceDetector.DetectMultiScale(grayImage, 1.1, 10, Size.Empty);
VectorOfRect rects = new VectorOfRect(faces);
CvInvoke.cvCopy(rects.Ptr, output, IntPtr.Zero);
return true;
}
On the other hand I tried calling the GetFaces method by passing a Mat object = new Mat (); as IOutputArray which also has not worked (Crash error).
FacemarkLBFParams fParams = new FacemarkLBFParams();
fParams.ModelFile = #"..\..\Resource\EMGUCV\facemarkmodel.yaml";
FacemarkLBF facemark = new FacemarkLBF(fParams);
facemark.SetFaceDetector(MyDetector);
VectorOfRect result = new VectorOfRect();
Image<Bgr, Byte> image = new Image<Bgr, byte>(#"C:\Users\matias\Documents\Proyectos\100-20.bmp");
bool success = facemark.GetFaces(image, result);
Rectangle[] faces = result.ToArray();
Thank's
After several hours I have managed to detect the points of a face, for that use the Fit method, which receives the image, the faces (such as VectorOfRect) and a VectorOfVectorOfPointF for the output
public Image<Bgr, Byte> GetFacePoints()
{
CascadeClassifier faceDetector = new CascadeClassifier(#"..\..\Resource\EMGUCV\haarcascade_frontalface_default.xml");
FacemarkLBFParams fParams = new FacemarkLBFParams();
fParams.ModelFile = #"..\..\Resource\EMGUCV\lbfmodel.yaml";
fParams.NLandmarks = 68; // number of landmark points
fParams.InitShapeN = 10; // number of multiplier for make data augmentation
fParams.StagesN = 5; // amount of refinement stages
fParams.TreeN = 6; // number of tree in the model for each landmark point
fParams.TreeDepth = 5; //he depth of decision tree
FacemarkLBF facemark = new FacemarkLBF(fParams);
//facemark.SetFaceDetector(MyDetector);
Image<Bgr, Byte> image = new Image<Bgr, byte>(#"C:\Users\matias\Downloads\personas-buena-vibra-caracteristicas-1200x600.jpg");
Image<Gray, byte> grayImage = image.Convert<Gray, byte>();
grayImage._EqualizeHist();
VectorOfRect faces = new VectorOfRect(faceDetector.DetectMultiScale(grayImage));
VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF();
facemark.LoadModel(fParams.ModelFile);
bool success = facemark.Fit(grayImage, faces, landmarks);
if (success)
{
Rectangle[] facesRect = faces.ToArray();
for (int i = 0; i < facesRect.Length; i++)
{
image.Draw(facesRect[i], new Bgr(Color.Blue), 2);
FaceInvoke.DrawFacemarks(image, landmarks[i], new Bgr(Color.Blue).MCvScalar);
}
return image;
}
return null;
}
Now all that remains is to optimize the code and continue with my project

Find coordinates of barcode in image c#

I have some images which contains 3-4 bar codes. I want to mark all the bar codes irrespective of the position. I'm trying to get all the rectangles in the images using below code, but they return empty or do not mark the bar codes. Am I missing something? Any pointers would be greatly appreciated.
I also tried to follow this tutorial and tried to port it to EmguCV and was not sure what to pass for missing params of certain functions. Commented part are the ones which I'm not sure. Please guide me to correct direction.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Structure;
using System.IO;
using static System.Net.Mime.MediaTypeNames;
using Emgu.CV.CvEnum;
using Emgu.CV.Util;
using System.Windows.Forms;
namespace ConsoleApplication4
{
class Program
{
//public static Mat mat = new Mat();
// public static Mat kernel = new Mat();
// private static Image<Bgr, byte> gradX = mat.ToImage<Bgr,byte>();
// private static Image<Bgr, byte> gradY = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> gradient = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> blur = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> thresh = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> closed = mat.ToImage<Bgr, byte>();
static void Main(string[] args)
{
Image<Bgr, byte> gambar = new Image<Bgr, byte>("source.jpg");
Image<Bgr, byte> kotak = detectBarcode(gambar);
kotak.ToBitmap().Save("destination.jpg");
Console.ReadKey();
}
private static Image<Bgr, byte> detectBarcode(Image<Bgr, byte> image)
{
try
{
Image<Gray, byte> imageGrey = image.Convert<Gray, byte>();
//CvInvoke.Sobel(imageGrey, gradX, DepthType.Cv32F, 1, 0, -1);
//CvInvoke.Sobel(imageGrey, gradY, DepthType.Cv32F, 0, 1, -1);
//CvInvoke.Subtract(gradX, gradY, gradient);
//CvInvoke.ConvertScaleAbs(gradient, gradient, 0, 0);
//CvInvoke.Blur(gradient, blur, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.Threshold(blur, thresh, 255, 255, ThresholdType.Binary);
//kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.MorphologyEx(thresh,closed,MorphOp.Close,kernel,);
//CvInvoke.Erode(closed,closed, new System.Drawing.Point(0, 0),4,BorderType.Default,);
//CvInvoke.Dilate(closed, closed, new System.Drawing.Point(0, 0), 4, BorderType.Default,);
List<RotatedRect> boxList = new List<RotatedRect>();
UMat cannyEdges = new UMat();
double cannyThreshold = 180.0;
double cannyThresholdLinking = 120.0;
CvInvoke.Canny(imageGrey, cannyEdges, cannyThreshold, cannyThresholdLinking);
using (VectorOfVectorOfPoint countours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, countours, null, RetrType.List,
ChainApproxMethod.ChainApproxSimple);
int count = countours.Size;
for (int i = 0; i < count; i++)
{
using (VectorOfPoint kontur = countours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(kontur, approxContour, CvInvoke.ArcLength(kontur, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250
{
if (approxContour.Size == 4) //rectangle
{
//determine if allthe angles in the contour are within [80,100] degree
bool isRectangle = true;
System.Drawing.Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = Emgu.CV.PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double angle = Math.Abs(
edges[(j + i) % edges.Length].GetExteriorAngleDegree(edges[j]));
if (angle < 80 || angle > 100)
{
isRectangle = false;
break;
}
}
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
Image<Bgr, byte> triRectImage = image.Copy();
foreach (RotatedRect box in boxList)
triRectImage.Draw(box, new Bgr(0, 0, 0), 5);
return triRectImage;
}
catch (Exception e) {
Console.WriteLine(e.StackTrace);
return null;
}
}
}
}
I find myself referring you to, for example
public static void Sobel(IInputArray src ,IOutputArray dst,
DepthType ddepth, int xorder, int yorder, int kSize = 3, double scale = 1, double delta = 0, BorderType borderType =
BorderType.Reflect101 )
There follows a detailed list of the parameters and what they mean. If you don't actually understand any of this then I would suggest you need to read the tutorials thoroughly because otherwise you will need an expert in Emgu CV to tell you how to write your program, which isn't exactly the point of this site.
I don't wish to sound unkind but you at least need to have a stab at whatever it is you are trying to do.

Finding contour points in emgucv

I am working with emguCV for finding contours essential points then saving this point in a file and user redraw this shape in future. so, my goal is this image:
example
my solution is this:
1. import image to picturebox
2. edge detection with canny algorithm
3. finding contours and save points
I found a lot of points with below codes but i can't drawing first shape with this point!
using Emgu.CV;
using Emgu.Util;
private void button1_Click(object sender, EventArgs e)
{
Bitmap bmp = new Bitmap(pictureBox1.Image);
Image<Bgr, Byte> img = new Image<Bgr, byte>(bmp);
Image<Gray, Byte> gray = img.Convert<Gray, Byte>().PyrDown().PyrUp();
Gray cannyThreshold = new Gray(80);
Gray cannyThresholdLinking = new Gray(120);
Gray circleAccumulatorThreshold = new Gray(120);
Image<Gray, Byte> cannyEdges = gray.Canny(cannyThreshold, cannyThresholdLinking).Not();
Bitmap color;
Bitmap bgray;
IdentifyContours(cannyEdges.Bitmap, 50, true, out bgray, out color);
pictureBox1.Image = color;
}
public void IdentifyContours(Bitmap colorImage, int thresholdValue, bool invert, out Bitmap processedGray, out Bitmap processedColor)
{
Image<Gray, byte> grayImage = new Image<Gray, byte>(colorImage);
Image<Bgr, byte> color = new Image<Bgr, byte>(colorImage);
grayImage = grayImage.ThresholdBinary(new Gray(thresholdValue), new Gray(255));
if (invert)
{
grayImage._Not();
}
using (MemStorage storage = new MemStorage())
{
for (Contour<Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage); contours != null; contours = contours.HNext)
{
Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
if (currentContour.BoundingRectangle.Width > 20)
{
CvInvoke.cvDrawContours(color, contours, new MCvScalar(255), new MCvScalar(255), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
color.Draw(currentContour.BoundingRectangle, new Bgr(0, 255, 0), 1);
}
Point[] pts = currentContour.ToArray();
foreach (Point p in pts)
{
//add points to listbox
listBox1.Items.Add(p);
}
}
}
processedColor = color.ToBitmap();
processedGray = grayImage.ToBitmap();
}
In your code you have added contour approximation operation
Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
This contour approximation will approximate your Contour to a nearest polygon & so your actual points got shifted. If you want to reproduce the same image you need not to do any approximation.
Refer this thread.

Issue with emgucv C# and Capture

I am having an issue with a System.StackOverflowException with the Capture function. Here is the code, please ignore the loadScript function but take a look at the constructor and FaceDetect function:
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Data;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
using System.Runtime.InteropServices;
using PAD_SCRIPT;
using Emgu.CV.GPU;
using Emgu.CV.UI;
namespace PAD_CORE_ENGINE
{
class VisionCore
{
private Capture capture;
private HaarCascade haarCascade;
double[,] faceData = new double[100, 5];
double[,] eyeData = new double[100, 10];
double[,] circleData = new double[100, 5];
int numberOfFaces;
private Image<Bgr, Byte> image;
private System.Windows.Forms.Timer myTimer = new System.Windows.Forms.Timer();
public VisionCore()
{
capture = new Capture(0);
capture.ImageGrabbed += updateFaceDetect;
capture.Start();
//haarCascade = new HaarCascade(#"haarcascade_frontalface_default.xml");
}
public double[,] getDetectFaceInfo()
{
return faceData;
}
public double[,] getEyeInfo()
{
return eyeData;
}
public double[,] getCircleData()
{
return circleData;
}
public double getDetectFaceX(int index)
{
return faceData[index, 0];
}
public double getDetectFaceY(int index)
{
return faceData[index, 1];
}
public double getDetectFaceWidth(int index)
{
return faceData[index, 2];
}
public double getDetectFaceHeight(int index)
{
return faceData[index, 3];
}
public double getEyeX(int index)
{
return eyeData[index, 0];
}
public double getEyeY(int index)
{
return eyeData[index, 1];
}
public double getEyeWidth(int index)
{
return eyeData[index, 2];
}
public double getEyeHeight(int index)
{
return eyeData[index, 3];
}
private void DetectFace(Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
{
Stopwatch watch;
if (GpuInvoke.HasCuda)
{
using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName))
using (GpuCascadeClassifier eye = new GpuCascadeClassifier(eyeFileName))
{
watch = Stopwatch.StartNew();
using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
{
Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
faces.AddRange(faceRegion);
foreach (Rectangle f in faceRegion)
{
using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
{
//For some reason a clone is required.
//Might be a bug of GpuCascadeClassifier in opencv
using (GpuImage<Gray, Byte> clone = faceImg.Clone(null))
{
Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);
foreach (Rectangle e in eyeRegion)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
}
}
watch.Stop();
}
}
else
{
//Read the HaarCascade objects
using (CascadeClassifier face = new CascadeClassifier(faceFileName))
using (CascadeClassifier eye = new CascadeClassifier(eyeFileName))
{
watch = Stopwatch.StartNew();
using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to Grayscale
{
//normalizes brightness and increases contrast of the image
gray._EqualizeHist();
//Detect the faces from the gray scale image and store the locations as rectangle
//The first dimensional is the channel
//The second dimension is the index of the rectangle in the specific channel
Rectangle[] facesDetected = face.DetectMultiScale(
gray,
1.1,
10,
new Size(20, 20),
Size.Empty);
faces.AddRange(facesDetected);
foreach (Rectangle f in facesDetected)
{
//Set the region of interest on the faces
gray.ROI = f;
Rectangle[] eyesDetected = eye.DetectMultiScale(
gray,
1.1,
10,
new Size(20, 20),
Size.Empty);
gray.ROI = Rectangle.Empty;
foreach (Rectangle e in eyesDetected)
{
Rectangle eyeRect = e;
eyeRect.Offset(f.X, f.Y);
eyes.Add(eyeRect);
}
}
}
watch.Stop();
}
}
detectionTime = watch.ElapsedMilliseconds;
capture.QueryFrame();
}
protected void updateFaceDetect(object sender, EventArgs e)
{
//Image<Bgr, Byte> image, String faceFileName, String eyeFileName, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime
Image<Bgr, Byte> image = capture.RetrieveBgrFrame();
string faceFileName = "haarcascade_frontalface_default.xml";
string eyeFileName = "haarcascade_eye.xml";
List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
long detectionTime = 0;
DetectFace(image, faceFileName, eyeFileName, faces, eyes, out detectionTime);
DisplayImage(image);
}
public Image<Bgr, Byte> getImage()
{
return image;
}
public void DisplayImage(Image<Bgr, Byte> img)
{
try
{
ImageViewer.Show(image, String.Format(
"Completed face and eye detection using {0}",
GpuInvoke.HasCuda ? "GPU" : "CPU"
));
}
catch (Exception i)
{
Console.WriteLine(i.Message);
}
}
public Image<Bgr, Byte> processFaces(Image<Bgr, Byte> img, List<Rectangle> faces)
{
foreach (Rectangle face in faces)
img.Draw(face, new Bgr(Color.Red), 2);
return img;
}
public Image<Bgr, Byte> processEyes(Image<Bgr, Byte> img, List<Rectangle> eyes)
{
foreach (Rectangle eye in eyes)
img.Draw(eye, new Bgr(Color.Blue), 2);
return img;
}
public void testVision()
{
DisplayImage(capture.RetrieveBgrFrame());
}
public CircleF[] detectCircles(Image<Bgr, Byte> img)
{
Image<Gray, Byte> gray = img.Convert<Gray, Byte>().PyrDown().PyrUp();
Stopwatch watch = Stopwatch.StartNew();
double cannyThreshold = 180.0;
double circleAccumulatorThreshold = 120;
CircleF[] circles = gray.HoughCircles(new Gray(cannyThreshold), new Gray(circleAccumulatorThreshold), 2.0, 20.0, 5, 0)[0];
watch.Stop();
return circles;
}
public Image<Bgr, Byte> ProcessCircles(Image<Bgr, Byte> img, CircleF[] circles)
{
foreach (CircleF circle in circles)
{
img.Draw(circle, new Bgr(Color.Brown), 2);
}
return img;
}
//display the image
public int getNumOfFaces()
{
return numberOfFaces;
}
public PADScript loadScript(PADScript l)
{
l.addLuaCommand("getNumOfFaces", this);
l.addLuaCommand("getDetectFaceInfo", this);
l.addLuaCommand("getImage", this);
l.addLuaCommand("getDetectFaceInfo", this);
l.addLuaCommand("getEyeInfo", this);
l.addLuaCommand("getDetectFaceX", this);
l.addLuaCommand("getDetectFaceY", this);
l.addLuaCommand("getDetectFaceWidth", this);
l.addLuaCommand("getDetectFaceHeight", this);
l.addLuaCommand("getEyeX", this);
l.addLuaCommand("getEyeY", this);
l.addLuaCommand("getEyeWidth", this);
l.addLuaCommand("getEyeHeight", this);
l.addLuaCommand("testVision", this);
return l;
}
}
}
I am thinking that the capture is querying too many captures at once, but I also get the error in using (GpuCascadeClassifier face = new GpuCascadeClassifier(faceFileName)). I am not really for sure where this error is coming from. Thank you in advance for your help!
Edit: the exception is: An unhandled exception of type 'System.StackOverflowException' occurred in Emgu.CV.GPU.dll

Categories