I'm using this example:
http://www.aforgenet.com/framework/features/blobs_processing.html
I tried using the last example and showing the output in a picture box after a button click:
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Math.Geometry;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Image_Processing_testings
{
public partial class Form1 : Form
{
Bitmap image = null;
public Form1()
{
InitializeComponent();
Bitmap bitmap = new Bitmap("C:\\Users\\user\\Desktop\\test.png");
Bitmap gsImage = Grayscale.CommonAlgorithms.BT709.Apply(bitmap);
DifferenceEdgeDetector filter = new DifferenceEdgeDetector();
image = filter.Apply(gsImage);
// process image with blob counter
BlobCounter blobCounter = new BlobCounter();
blobCounter.ProcessImage(image);
Blob[] blobs = blobCounter.GetObjectsInformation();
// create convex hull searching algorithm
GrahamConvexHull hullFinder = new GrahamConvexHull();
// lock image to draw on it
BitmapData data = image.LockBits(
new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadWrite, image.PixelFormat);
int i = 0;
// process each blob
foreach (Blob blob in blobs)
{
List<IntPoint> leftPoints, rightPoints, edgePoints = new List<IntPoint>();
// get blob's edge points
blobCounter.GetBlobsLeftAndRightEdges(blob,
out leftPoints, out rightPoints);
edgePoints.AddRange(leftPoints);
edgePoints.AddRange(rightPoints);
// blob's convex hull
List<IntPoint> hull = hullFinder.FindHull(edgePoints);
Drawing.Polygon(data, hull, Color.Red);
i++;
}
image.UnlockBits(data);
MessageBox.Show("Found: " + i + " Objects");
}
private void button1_Click_1(object sender, EventArgs e)
{
pictureBox1.Image = image;
}
}
}
The result is that I'm getting the image after the filter, but without any polygon on it.
I counted the number of blobs and got 3 for this picture:
The examples in the link you've provided assume that white pixels belong to the object and black pixels belong to the background. The image you've provided is the opposite. Therefore, invert the image before applying the algorithm and that should work.
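For reference, a minimal sketch of such an inversion using AForge's Invert filter (an in-place filter that works on grayscale and 24bpp images), applied before the blob counter runs:
// invert so that objects become white on a black background,
// which is what BlobCounter expects
Invert invertFilter = new Invert();
invertFilter.ApplyInPlace(image);
// then call blobCounter.ProcessImage(image) and search the hulls as before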
I tried this:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Weather
{
public class DrawDateTime
{
public DrawDateTime()
{
string fileName = @"e:\radar_without_clouds.jpg"; //...
int width = 0;//...
int height = 0;//...
Bitmap bitmap = new Bitmap(
width, height,
System.Drawing.Imaging.PixelFormat.Format24bppRgb); // or some other format
using (Graphics graphics = Graphics.FromImage(bitmap))
{
using (System.Drawing.SolidBrush myBrush = new System.Drawing.SolidBrush(System.Drawing.Color.Black))
{
graphics.FillRectangle(myBrush, new Rectangle(0, 0, 140, 21)); // whatever
// and so on...
} // myBrush will be disposed at this line
bitmap.Save(fileName);
} // graphics will be disposed at this
}
}
}
Using it in Form1:
public Form1()
{
InitializeComponent();
DrawDateTime datetime = new DrawDateTime();
}
but I'm getting an exception on this line:
Bitmap bitmap = new Bitmap(
width, height,
System.Drawing.Imaging.PixelFormat.Format24bppRgb);
System.ArgumentException
HResult=0x80070057
Message=Parameter is not valid.
Source=System.Drawing
StackTrace:
at System.Drawing.Bitmap..ctor(Int32 width, Int32 height, PixelFormat format)
at Weather.DrawDateTime..ctor() in E:\Csharp Projects\wt\DrawDateTime.cs:line 18
at Weather.Form1..ctor() in E:\Csharp Projects\wt\Form1.cs:line 57
at Weather.Program.Main() in E:\Csharp Projects\wt\Program.cs:line 19
What I want to do is draw a rectangle at a specific location on the image, fill it in black, and then write some text in yellow, at a specific size, inside that black rectangle.
An example would be an image with a black rectangle and yellow text inside it at the top-left corner, exactly as in the image, but with whatever text I want instead of what is written there now.
For example, writing the current date and time on the image, in the top-left corner, in yellow, at that size.
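The exception above is thrown because the Bitmap is constructed while width and height are still 0. If the goal is to draw on the existing image, one option is to load that image and draw onto it. A minimal sketch of that idea (the file paths, rectangle size and font here are placeholders, not values from the original code):
using (Bitmap bitmap = new Bitmap(@"e:\radar_without_clouds.jpg"))
using (Graphics graphics = Graphics.FromImage(bitmap))
using (SolidBrush background = new SolidBrush(Color.Black))
using (SolidBrush textBrush = new SolidBrush(Color.Yellow))
using (Font font = new Font("Arial", 12, FontStyle.Bold))
{
    // filled black rectangle in the top-left corner
    graphics.FillRectangle(background, new Rectangle(0, 0, 140, 21));
    // current date/time in yellow inside that rectangle
    graphics.DrawString(DateTime.Now.ToString(), font, textBrush, new PointF(2, 2));
    // save to a new file; saving over the file the Bitmap was loaded from would fail
    bitmap.Save(@"e:\radar_with_text.jpg");
}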
So here is my code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace GandomTest
{
class Program
{
public static bool SearchPixel(string hexcode)
{
Bitmap bitmap = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height);
Graphics graphics = Graphics.FromImage(bitmap as Image);
graphics.CopyFromScreen(0, 0, 0, 0, bitmap.Size);
Color desiredPixelColor = ColorTranslator.FromHtml(hexcode);
for (int x = 0; x < SystemInformation.VirtualScreen.Width; x++)
{
for (int y = 0; y < SystemInformation.VirtualScreen.Height; y++)
{
Color currentPixelColor = bitmap.GetPixel(x, y);
if (desiredPixelColor == currentPixelColor)
{
Console.WriteLine("Color found!");
Thread.Sleep(10000);
return true;
}
else
{
Console.WriteLine("Not found");
continue;
}
}
}
return false;
}
static void Main(string[] args)
{
SearchPixel("#000000");
Console.ReadKey();
}
}
}
I was trying to loop the pixel recognition so that as soon as the pixel is detected, something is written to the console. The thing is, I do not know how to make this work properly. I know this probably isn't the fastest way to do it, but every time I run it and the specified color is on screen, the program does not detect it.
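One likely culprit, for what it's worth: the == operator on System.Drawing.Color compares more than the ARGB values (the named/known-colour state is part of the comparison), so the colour returned by ColorTranslator.FromHtml may never compare equal to the one returned by GetPixel even when the pixels match. A minimal sketch that compares raw ARGB values instead, keeping the rest of the method as it is:
// compare raw ARGB values rather than Color structures
int desiredArgb = ColorTranslator.FromHtml(hexcode).ToArgb();
Color currentPixelColor = bitmap.GetPixel(x, y);
if (currentPixelColor.ToArgb() == desiredArgb)
{
    Console.WriteLine("Color found!");
    return true;
}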
I am capturing the desktop screen and drawing it on a picture box.
Below is sample code for testing. Basically I am updating small portions of the image on the picture box as they arrive from a socket, so for performance reasons I do not always invalidate the whole image; instead I invalidate only the area that needs to be repainted, taking scaling into account.
In the Paint event I use the DrawImage overload below to draw only a specific region of the picture box.
If you try to run the code below, it does not draw properly. Text areas are not readable. What am I doing wrong here? I have searched extensively but did not find any solution.
Here is my code.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace WindowsFormsApp2
{
public partial class Form1 : Form
{
private Bitmap initial = new Bitmap(Screen.PrimaryScreen.Bounds.Width,
Screen.PrimaryScreen.Bounds.Height,
PixelFormat.Format32bppArgb);
private Rectangle GetViewRect() { return pictureBox1.ClientRectangle; }
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
initial = CaptureDesktop();
Rectangle imageRect = new Rectangle(0, 0, initial.Width, initial.Height);
var viewRect = GetViewRect();
var scaleX = (float)viewRect.Width / initial.Width;
var scaleY = (float)viewRect.Height / initial.Height;
// Make sure the target rectangle includes the new block
var targetRect = Rectangle.FromLTRB(
(int)Math.Truncate(imageRect.X * scaleX),
(int)Math.Truncate(imageRect.Y * scaleY),
(int)Math.Ceiling(imageRect.Right * scaleX),
(int)Math.Ceiling(imageRect.Bottom * scaleY));
pictureBox1.Invalidate(targetRect);
pictureBox1.Update();
}
private Bitmap CaptureDesktop()
{
var bmpScreenshot = new Bitmap(Screen.PrimaryScreen.Bounds.Width,
Screen.PrimaryScreen.Bounds.Height,
PixelFormat.Format32bppArgb);
// Create a graphics object from the bitmap.
var gfxScreenshot = Graphics.FromImage(bmpScreenshot);
// Take the screenshot from the upper left corner to the right bottom corner.
gfxScreenshot.CopyFromScreen(Screen.PrimaryScreen.Bounds.X,
Screen.PrimaryScreen.Bounds.Y,
0,
0,
Screen.PrimaryScreen.Bounds.Size,
CopyPixelOperation.SourceCopy);
return bmpScreenshot;
}
private void pictureBox1_Paint(object sender, PaintEventArgs e)
{
lock (initial)
{
e.Graphics.PixelOffsetMode = System.Drawing.Drawing2D.PixelOffsetMode.Half;
e.Graphics.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.HighSpeed;
e.Graphics.CompositingMode = System.Drawing.Drawing2D.CompositingMode.SourceCopy;
e.Graphics.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.NearestNeighbor;
var viewRect = GetViewRect();
var scaleX = (float)initial.Width / viewRect.Width;
var scaleY = (float)initial.Height / viewRect.Height;
var targetRect = e.ClipRectangle;
var imageRect = new RectangleF(targetRect.X * scaleX, targetRect.Y * scaleY, targetRect.Width * scaleX, targetRect.Height * scaleY);
e.Graphics.DrawImage(initial, targetRect, imageRect, GraphicsUnit.Pixel);
}
}
}
}
Edit: if I change to the code below, it works fine.
e.Graphics.PixelOffsetMode = System.Drawing.Drawing2D.PixelOffsetMode.Half;
e.Graphics.SmoothingMode = System.Drawing.Drawing2D.SmoothingMode.HighQuality;
e.Graphics.CompositingMode = System.Drawing.Drawing2D.CompositingMode.SourceCopy;
e.Graphics.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.High;
How can I get the positions of the eyes in an image of a human face?
For instance, my program searches for eyes and then their positions could be stored in 2D vectors like:
Vector2 leftEye = new Vector2(56, 50);
I heard about Emgu but I really don't understand how it works with XMLs...
Here is an example using Emgu 3.4.1. The training data xml is available on GitHub, and you load that into a CascadeClassifier class which can then perform the detection.
using Emgu.CV;
using Emgu.CV.Structure;
using System.Diagnostics;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using System.Net;
public class Program
{
private const string EYE_DETECTION_XML = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_eye.xml";
private const string SAMPLE_IMAGE = "https://upload.wikimedia.org/wikipedia/commons/thumb/1/18/Lewis_Hamilton_2016_Malaysia_2.jpg/330px-Lewis_Hamilton_2016_Malaysia_2.jpg";
static void Main()
{
// download sample photo
WebClient client = new WebClient();
Bitmap image = null;
using (MemoryStream ms = new MemoryStream(client.DownloadData(SAMPLE_IMAGE)))
image = new Bitmap(Image.FromStream(ms));
// convert to Emgu image, convert to grayscale and increase brightness/contrast
Emgu.CV.Image<Bgr, byte> emguImage = new Emgu.CV.Image<Bgr, byte>(image);
var grayScaleImage = emguImage.Convert<Gray, byte>();
grayScaleImage._EqualizeHist();
// load eye classifier data
string eye_classifier_local_xml = @"c:\temp\haarcascade_eye.xml";
client.DownloadFile(EYE_DETECTION_XML, eye_classifier_local_xml);
CascadeClassifier eyeClassifier = new CascadeClassifier(eye_classifier_local_xml);
// perform detection which will return rectangles of eye positions
var eyes = eyeClassifier.DetectMultiScale(grayScaleImage, 1.1, 4);
// draw those rectangles on original image
foreach (Rectangle eye in eyes)
emguImage.Draw(eye, new Bgr(255, 0, 0), 3);
// save image and show it
string output_image_location = @"c:\temp\output.png";
emguImage.ToBitmap().Save(output_image_location, ImageFormat.Png);
Process.Start(output_image_location);
}
}
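If the goal from the question is to end up with 2D positions rather than rectangles, a minimal sketch (extending the loop above, with Point standing in for whatever vector type you use) is to take the centre of each detected rectangle:
// approximate eye position = centre of the detected rectangle
foreach (Rectangle eye in eyes)
{
    Point eyeCenter = new Point(eye.X + eye.Width / 2, eye.Y + eye.Height / 2);
    System.Console.WriteLine("Eye at " + eyeCenter.X + ", " + eyeCenter.Y);
}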
I need to create a figure (a square) where, for each client registered in the system, I put a pixel at a location in the square chosen by that client. I have this example. I need a hint in C# on how to get started. This is only an example to start from.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace RandomPixelImage
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
int width = 640, height = 320;
//bitmap
Bitmap bmp = new Bitmap(width, height);
//random number
Random rand = new Random();
//create random pixels
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
//generate random ARGB value
int a = rand.Next(256);
int r = rand.Next(256);
int g = rand.Next(256);
int b = rand.Next(256);
//set ARGB value
bmp.SetPixel(x, y, Color.FromArgb(a, r, g, b));
}
}
//load bmp in picturebox1
pictureBox1.Image = bmp;
//save (write) random pixel image
bmp.Save("D:\\Image\\RandomImage.png");
}
}
}
Since you have already made a Bitmap, all you need to do is assign it to the PictureBox control:
First add a PictureBox control to your form, and then add this code to get your image onto it.
pictureBox.Image = bmp;
As your example shows, you can use Bitmap.SetPixel() to alter single pixels.
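A minimal sketch of that idea applied to the question (the square size and the client positions are made-up placeholders): start from a blank square and call SetPixel once per client, at the location that client chose.
int side = 200;                              // size of the square (placeholder)
Bitmap square = new Bitmap(side, side);
using (Graphics g = Graphics.FromImage(square))
    g.Clear(Color.White);                    // blank background

// one (x, y) per registered client - placeholder data
List<Point> clientPositions = new List<Point> { new Point(10, 20), new Point(55, 120) };
foreach (Point p in clientPositions)
    square.SetPixel(p.X, p.Y, Color.Black);  // one pixel per client

pictureBox1.Image = square;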