The code below successfully takes screenshots of the monitors attached to my Windows 10 laptop, as long as the monitors are not "flipped". When a monitor is rotated to any orientation except "landscape", the captured images are all black pixels (r, g, b, a = 0, 0, 0, 255).
How can I modify the code below so that it also works with flipped monitors?
Target framework:
.NET Framework 4.8
Referenced packages are:
SharpDX 4.2.0
SharpDX.Direct2D1 4.2.0
SharpDX.Direct3D11 4.2.0
SharpDX.DXGI 4.2.0
using System;
using System.IO;
using System.Runtime.ExceptionServices;
using SharpDX;
namespace ScreenCast {
internal static class Program {
[STAThread]
private static void Main() {
var captureCount = 0;
using var factory = new SharpDX.DXGI.Factory4();
foreach (var adapter in factory.Adapters1) {
using var device = new SharpDX.Direct3D11.Device(adapter);
foreach (var output in adapter.Outputs) {
if (output.Description.IsAttachedToDesktop) {
var description = output.Description;
using var output1 = output.QueryInterface<SharpDX.DXGI.Output1>();
Capture($"{captureCount++}.bmp", device, output1);
}
output.Dispose();
}
adapter.Dispose();
}
}
private static void Capture(string outputFileName, SharpDX.Direct3D11.Device device, SharpDX.DXGI.Output1 output1) {
int width = output1.Description.DesktopBounds.Right - output1.Description.DesktopBounds.Left;
int height = output1.Description.DesktopBounds.Bottom - output1.Description.DesktopBounds.Top;
using var stagingScreenTexture = new SharpDX.Direct3D11.Texture2D(device, new SharpDX.Direct3D11.Texture2DDescription {
Width = width,
Height = height,
CpuAccessFlags = SharpDX.Direct3D11.CpuAccessFlags.Read,
BindFlags = SharpDX.Direct3D11.BindFlags.None,
Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
OptionFlags = SharpDX.Direct3D11.ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = SharpDX.Direct3D11.ResourceUsage.Staging
});
using var duplicatedOutput = output1.DuplicateOutput(device);
SharpDX.DXGI.Resource screenResource = null;
SharpDX.DXGI.OutputDuplicateFrameInformation duplicateFrameInformation;
// The first acquired frame is often stale, so grab one, release it, and acquire again
AcquireFrame(duplicatedOutput, out duplicateFrameInformation, out screenResource);
screenResource.Dispose();
duplicatedOutput.ReleaseFrame();
AcquireFrame(duplicatedOutput, out duplicateFrameInformation, out screenResource);
// copy resource into memory that can be accessed by the CPU
using var screenTexture = screenResource.QueryInterface<SharpDX.Direct3D11.Texture2D>();
device.ImmediateContext.CopyResource(screenTexture, stagingScreenTexture);
// Get the desktop capture texture
var mapSource = device.ImmediateContext.MapSubresource(stagingScreenTexture, 0, SharpDX.Direct3D11.MapMode.Read, SharpDX.Direct3D11.MapFlags.None);
using var bmp = new System.Drawing.Bitmap(width, height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
var bmpBounds = new System.Drawing.Rectangle(0, 0, width, height);
var bmpData = bmp.LockBits(bmpBounds, System.Drawing.Imaging.ImageLockMode.WriteOnly, bmp.PixelFormat);
var src = mapSource.DataPointer;
var dest = bmpData.Scan0;
for (var y = 0; y < height; y++) {
SharpDX.Utilities.CopyMemory(dest, src, width * 4);
src += mapSource.RowPitch;
dest += bmpData.Stride;
}
bmp.UnlockBits(bmpData);
bmp.Save(outputFileName);
device.ImmediateContext.UnmapSubresource(stagingScreenTexture, 0);
screenResource.Dispose();
duplicatedOutput.ReleaseFrame();
// Display the texture using system associated viewer
System.Diagnostics.Process.Start(Path.GetFullPath(Path.Combine(Environment.CurrentDirectory, outputFileName)));
}
static void AcquireFrame(SharpDX.DXGI.OutputDuplication duplication, out SharpDX.DXGI.OutputDuplicateFrameInformation info, out SharpDX.DXGI.Resource resource) {
while (true) {
try {
duplication.AcquireNextFrame(100, out info, out resource);
return;
} catch (SharpDXException x) {
if (x.ResultCode.Code != SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code)
ExceptionDispatchInfo.Capture(x).Throw();
}
}
}
}
}
First, I've only used SharpDX for a few days, so I'm by no means an expert, but I ran into a similar problem when capturing from a rotated monitor, and from what I've been able to deduce the captured frame is not rotated.
For example, if your monitor is rotated 90° to portrait (width × height 1080×1920), you'd expect the captured frame to be portrait as well, right? Nope, you get a 1920×1080 landscape bitmap, so your screen width = bitmap height and vice versa. That is also the likely cause of your black images: the staging texture in your code is created with the rotated desktop dimensions, which don't match the duplicated surface, so CopyResource never produces valid data.
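Applied to the code in the question, that means sizing the staging texture to the unrotated surface before copying. A minimal sketch, assuming SharpDX's DisplayModeRotation enum and C# 7 tuple syntax:

// Sketch: query the output's rotation and size the staging texture to
// the unrotated (always landscape) duplicated surface.
var desc = output1.Description;
int width = desc.DesktopBounds.Right - desc.DesktopBounds.Left;
int height = desc.DesktopBounds.Bottom - desc.DesktopBounds.Top;
if (desc.Rotation == SharpDX.DXGI.DisplayModeRotation.Rotate90 ||
    desc.Rotation == SharpDX.DXGI.DisplayModeRotation.Rotate270)
{
    // The desktop bounds are in rotated coordinates; the duplicated
    // frame is not, so swap the dimensions before creating the texture.
    (width, height) = (height, width);
}

You can then rotate the finished bitmap back to the desktop orientation (e.g. with Bitmap.RotateFlip), or hand the rotation off to whoever consumes the image, which is what the code below does.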
Here is the code I used in my capture class; it's still a work in progress:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Mathematics.Interop;
using Device = SharpDX.Direct3D11.Device;
using MapFlags = SharpDX.Direct3D11.MapFlags;
namespace EXM.ExampleCapture
{
public class DXScreenCaptureUtil {
private static ImageCodecInfo jpegCodec = ImageCodecInfo.GetImageEncoders()
.First(c => c.FormatID == ImageFormat.Jpeg.Guid);
private static EncoderParameters jpegParams = new EncoderParameters() { Param = new[] { new EncoderParameter(Encoder.Quality, 60L) } };
//Cache objects
private static Factory1 factory = new Factory1();
private static Adapter adapter;
private static Device device;
/// <summary>
/// Gets target device (Display) based on the rectangle we want to capture
/// </summary>
/// <param name="sourceRect">Rectangle we want to capture</param>
/// <returns>Screen which contains the area we want to capture or null if no device contains our area of interest</returns>
private static Screen GetTargetScreen(Rectangle sourceRect) {
foreach (var scr in Screen.AllScreens)
{
if (sourceRect.X >= scr.Bounds.X && sourceRect.Y >= scr.Bounds.Y
&& sourceRect.Right <= scr.Bounds.Width + scr.Bounds.X
&& sourceRect.Bottom <= scr.Bounds.Height + scr.Bounds.Y
)
{
return scr;
}
}
return null;
}
public static (byte[], int) Capture(Rectangle sourceRect, int jpegQuality) {
Screen targetScreen = GetTargetScreen(sourceRect);
if (targetScreen == null) {
throw new Exception($"Could not find target screen for capture rectangle {sourceRect}");
}
//This is to instruct the client receiving the image to rotate it; offloading the rotation to the client saves a bit of CPU time on the server
int rotation = 0;
byte[] imageBytes = null;
// Width/Height of desktop to capture
int width = targetScreen.Bounds.Width;
int height = targetScreen.Bounds.Height;
Rectangle cropRect = new Rectangle(sourceRect.X - targetScreen.Bounds.X, sourceRect.Y - targetScreen.Bounds.Y, sourceRect.Width, sourceRect.Height);
// Find the adapter whose outputs include the target screen
if (adapter == null) { adapter = factory.Adapters.Where(x => x.Outputs.Any(o => o.Description.DeviceName == targetScreen.DeviceName)).FirstOrDefault(); }
// Create device from Adapter
if (device == null) { device = new Device(adapter); }
//using (var output = adapter.Outputs.Where(o => o.Description.DeviceName == targetScreen.DeviceName).FirstOrDefault()) //This creates a memory leak!
Output output = null;
//I'm open to suggestions here:
for (int i = 0; i < adapter.GetOutputCount(); i++) {
output = adapter.GetOutput(i);
if (output.Description.DeviceName == targetScreen.DeviceName) {
break;
}
else {
output.Dispose();
}
}
using (var output1 = output.QueryInterface<Output1>()) {
if (output1.Description.Rotation == DisplayModeRotation.Rotate90) {
width = targetScreen.Bounds.Height;
height = targetScreen.Bounds.Width;
int offsetX = targetScreen.Bounds.X - sourceRect.X;
cropRect = new Rectangle(
sourceRect.Y - targetScreen.Bounds.Y,
targetScreen.Bounds.Width - (sourceRect.Width + offsetX),
sourceRect.Height, sourceRect.Width);
rotation = 90;
}
else if (output1.Description.Rotation == DisplayModeRotation.Rotate270) {
width = targetScreen.Bounds.Height;
height = targetScreen.Bounds.Width;
int offsetY = targetScreen.Bounds.Y - sourceRect.Y;
cropRect = new Rectangle(
targetScreen.Bounds.Height - (sourceRect.Height + offsetY),
targetScreen.Bounds.X - sourceRect.X,
sourceRect.Height, sourceRect.Width);
rotation = 270;
}
else if (output1.Description.Rotation == DisplayModeRotation.Rotate180) {
rotation = 180;
}
// Create Staging texture CPU-accessible
var textureDesc = new Texture2DDescription {
CpuAccessFlags = CpuAccessFlags.Read,
BindFlags = BindFlags.None,
Format = Format.B8G8R8A8_UNorm,
Width = width,
Height = height,
OptionFlags = ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = ResourceUsage.Staging
};
using (var screenTexture = new Texture2D(device, textureDesc))
//Duplicate the output
using (var duplicatedOutput = output1.DuplicateOutput(device)) {
bool captureDone = false;
SharpDX.DXGI.Resource screenResource = null;
OutputDuplicateFrameInformation duplicateFrameInformation;
for (int i = 0; !captureDone; i++) {
try {
//Try to get duplicated frame within given time
duplicatedOutput.AcquireNextFrame(1000, out duplicateFrameInformation, out screenResource);
//Ignore first call, this always seems to return a black frame
if (i == 0) {
screenResource.Dispose();
continue;
}
//copy resource into memory that can be accessed by the CPU
using (var screenTexture2D = screenResource.QueryInterface<Texture2D>()) {
device.ImmediateContext.CopyResource(screenTexture2D, screenTexture);
}
//Get the desktop capture texture
var mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, MapMode.Read, MapFlags.None);
var boundsRect = new System.Drawing.Rectangle(0, 0, width, height);
//Create Drawing.Bitmap
using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppArgb)) {
//Copy pixels from screen capture Texture to GDI bitmap
var bitmapData = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
var sourcePtr = mapSource.DataPointer;
var destinationPtr = bitmapData.Scan0;
for (int y = 0; y < height; y++) {
//Copy a single line
Utilities.CopyMemory(destinationPtr, sourcePtr, width * 4);
//Advance pointers
sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
destinationPtr = IntPtr.Add(destinationPtr, bitmapData.Stride);
}
//Release source and dest locks
bitmap.UnlockBits(bitmapData);
device.ImmediateContext.UnmapSubresource(screenTexture, 0);
//Save the output
imageBytes = CropBitmapToJPEGBytes(bitmap, cropRect, jpegQuality);
}
//Capture done
captureDone = true;
}
catch (SharpDXException e) {
if (e.ResultCode.Code != SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code) {
throw;
}
}
finally {
//Dispose manually
if (screenResource != null) {
screenResource.Dispose();
}
duplicatedOutput.ReleaseFrame();
}
}
}
}
output.Dispose();
return (imageBytes, rotation);
}
/// <summary>
/// Crops a bitmap and encodes the result as JPEG
/// </summary>
/// <param name="orig">Original bitmap</param>
/// <param name="cropRect">Crop rectangle</param>
/// <param name="jpegQuality">JPEG quality (0-100)</param>
/// <returns>JPEG bytes of the cropped bitmap</returns>
static byte[] CropBitmapToJPEGBytes(Bitmap orig, Rectangle cropRect, int jpegQuality) {
EncoderParameter qualityParam = new EncoderParameter(Encoder.Quality, (long)jpegQuality);
jpegParams.Param[0] = qualityParam;
byte[] imageBytes;
using (Bitmap nb = new Bitmap(cropRect.Width, cropRect.Height)) {
using (Graphics g = Graphics.FromImage(nb)) {
g.DrawImage(orig, -cropRect.X, -cropRect.Y);
using (MemoryStream s = new MemoryStream()) {
nb.Save(s, jpegCodec, jpegParams);
imageBytes = s.ToArray();
}
}
}
return imageBytes;
}
}
}
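On the receiving end, the rotation value returned alongside the JPEG bytes can be applied after decoding. A hypothetical client-side sketch; note that whether a reported 90 needs Rotate90 or Rotate270 depends on which way the display is turned, so verify it against a test capture:

using System.Drawing;
using System.IO;

// Sketch: decode the JPEG bytes and undo the capture rotation.
static Bitmap DecodeAndRotate(byte[] jpegBytes, int rotation)
{
    var bmp = new Bitmap(new MemoryStream(jpegBytes));
    switch (rotation)
    {
        case 90: bmp.RotateFlip(RotateFlipType.Rotate90FlipNone); break;
        case 180: bmp.RotateFlip(RotateFlipType.Rotate180FlipNone); break;
        case 270: bmp.RotateFlip(RotateFlipType.Rotate270FlipNone); break;
    }
    return bmp;
}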
I have some images which contain 3-4 barcodes. I want to mark all the barcodes irrespective of their position. I'm trying to get all the rectangles in the images using the code below, but it either returns nothing or does not mark the barcodes. Am I missing something? Any pointers would be greatly appreciated.
I also tried to follow this tutorial and port it to EmguCV, but I was not sure what to pass for the missing parameters of certain functions. The commented parts are the ones I'm not sure about. Please guide me in the correct direction.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.Structure;
using System.IO;
using static System.Net.Mime.MediaTypeNames;
using Emgu.CV.CvEnum;
using Emgu.CV.Util;
using System.Windows.Forms;
namespace ConsoleApplication4
{
class Program
{
//public static Mat mat = new Mat();
// public static Mat kernel = new Mat();
// private static Image<Bgr, byte> gradX = mat.ToImage<Bgr,byte>();
// private static Image<Bgr, byte> gradY = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> gradient = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> blur = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> thresh = mat.ToImage<Bgr, byte>();
// private static Image<Bgr, byte> closed = mat.ToImage<Bgr, byte>();
static void Main(string[] args)
{
Image<Bgr, byte> gambar = new Image<Bgr, byte>("source.jpg");
Image<Bgr, byte> kotak = detectBarcode(gambar);
kotak.ToBitmap().Save("destination.jpg");
Console.ReadKey();
}
private static Image<Bgr, byte> detectBarcode(Image<Bgr, byte> image)
{
try
{
Image<Gray, byte> imageGrey = image.Convert<Gray, byte>();
//CvInvoke.Sobel(imageGrey, gradX, DepthType.Cv32F, 1, 0, -1);
//CvInvoke.Sobel(imageGrey, gradY, DepthType.Cv32F, 0, 1, -1);
//CvInvoke.Subtract(gradX, gradY, gradient);
//CvInvoke.ConvertScaleAbs(gradient, gradient, 0, 0);
//CvInvoke.Blur(gradient, blur, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.Threshold(blur, thresh, 255, 255, ThresholdType.Binary);
//kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(new System.Drawing.Point(9, 9)), new System.Drawing.Point(9, 9));
//CvInvoke.MorphologyEx(thresh,closed,MorphOp.Close,kernel,);
//CvInvoke.Erode(closed,closed, new System.Drawing.Point(0, 0),4,BorderType.Default,);
//CvInvoke.Dilate(closed, closed, new System.Drawing.Point(0, 0), 4, BorderType.Default,);
List<RotatedRect> boxList = new List<RotatedRect>();
UMat cannyEdges = new UMat();
double cannyThreshold = 180.0;
double cannyThresholdLinking = 120.0;
CvInvoke.Canny(imageGrey, cannyEdges, cannyThreshold, cannyThresholdLinking);
using (VectorOfVectorOfPoint countours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, countours, null, RetrType.List,
ChainApproxMethod.ChainApproxSimple);
int count = countours.Size;
for (int i = 0; i < count; i++)
{
using (VectorOfPoint kontur = countours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(kontur, approxContour, CvInvoke.ArcLength(kontur, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250
{
if (approxContour.Size == 4) //rectangle
{
//determine if all the angles in the contour are within [80, 100] degrees
bool isRectangle = true;
System.Drawing.Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = Emgu.CV.PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double angle = Math.Abs(
edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
if (angle < 80 || angle > 100)
{
isRectangle = false;
break;
}
}
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
Image<Bgr, byte> triRectImage = image.Copy();
foreach (RotatedRect box in boxList)
triRectImage.Draw(box, new Bgr(0, 0, 0), 5);
return triRectImage;
}
catch (Exception e) {
Console.WriteLine(e.StackTrace);
return null;
}
}
}
}
I find myself referring you to the documentation, for example:
public static void Sobel(IInputArray src ,IOutputArray dst,
DepthType ddepth, int xorder, int yorder, int kSize = 3, double scale = 1, double delta = 0, BorderType borderType =
BorderType.Reflect101 )
There follows a detailed list of the parameters and what they mean. If you don't actually understand any of this then I would suggest you need to read the tutorials thoroughly because otherwise you will need an expert in Emgu CV to tell you how to write your program, which isn't exactly the point of this site.
I don't wish to sound unkind but you at least need to have a stab at whatever it is you are trying to do.
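That said, to get you started, here is a sketch of how the commented-out calls might be completed. These are Emgu CV 3.x-style CvInvoke overloads; Point(-1, -1) means "anchor at the kernel centre", new MCvScalar() is the default border value, and the 225 threshold and 21×7 kernel follow the spirit of the tutorial rather than being verified values:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Sketch: gradient -> blur -> threshold -> close -> erode/dilate,
// returning a binary image whose filled regions are barcode candidates.
static Mat CloseBarcodeRegions(IInputArray imageGrey)
{
    Mat gradX = new Mat(), gradY = new Mat(), gradient = new Mat();
    Mat absGrad = new Mat(), blur = new Mat(), thresh = new Mat(), closed = new Mat();
    CvInvoke.Sobel(imageGrey, gradX, DepthType.Cv32F, 1, 0, -1);
    CvInvoke.Sobel(imageGrey, gradY, DepthType.Cv32F, 0, 1, -1);
    // Subtract the y-gradient from the x-gradient to emphasise
    // regions with high horizontal and low vertical gradient
    CvInvoke.Subtract(gradX, gradY, gradient);
    CvInvoke.ConvertScaleAbs(gradient, absGrad, 1, 0);
    CvInvoke.Blur(absGrad, blur, new Size(9, 9), new Point(-1, -1));
    CvInvoke.Threshold(blur, thresh, 225, 255, ThresholdType.Binary);
    // Close the gaps between barcode bars with a wide, short kernel
    Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle,
        new Size(21, 7), new Point(-1, -1));
    CvInvoke.MorphologyEx(thresh, closed, MorphOp.Close, kernel,
        new Point(-1, -1), 1, BorderType.Default, new MCvScalar());
    // A null element means the default 3x3 structuring element
    CvInvoke.Erode(closed, closed, null, new Point(-1, -1), 4,
        BorderType.Default, new MCvScalar());
    CvInvoke.Dilate(closed, closed, null, new Point(-1, -1), 4,
        BorderType.Default, new MCvScalar());
    return closed;
}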
I wrote code that reads an image and calculates the normal vector for every pixel from the RGB values. The problem is that even if I choose a 256x256 image, C# takes too long to process it and sometimes the program shuts down. Can someone show me another method or a different solution?
Code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Aufgabe17NormalMapping
{
public partial class Form1 : Form
{
public delegate void TextManagerDelegate(string text);
TextManagerDelegate TextHandler;
Bitmap img;
public Form1()
{
InitializeComponent();
TextHandler = new TextManagerDelegate(TextManager);
}
public void TextManager(string txt)
{
textBox1.AppendText(txt + Environment.NewLine);
}
public void RunProcess()
{
string filePath = @"C:\Users\USerName\Documents\Visual Studio 2017\Projects\Aufgabe17NormalMapping\images.jpg";
using (img = new Bitmap(filePath))
{
var height = img.Height;
var width = img.Width;
double[,] ArrayR = new double[width, height];
double[,] ArrayG = new double[width, height];
double[,] ArrayB = new double[width, height];
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
Color pixel = img.GetPixel(i, j);
ArrayR[i, j] = (((double)Convert.ToDouble(pixel.R.ToString())) + 0.5) * 2;
ArrayG[i, j] = (((double)Convert.ToDouble(pixel.G.ToString())) + 0.5) * 2;
ArrayB[i, j] = (((double)Convert.ToDouble(pixel.B.ToString())) + 0.5) * 2;
Debug.WriteLine(ArrayR[i, j].ToString(), "Array R");
//Debug.Print(ArrayR[i, j].ToString(), "Array Rsdsd");
BeginInvoke(TextHandler, ArrayR[i, j].ToString());
}
}
}
}
private void button1_Click(object sender, EventArgs e)
{
Task T = new Task(RunProcess);
T.Start();
}
}
}
Your code is really fast; it is the "reporting" that takes a long time. Removing the Debug.WriteLine and TextHandler calls, I got the following time (in debug mode):
ImgSize 532x1280
Lasted: 0:00:01,181845
Code snippet:
static void Main(string[] args)
{
var watch = Stopwatch.StartNew();
RunProcess();
Console.WriteLine($"Lasted: {watch.Elapsed.ToString("g")}");
}
public static void RunProcess()
{
var str = new StringBuilder();
string filePath = @"C:\Users\Wouter\Desktop\gamez.jpg";
using (var img = new Bitmap(filePath))
{
var height = img.Height;
var width = img.Width;
Console.WriteLine($"ImgSize {width}x{height}");
double[,] ArrayR = new double[width, height];
double[,] ArrayG = new double[width, height];
double[,] ArrayB = new double[width, height];
for (int i = 0; i < width; i++)
{
for (int j = 0; j < height; j++)
{
Color pixel = img.GetPixel(i, j);
ArrayR[i, j] = (((double)Convert.ToDouble(pixel.R.ToString())) + 0.5) * 2;
ArrayG[i, j] = (((double)Convert.ToDouble(pixel.G.ToString())) + 0.5) * 2;
ArrayB[i, j] = (((double)Convert.ToDouble(pixel.B.ToString())) + 0.5) * 2;
str.AppendLine(ArrayR[i, j].ToString());
}
}
}
// In the WinForms version, marshal the finished string to the UI once:
// BeginInvoke(TextHandler, str.ToString());
Console.WriteLine(str.ToString());
}
Try working with a StringBuilder and once you've got the entire image processed, update the UI once.
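If the pixel loop itself ever becomes the bottleneck, note that GetPixel is the slow path in GDI+. A sketch of reading all pixel data in one call with LockBits, assuming a bitmap you are happy to read as 32bpp ARGB:

using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;

// Sketch: copy the whole bitmap into a managed byte array in one go
// instead of calling GetPixel per pixel. Layout per pixel is B, G, R, A;
// each row starts at a multiple of data.Stride.
public static byte[] GetPixelBytes(Bitmap img)
{
    var rect = new Rectangle(0, 0, img.Width, img.Height);
    BitmapData data = img.LockBits(rect, ImageLockMode.ReadOnly,
        PixelFormat.Format32bppArgb);
    try
    {
        var bytes = new byte[data.Stride * data.Height];
        Marshal.Copy(data.Scan0, bytes, 0, bytes.Length);
        return bytes;
    }
    finally
    {
        img.UnlockBits(data);
    }
}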
I am trying to implement Loop Subdivision algorithm in C#.
http://www.cs.cmu.edu/afs/cs/academic/class/15462-s13/www/lec_slides/project2_slides.pdf
How can I keep track of all the edges for which I have already found the new vertex? Here's my code to load a JSON file of the wireframe. I have created a new class, Subdivision, to subdivide the mesh, but it is not working as there are errors. Can anyone please shed some light on how I can subdivide the mesh?
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using System.IO;
using Newtonsoft.Json;
namespace MonkeySubdivision
{
public partial class Form1 : Form
{
Mesh[] meshes0;
Mesh[] meshes1;
Mesh[] meshes2;
bool loaded = false;
public Form1()
{
InitializeComponent();
}
private void display()
{
if (!loaded)
return;
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Viewport(0, 0, Width, Height);
Matrix4 world = Matrix4.LookAt(Vector3.Zero, Vector3.UnitZ, Vector3.UnitY);
GL.MatrixMode(MatrixMode.Modelview);
GL.LoadMatrix(ref world);
GL.LoadIdentity();
GL.Translate(-0.1f, -0.4f, -5.5f);
meshes0 = LoadJson();
meshes1 = Subdivision.subdivsion(meshes0);
// Subdivide the mesh depending on the numeric value
if (numericUpDown1.Value == 1)
{
// meshes1 = Subdivision.subdivsion(meshes0);
Console.Write("Inside 1st subdivision");
Console.WriteLine("This should be displayed");
meshes0 = meshes1;
}
if (numericUpDown1.Value == 2)
{
Console.Write("Inside 2nd subdivision");
}
if (numericUpDown1.Value == 3)
{
Console.Write("Inside 3rd subdivision");
}
if (numericUpDown1.Value == 4)
{
Console.Write("Inside 4th subdivision");
}
if (numericUpDown1.Value == 5)
{
Console.Write("Inside 5th subdivision");
}
int vertcount = getnumvertices(meshes0);
label2.Text = vertcount.ToString();
int facecount = getnumfaces(meshes0);
label4.Text = facecount.ToString();
// Code To Display the triangles on screen
foreach (Mesh mesh in meshes0)
{
foreach (var face in mesh.Faces)
{
GL.Begin(PrimitiveType.Triangles);
GL.Color3(Color.Yellow);
GL.Vertex3(mesh.Vertices[face.A]);
GL.Vertex3(mesh.Vertices[face.B]);
GL.Vertex3(mesh.Vertices[face.C]);
GL.End();
}
GL.PolygonMode(MaterialFace.Front, PolygonMode.Line);
GL.PolygonMode(MaterialFace.Back, PolygonMode.Line);
GL.Flush();
glControl1.SwapBuffers();
}
}
//Number of faces in a Mesh
private int getnumfaces(Mesh[] meshsub)
{
int count = 0;
foreach (Mesh mesh in meshsub)
{
foreach (var face in mesh.Faces)
count = count + 1;
}
return count;
}
//Number of vertices in a Mesh
private int getnumvertices(Mesh[] meshsub)
{
int count = 0;
foreach (Mesh mesh in meshsub)
{
foreach (var face in mesh.Vertices)
count = count + 1;
}
return count;
}
private void glControl1_Resize(object sender, EventArgs e)
{
GL.Viewport(ClientRectangle.X, ClientRectangle.Y, ClientRectangle.Width, ClientRectangle.Height);
Matrix4 projection = Matrix4.CreatePerspectiveFieldOfView((float)Math.PI / 4, Width / (float)Height, 1.0f, 64.0f);
GL.MatrixMode(MatrixMode.Projection);
GL.LoadMatrix(ref projection);
SetupViewport();
glControl1.Invalidate();
}
private void glControl1_Paint(object sender, PaintEventArgs e)
{
display();
}
private void glControl1_Load(object sender, EventArgs e)
{
loaded = true;
GL.ClearColor(Color.Black);
GL.ShadeModel(ShadingModel.Smooth);
GL.ClearColor(Color.Black);
GL.ClearDepth(1.0f);
GL.Enable(EnableCap.DepthTest);
GL.DepthFunc(DepthFunction.Lequal);
GL.Hint(HintTarget.PerspectiveCorrectionHint, HintMode.Nicest);
SetupViewport();
Application.Idle += Application_Idle; // press TAB twice after +=
}
void Application_Idle(object sender, EventArgs e)
{
// no guard needed -- we hooked into the event in Load handler
while (glControl1.IsIdle)
{
display();
SetupViewport();
}
}
private void SetupViewport()
{
int w = glControl1.Width;
int h = glControl1.Height;
GL.Viewport(ClientRectangle.X, ClientRectangle.Y, ClientRectangle.Width, ClientRectangle.Height);
Matrix4 projection = Matrix4.CreatePerspectiveFieldOfView((float)Math.PI / 4, w / (float)h, 1.0f, 64.0f);
GL.MatrixMode(MatrixMode.Projection);
GL.LoadMatrix(ref projection);
GL.Viewport(0, 0, w, h); // Use all of the glControl painting area
}
// JSON file parser
private Mesh[] LoadJson()
{
var meshes = new List<Mesh>();
using (StreamReader r = new StreamReader("cube.babylon"))
{
string json = r.ReadToEnd();
dynamic jsonObject = JsonConvert.DeserializeObject(json);
for (var meshIndex = 0; meshIndex < jsonObject.meshes.Count; meshIndex++)
{
var verticesArray = jsonObject.meshes[meshIndex].vertices;
// Faces
var indicesArray = jsonObject.meshes[meshIndex].indices;
var uvCount = jsonObject.meshes[meshIndex].uvCount.Value;
var verticesStep = 1;
// Depending on the number of texture coordinates per vertex,
// we step through the vertices array by 6, 8 or 10 values
switch ((int)uvCount)
{
case 0:
verticesStep = 6;
break;
case 1:
verticesStep = 8;
break;
case 2:
verticesStep = 10;
break;
}
// the number of interesting vertices information for us
var verticesCount = verticesArray.Count / verticesStep;
// number of faces is logically the size of the array divided by 3 (A, B, C)
var facesCount = indicesArray.Count / 3;
var mesh = new Mesh(jsonObject.meshes[meshIndex].name.Value, verticesCount, facesCount);
// Filling the Vertices array of our mesh first
for (var index = 0; index < verticesCount; index++)
{
var x = (float)verticesArray[index * verticesStep].Value;
var y = (float)verticesArray[index * verticesStep + 1].Value;
var z = (float)verticesArray[index * verticesStep + 2].Value;
mesh.Vertices[index] = new Vector3(x, y, z);
}
// Then filling the Faces array
for (var index = 0; index < facesCount; index++)
{
var a = (int)indicesArray[index * 3].Value;
var b = (int)indicesArray[index * 3 + 1].Value;
var c = (int)indicesArray[index * 3 + 2].Value;
mesh.Faces[index] = new Face { A = a, B = b, C = c };
}
// Getting the position you've set in Blender
var position = jsonObject.meshes[meshIndex].position;
mesh.Position = new Vector3((float)position[0].Value, (float)position[1].Value, (float)position[2].Value);
meshes.Add(mesh);
}
return meshes.ToArray();
}
}
private void Form1_Load(object sender, EventArgs e)
{
glControl1.Resize += new EventHandler(glControl1_Resize);
meshes0 = LoadJson();
meshes1 = Subdivision.subdivsion(meshes0);
}
private void numericUpDown1_ValueChanged(object sender, EventArgs e)
{
}
}
}
The subdivision function takes the mesh and subdivides it according to the Loop subdivision algorithm.
using OpenTK;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Collections;
namespace MonkeySubdivision
{
class Subdivision
{
public static Mesh[] subdivsion(Mesh[] meshsub)
{
var meshes = new List<Mesh>();
//Vertices of a mesh
//Hashtable edges = new Hashtable();
int verticescount = getnumvertices(meshsub);
Console.WriteLine(verticescount);
int facecount = getnumfaces(meshsub);
int edgecount = verticescount + facecount - 2;
int newvercount = verticescount + edgecount;
int newfacecount = facecount * 4;
Vector3[] NewVertices = new Vector3[newvercount];
var meshnew = new Mesh("subdmesh", newvercount, newfacecount);
foreach (Mesh mesh in meshsub)
{
//for (var j = 0; j < verticescount; j++)
//{
// Console.WriteLine(mesh.Vertices[j]);
// NewVertices[j] = mesh.Vertices[j];
//}
foreach (Mesh mesh2 in meshsub)
{
//for (var index = 0; index < facecount; index++)
//{
// foreach (var faces in mesh.Faces)
// {
// meshnew.Faces[index] = mesh.Faces[index];
// }
//}
int i = 0;
foreach (var face in mesh.Faces)
{
var P0 = face.A;
var P1 = face.B;
var P2 = face.C;
Console.WriteLine("Faces");
Console.WriteLine(P0);
Console.WriteLine(P1);
Console.WriteLine(P2);
NewVertices[i] = getfourthvert(P0, P1, P2, meshsub);
NewVertices[i + 1 ] = getfourthvert(P1, P2, P0, meshsub);
NewVertices[i + 2] = getfourthvert(P2, P0, P1,meshsub);
i = i + 3;
for (var index = verticescount; index < newvercount; index++)
{
meshnew.Vertices[index] = NewVertices[index];
}
/* for(var index = facecount; index < newfacecount; index++)
{
var a = face.A;
var b = (int)indicesArray[index * 3 + 1].Value;
var c = (int)indicesArray[index * 3 + 2].Value;
mesh.Faces[index] = new Face { A = a, B = b, C = c };
}*/
meshes.Add(meshnew);
}
int n = 6;
double num = (3.0 + 2.0 * Math.Cos(2.0 * Math.PI / n));
double beta = 1.0 / n * (5.0 / 8.0 - num * num / 64.0);
}
}
return meshes.ToArray();
}
private static int getnumfaces(Mesh[] meshsub)
{
int count = 0;
foreach (Mesh mesh in meshsub)
{
foreach (var face in mesh.Faces)
count = count + 1;
}
return count;
}
private static int getnumvertices(Mesh[] meshsub)
{
int count = 0;
foreach (Mesh mesh in meshsub)
{
foreach (var vert in mesh.Vertices)
count = count + 1;
}
return count;
}
private static Vector3 getfourthvert(int X0, int X1, int X2, Mesh[] meshsub)
{
int X3;
Vector3 V4 = new Vector3(0, 0, 0);
foreach (Mesh mesh in meshsub)
{
foreach (var face2 in mesh.Faces)
{
var V0 = mesh.Vertices[X0];
var V1 = mesh.Vertices[X1];
var V2 = mesh.Vertices[X2];
var V3 = mesh.Vertices[0];
if ((X0 == face2.A) && (X1 == face2.B))
{
var temp = face2.C;
if (temp != X2)
{
X3 = temp;
V3 = mesh.Vertices[X3];
V4 = (3 * V0 + 3 * V1 + V2 + V3) / 8;
}
}
}
}
Console.WriteLine(V4);
return V4;
}
}
}
You need a class for a mesh which can contain three links to other meshes. You can also keep a list of all meshes, but it should be static.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
namespace WindowsFormsApplication1
{
public class Mesh
{
static List<Mesh> meshes { get; set; }
static List<PolyGon> polygons { get; set; }
}
public class PolyGon
{
List<Edge> edges { get; set; }
}
public class Edge
{
List<PointF> points { get; set; } //two points
List<PolyGon> parents { get; set; } // two parents
}
}
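To address the "how do I keep track of edges" part of the question directly: a common approach is a dictionary keyed by the sorted pair of vertex indices, so the new ("odd") vertex of each edge is created exactly once even though every interior edge is visited by two triangles. A minimal sketch using OpenTK's Vector3; the class and method names are made up, and the midpoint placeholder stands in for the full Loop edge rule:

using System.Collections.Generic;
using OpenTK;

// Sketch: cache each edge's new vertex index so shared edges are
// split only once. The key is the vertex-index pair sorted so that
// (a, b) and (b, a) refer to the same edge.
class EdgeMidpointCache
{
    private readonly Dictionary<(int, int), int> cache =
        new Dictionary<(int, int), int>();

    public int GetOrCreate(int a, int b, List<Vector3> vertices)
    {
        var key = a < b ? (a, b) : (b, a);
        if (cache.TryGetValue(key, out int index))
            return index;
        // Placeholder: plain midpoint. The Loop edge rule would be
        // (3*V0 + 3*V1 + V2 + V3) / 8, where V2 and V3 are the
        // opposite vertices of the two triangles sharing the edge.
        vertices.Add((vertices[a] + vertices[b]) / 2f);
        index = vertices.Count - 1;
        cache[key] = index;
        return index;
    }
}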
Hello, I am in the process of doing a school project where we have a robot driving on the ground between Flamingo plates. We need to create an algorithm that can identify the locations of these plates, so we can create paths around them (we are using A* for that).
So far we have worked with the AForge library and created the following class. The only problem is that when it creates the rectangles, it does not take into account that the plates are not always parallel with the camera border; in that case it just creates a rectangle that covers the whole plate.
So we need some way to find the rotation of the object, or another way to identify this.
I have created an image that might help explain this.
Image that describes the problem: http://img683.imageshack.us/img683/9835/imagerectangle.png
Any help on how I can do this would be greatly appreciated.
Any other information or ideas are always welcome.
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using AForge.Imaging;
using AForge.Imaging.Filters;
public class PasteMap
{
private Bitmap image;
private Bitmap processedImage;
private Rectangle[] rectangels;
public void initialize(Bitmap image)
{
this.image = image;
}
public void process()
{
processedImage = image;
processedImage = applyFilters(processedImage);
processedImage = filterWhite(processedImage);
rectangels = extractRectangles(processedImage);
//rectangels = filterRectangles(rectangels);
processedImage = drawRectangelsToImage(processedImage, rectangels);
}
public Bitmap getProcessedImage
{
get
{
return processedImage;
}
}
public Rectangle[] getRectangles
{
get
{
return rectangels;
}
}
private Bitmap applyFilters(Bitmap image)
{
image = new ContrastCorrection(2).Apply(image);
image = new GaussianBlur(10, 10).Apply(image);
return image;
}
private Bitmap filterWhite(Bitmap image)
{
Bitmap test = new Bitmap(image.Width, image.Height);
for (int width = 0; width < image.Width; width++)
{
for (int height = 0; height < image.Height; height++)
{
if (image.GetPixel(width, height).R > 200 &&
image.GetPixel(width, height).G > 200 &&
image.GetPixel(width, height).B > 200)
{
test.SetPixel(width, height, Color.White);
}
else
test.SetPixel(width, height, Color.Black);
}
}
return test;
}
private Rectangle[] extractRectangles(Bitmap image)
{
BlobCounter bc = new BlobCounter();
bc.FilterBlobs = true;
bc.MinWidth = 5;
bc.MinHeight = 5;
// process binary image
bc.ProcessImage( image );
Blob[] blobs = bc.GetObjects(image, false);
// process blobs
List<Rectangle> rects = new List<Rectangle>();
foreach (Blob blob in blobs)
{
if (blob.Area > 1000)
{
rects.Add(blob.Rectangle);
}
}
return rects.ToArray();
}
private Rectangle[] filterRectangles(Rectangle[] rects)
{
List<Rectangle> Rectangles = new List<Rectangle>();
foreach (Rectangle rect in rects)
{
if (rect.Width > 75 && rect.Height > 75)
Rectangles.Add(rect);
}
return Rectangles.ToArray();
}
private Bitmap drawRectangelsToImage(Bitmap image, Rectangle[] rects)
{
BitmapData data = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
foreach (Rectangle rect in rects)
Drawing.FillRectangle(data, rect, Color.Red);
image.UnlockBits(data);
return image;
}
}
You need to analyse the blobs a bit more to find the corners, as @kigurai has said. The AForge library allows you to do this; see the section Finding convex hull on this page for more info. The screenshot below (from the page) shows a small sample of what the convex hull is.
[Screenshot from the AForge documentation illustrating a blob's convex hull] (source: aforgenet.com)
You want to take a look at the GetBlobsLeftAndRightEdges function and the GrahamConvexHull class.
If anyone is interested, this is the way I did it.
Blobsprocessing:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class Blobsprocessing
{
Bitmap image;
BlobCounter BlobCounter;
Blob[] blobs;
List<Polygon> hulls;
public Blobsprocessing(Bitmap image)
{
this.image = image;
}
public void Process()
{
BlobCounter = new BlobCounter();
processBlobs();
extractConvexHull();
}
public List<Polygon> getHulls()
{
return hulls;
}
private void processBlobs()
{
BlobCounter.FilterBlobs = true;
BlobCounter.MinWidth = 5;
BlobCounter.MinHeight = 5;
// set ordering options
BlobCounter.ObjectsOrder = ObjectsOrder.Size;
// process binary image
BlobCounter.ProcessImage(image);
blobs = BlobCounter.GetObjectsInformation();
}
private void extractConvexHull()
{
GrahamConvexHull hullFinder = new GrahamConvexHull();
// process each blob
hulls = new List<Polygon>();
foreach (Blob blob in blobs)
{
List<IntPoint> leftPoints, rightPoints, edgePoints;
edgePoints = new List<IntPoint>();
// get blob's edge points
BlobCounter.GetBlobsLeftAndRightEdges(blob,
out leftPoints, out rightPoints);
edgePoints.AddRange(leftPoints);
edgePoints.AddRange(rightPoints);
// blob's convex hull
List<IntPoint> hull = hullFinder.FindHull(edgePoints);
hulls.Add(new Polygon(hull));
}
}
}
}
MapFilters:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class MapFilters
{
private Bitmap image;
private Bitmap processedImage;
private Rectangle[] rectangels;
public void initialize(Bitmap image)
{
this.image = image;
}
public void process()
{
processedImage = image;
processedImage = applyFilters(processedImage);
processedImage = filterWhite(processedImage);
}
public Bitmap getProcessedImage
{
get
{
return processedImage;
}
}
private Bitmap applyFilters(Bitmap image)
{
image = new ContrastCorrection(2).Apply(image);
image = new GaussianBlur(10, 10).Apply(image);
return image;
}
private Bitmap filterWhite(Bitmap image)
{
Bitmap test = new Bitmap(image.Width, image.Height);
for (int width = 0; width < image.Width; width++)
{
for (int height = 0; height < image.Height; height++)
{
if (image.GetPixel(width, height).R > 200 &&
image.GetPixel(width, height).G > 200 &&
image.GetPixel(width, height).B > 200)
{
test.SetPixel(width, height, Color.White);
}
else
test.SetPixel(width, height, Color.Black);
}
}
return test;
}
}
}
Polygon:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
using System.Threading;
using AForge;
using AForge.Imaging;
using AForge.Imaging.Filters;
using AForge.Imaging.Textures;
using AForge.Math.Geometry;
namespace CDIO.Library
{
public class Polygon
{
List<IntPoint> hull;
public Polygon(List<IntPoint> hull)
{
this.hull = hull;
}
public bool inPoly(int x, int y)
{
int i, j = hull.Count - 1;
bool oddNodes = false;
for (i = 0; i < hull.Count; i++)
{
if (hull[i].Y < y && hull[j].Y >= y
|| hull[j].Y < y && hull[i].Y >= y)
{
// Crossing-number test: interpolate the edge's X at height y.
// Using double math avoids the integer DivideByZeroException the
// original try/catch worked around; the Y-range check above already
// guarantees hull[j].Y != hull[i].Y.
if (hull[i].X + (double)(y - hull[i].Y) / (hull[j].Y - hull[i].Y) * (hull[j].X - hull[i].X) < x)
{
oddNodes = !oddNodes;
}
}
j = i;
}
return oddNodes;
}
public Rectangle getRectangle()
{
int x = -1, y = -1, width = -1, height = -1;
foreach (IntPoint item in hull)
{
if (item.X < x || x == -1)
x = item.X;
if (item.Y < y || y == -1)
y = item.Y;
if (item.X > width || width == -1)
width = item.X;
if (item.Y > height || height == -1)
height = item.Y;
}
return new Rectangle(x, y, width-x, height-y);
}
public Bitmap drawRectangle(Bitmap image)
{
Rectangle rect = getRectangle();
Bitmap clonimage = (Bitmap)image.Clone();
BitmapData data = clonimage.LockBits(new Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadWrite, image.PixelFormat);
Drawing.FillRectangle (data, rect, getRandomColor());
clonimage.UnlockBits(data);
return clonimage;
}
public Point[] getMap()
{
List<Point> points = new List<Point>();
Rectangle rect = getRectangle();
for (int x = rect.X; x <= rect.X + rect.Width; x++)
{
for (int y = rect.Y; y <= rect.Y + rect.Height; y++)
{
if (inPoly(x, y))
points.Add(new Point(x, y));
}
}
return points.ToArray();
}
public float calculateArea()
{
List<IntPoint> list = new List<IntPoint>();
list.AddRange(hull);
list.Add(hull[0]);
float area = 0.0f;
for (int i = 0; i < hull.Count; i++)
{
area += list[i].X * list[i + 1].Y - list[i].Y * list[i + 1].X;
}
area = area / 2;
if (area < 0)
area = area * -1;
return area;
}
public Bitmap draw(Bitmap image)
{
Bitmap clonimage = (Bitmap)image.Clone();
BitmapData data = clonimage.LockBits(new Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadWrite, image.PixelFormat);
Drawing.Polygon(data, hull, Color.Red);
clonimage.UnlockBits(data);
return clonimage;
}
static Random random = new Random();
int Color1, Color2, Color3;
public Color getRandomColor()
{
Color1 = random.Next(0, 255);
Color2 = random.Next(0, 255);
Color3 = random.Next(0, 255);
Color color = Color.FromArgb(Color1, Color2, Color3);
Console.WriteLine("R: " + Color1 + " G: " + Color2 + " B: " + Color3 + " = " + color.Name);
return color;
}
}
}
The most straightforward solution is probably to find the corners of each detected blob and then geometrically calculate which point pairs make up the different sides of the squares.
This assumes that the camera is looking straight down, such that a square is actually a square in the image (no perspective distortion).
I am, however, a bit curious why you need to know the rotation of the rectangles. In all the example images the rectangles are more or less aligned with the image borders, so a bounding box for a rectangle blob would be very close to what you are trying to find. At least it should be good enough for path finding.
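As a sketch of the geometric step (assuming you already have a blob's corner points, e.g. from the convex hull), the rotation can be read off the angle of one side:

using System;
using System.Drawing;

// Sketch: angle of a rectangle side in degrees, measured from the
// image X axis. Assumes corners[0] and corners[1] are the endpoints
// of one side of the detected rectangle.
static double SideAngleDegrees(Point[] corners)
{
    double dx = corners[1].X - corners[0].X;
    double dy = corners[1].Y - corners[0].Y;
    return Math.Atan2(dy, dx) * 180.0 / Math.PI;
}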
You should be using neural networks.
See: http://en.wikipedia.org/wiki/Neural_network