I'm writing a small graphing program as a personal project.
Each point is supposed to be drawn on a WPF canvas, but I'm having trouble translating points from the graph's coordinate system (for example, x = -8 to 4, y = -4 to 4) to the canvas's coordinate system (for example, x = 600 to 0, y = 400 to 0).
I'm using the method outlined here to precompute the transformation equations.
However, I'm having trouble with the result of the multiplication v = M^-1 * u.
My expected result is:
[0.2 ]
[0.0 ]
[-8.0]
[4.0 ]
But the result I'm getting is:
[4.0 ]
[-4.0 ]
[0.01 ]
[-0.006]
I've verified that my transformation matrix is correct, and when I do the calculation by hand I get the expected result.
The method to calculate the transformation equations:
private void CalculateTransformationFunctions()
{
// Define the transformation matrix
var transformationMatrix = new Matrix4x4
{
M11 = _destArea.XMin,
M12 = _destArea.YMin,
M13 = 1,
M14 = 0,
M21 = -_destArea.YMin,
M22 = _destArea.XMin,
M23 = 0,
M24 = 1,
M31 = _destArea.XMax,
M32 = _destArea.YMax,
M33 = 1,
M34 = 0,
M41 = -_destArea.YMax,
M42 = _destArea.XMax,
M43 = 0,
M44 = 1
};
// Define the source vector
var srcVector = new Vector4
{
X = _srcArea.XMin,
Y = _srcArea.YMax,
Z = _srcArea.XMax,
W = _srcArea.YMin
};
// Invert the transformation matrix before the multiplication
Matrix4x4 invertedTransformationMatrix;
if (!Matrix4x4.Invert(transformationMatrix, out invertedTransformationMatrix))
throw new Exception();
// Returns the wrong value
var transformResult = Vector4.Transform(srcVector, invertedTransformationMatrix);
float a = transformResult.X,
b = transformResult.Y,
c = transformResult.Z,
d = transformResult.W;
_xTransformationFunction = (x, y) => (a*x + b*y - b*d - a*c)/(a*a + b*b);
_yTransformationFunction = (x, y) => (b*x - a*y - b*c + a*d)/(a*a + b*b);
}
This method is called in the constructor of its parent class.
My question:
Am I misunderstanding what Vector4.Transform() does here? Or am I completely blind and missing something very obvious?
Full source of the class:
using System;
using System.Numerics;
using System.Windows;
using System.Windows.Media;
using Grapher.Control.Grapher;
namespace Grapher.GraphingMath
{
public class Translator
{
private GraphingArea _srcArea;
private GraphingArea _destArea;
public GraphingArea SourceArea
{
get
{
return _srcArea;
}
set
{
_srcArea = value;
CalculateTransformationFunctions();
}
}
public GraphingArea DestinationArea
{
get { return _destArea; }
set
{
_destArea = value;
CalculateTransformationFunctions();
}
}
private Func<double, double, double> _xTransformationFunction;
private Func<double, double, double> _yTransformationFunction;
public Translator(GraphingArea sourceArea, GraphingArea destArea)
{
_destArea = destArea;
_srcArea = sourceArea;
CalculateTransformationFunctions();
}
public Point TranslatePoint(Point point)
{
var x = point.X;
var y = point.Y;
return new Point
{
X = _xTransformationFunction(x, y),
Y = _yTransformationFunction(x, y)
};
}
/*
         x1  y1  1  0
        -y1  x1  0  1
    M =  x2  y2  1  0
        -y2  x2  0  1

    x1,y1 = dest_min
    x2,y2 = dest_max
*/
private void CalculateTransformationFunctions()
{
// Define the transformation matrix
var transformationMatrix = new Matrix4x4
{
M11 = _destArea.XMin,
M12 = _destArea.YMin,
M13 = 1,
M14 = 0,
M21 = -_destArea.YMin,
M22 = _destArea.XMin,
M23 = 0,
M24 = 1,
M31 = _destArea.XMax,
M32 = _destArea.YMax,
M33 = 1,
M34 = 0,
M41 = -_destArea.YMax,
M42 = _destArea.XMax,
M43 = 0,
M44 = 1
};
// Define the source vector
var srcVector = new Vector4
{
X = _srcArea.XMin,
Y = _srcArea.YMax,
Z = _srcArea.XMax,
W = _srcArea.YMin
};
// Invert the transformation matrix before the multiplication
Matrix4x4 invertedTransformationMatrix;
if (!Matrix4x4.Invert(transformationMatrix, out invertedTransformationMatrix))
throw new Exception();
// Returns the wrong value
var transformResult = Vector4.Transform(srcVector, invertedTransformationMatrix);
float a = transformResult.X,
b = transformResult.Y,
c = transformResult.Z,
d = transformResult.W;
_xTransformationFunction = (x, y) => (a*x + b*y - b*d - a*c)/(a*a + b*b);
_yTransformationFunction = (x, y) => (b*x - a*y - b*c + a*d)/(a*a + b*b);
}
}
}
And for the graphing area struct:
using System;
namespace Grapher.Control.Grapher
{
public struct GraphingArea
{
public float XMin { get; set; }
public float YMin { get; set; }
public float XMax { get; set; }
public float YMax { get; set; }
public float Width => Math.Abs(XMax - XMin);
public float Height => Math.Abs(YMax - YMin);
}
}
In my main method I call the Translator class like this:
Point testPoint = new Point {X = 0, Y = 0};
var srcArea = new GraphingArea
{
XMax = 4,
XMin = -8,
YMax = 4,
YMin = -4
};
var destArea = new GraphingArea
{
XMax = 600,
XMin = 0,
YMax = 400,
YMin = 0
};
var translator = new Translator(srcArea, destArea);
var translatedPoint = translator.TranslatePoint(testPoint);
Edit
Ended up just writing my own matrix multiplication method. I must be misunderstanding what Vector4.Transform() does...
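(For anyone else hitting this: System.Numerics uses the row-vector convention, so Vector4.Transform(v, M) computes v * M rather than M * v. Assuming that is what's going on here, transposing the matrix before the call should reproduce the hand calculation.)
// Vector4.Transform treats the input as a row vector, i.e. it computes v * M.
// To get the column-vector product M * v, transpose the matrix first:
var transformResult = Vector4.Transform(srcVector, Matrix4x4.Transpose(invertedTransformationMatrix));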
Code here, for anyone interested:
using System.Numerics;
namespace Grapher.GraphingMath.MatrixAndVectorMath
{
public static class Matrix4x4Multiply
{
public static Vector4 Vector4Multiply(Matrix4x4 matrix, Vector4 vector)
{
var mat = new float[4, 4]
{
{matrix.M11, matrix.M12, matrix.M13, matrix.M14},
{matrix.M21, matrix.M22, matrix.M23, matrix.M24},
{matrix.M31, matrix.M32, matrix.M33, matrix.M34},
{matrix.M41, matrix.M42, matrix.M43, matrix.M44}
}; // We'll just wrap the matrix in a float array so we can index it.
var vec = new float[4] {vector.X, vector.Y, vector.Z, vector.W}; // And the same with the vector
var result = new float[4] {0, 0, 0, 0};
for (var row = 0; row < mat.GetLength(0); row++)
{
for (var col = 0; col < mat.GetLength(1); col++)
{
result[row] += mat[row, col]*vec[col];
}
}
return new Vector4
{
X = result[0],
Y = result[1],
Z = result[2],
W = result[3]
};
}
}
}
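Hypothetical usage, comparing the helper with the transposed built-in call (if the row-vector explanation above is right, the two results should agree):
var v1 = Matrix4x4Multiply.Vector4Multiply(invertedTransformationMatrix, srcVector);
var v2 = Vector4.Transform(srcVector, Matrix4x4.Transpose(invertedTransformationMatrix));
// both v1 and v2 should now hold the column-vector result M^-1 * u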
I don't know if this will help, but I do something along those lines in one of my projects. I don't use matrices, though, so it may not be what you are looking for. I simply store the extents of the graph's coordinates and the container (canvas) width and height. I then provide two extension functions:
public static System.Windows.Point ConvertToScreen(this System.Windows.Point point, CartesianExtents2D extents, double containerWidth, double containerHeight)
{
var x = (point.X - extents.XMinimum) * containerWidth / (extents.XMaximum - extents.XMinimum);
var y = (extents.YMaximum - point.Y) * containerHeight / (extents.YMaximum - extents.YMinimum);
return new System.Windows.Point(x, y);
}
public static System.Windows.Point ConvertToReal(this System.Windows.Point point, CartesianExtents2D extents, double containerWidth, double containerHeight)
{
var x = extents.XMinimum + (point.X * (extents.XMaximum - extents.XMinimum)) / containerWidth;
var y = extents.YMaximum - (point.Y * (extents.YMaximum - extents.YMinimum)) / containerHeight;
return new System.Windows.Point(x, y);
}
Call thus:
Point p = new Point();
p.ConvertToReal(...);
I'm hoping the contents of CartesianExtents2D are obvious: just min and max for x and y.
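For reference, a minimal sketch of what such a type might look like (the property names are taken from the extension methods above; the real class may differ):
public class CartesianExtents2D
{
    public double XMinimum { get; set; }
    public double XMaximum { get; set; }
    public double YMinimum { get; set; }
    public double YMaximum { get; set; }
}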
Related
I am doing image processing in which I find contours in an image. What I need is the centroid pixel coordinates of each contour found in the image. To find them I am using the code given below.
After finding the centroid I want to show it in the text boxes as x and y coordinates, but the code is not working.
Please help me: what is wrong?
VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
CvInvoke.FindContours(cannyImage, contours, null, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
var cannyOut = cannyImage.ToImage<Bgr, byte>();
//CvInvoke.DrawContours(cannyOut, contours, 2, new MCvScalar(255, 0, 0),2);
VectorOfPoint approx = new VectorOfPoint();
Dictionary<int, double> shapes = new Dictionary<int, double>();
for (int i = 0; i < contours.Size; i++)
{
approx.Clear();
double perimeter = CvInvoke.ArcLength(contours[i], true);
CvInvoke.ApproxPolyDP(contours[i], approx, 0.04 * perimeter, true);
double area = CvInvoke.ContourArea(contours[i]);
if (approx.Size > 4)
{
shapes.Add(i, area);
}
}
if (shapes.Count > 0)
{
var sortedShapes = (from item in shapes
orderby item.Value ascending
select item).ToList();
for (int i = 0; i < sortedShapes.Count; i++)
{
CvInvoke.DrawContours(cannyOut, contours, sortedShapes[i].Key, new MCvScalar(255, 0, 0), 2);
var moments = CvInvoke.Moments(contours[sortedShapes[i].Key]);
int x = (int)(moments.M10 / moments.M00);
int y = (int)(moments.M01 / moments.M00);
CvInvoke.PutText(cannyOut, (i + 1).ToString(), new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheyTriplex, 1.0,
new MCvScalar(255, 0, 0), 2);
//CvInvoke.PutText(cannyOut, sortedShapes[i].Value.ToString(), new Point(x, y - 30), Emgu.CV.CvEnum.FontFace.HersheyTriplex, 1.0,
// new MCvScalar(255, 0, 0), 2);
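// note: the two assignments below run once per contour, so after the loop
// the text boxes show only the last contour's centroid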
textBox1.Text = x.ToString();
textBox2.Text = y.ToString();
}
}
To find the centroid of a shape you need to split it into many triangles first.
Then, for each triangle with vertices A, B, C, you do a summation weighted by the area of the triangle, like so:
static void Main(string[] args)
{
var shape = new List<Triangle>();
// fill shape with triangles
float area = 0f;
Vector2 centroid = Vector2.Zero;
foreach (var triangle in shape)
{
float trig_area = triangle.Area;
Vector2 trig_cen = triangle.Centroid;
area += trig_area;
centroid += trig_area * trig_cen;
}
centroid /= area;
}
For reference, a 2D triangle has the following properties
public readonly struct Triangle
{
public Triangle(Vector2 a, Vector2 b, Vector2 c) : this()
{
A = a;
B = b;
C = c;
}
public Vector2 A { get; }
public Vector2 B { get; }
public Vector2 C { get; }
public float Area { get => (Cross(A, B) + Cross(B, C) + Cross(C, A)) / 2; }
public Vector2 Centroid { get => (A + B + C) / 3; }
// helper function
static float Cross(Vector2 a, Vector2 b) => a.X * b.Y - a.Y * b.X;
}
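As a quick sanity check (my own example, not part of the original code): a unit square split into two triangles should yield the centroid (0.5, 0.5).
var shape = new List<Triangle>
{
    new Triangle(new Vector2(0, 0), new Vector2(1, 0), new Vector2(1, 1)),
    new Triangle(new Vector2(0, 0), new Vector2(1, 1), new Vector2(0, 1))
};
float area = 0f;
Vector2 centroid = Vector2.Zero;
foreach (var triangle in shape)
{
    area += triangle.Area;                         // each triangle contributes 0.5
    centroid += triangle.Area * triangle.Centroid; // area-weighted sum
}
centroid /= area;
Console.WriteLine(centroid); // <0.5, 0.5>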
A few days ago I switched from TensorFlow to fastai for my C# project, but now I am facing a problem with my normalisation. For both I use an ONNX pipeline to load the model and the data.
var onnxPipeline = mLContext.Transforms.ResizeImages(resizing: ImageResizingEstimator.ResizingKind.Fill, outputColumnName: inputName,
imageWidth: ImageSettings.imageWidth, imageHeight: ImageSettings.imageHeight,
inputColumnName: nameof(ImageInputData.Image))
.Append(mLContext.Transforms.ExtractPixels(outputColumnName: inputName, interleavePixelColors: true, scaleImage: 1 / 255f))
.Append(mLContext.Transforms.ApplyOnnxModel(outputColumnName: outputName, inputColumnName: inputName, modelFile: onnxModelPath));
var emptyData = mLContext.Data.LoadFromEnumerable(new List<ImageInputData>());
var onnxModel = onnxPipeline.Fit(emptyData);
with
class ImageInputData
{
[ImageType(ImageSettings.imageHeight, ImageSettings.imageWidth)]
public Bitmap Image { get; set; }
public ImageInputData(byte[] image)
{
using (var ms = new MemoryStream(image))
{
Image = new Bitmap(ms);
}
}
public ImageInputData(Bitmap image)
{
Image = image;
}
}
After using fastai I learned that the models achieve better accuracy if the data is normalised with a specific mean and standard deviation (because I used the resnet34 model, it should be means { 0.485, 0.456, 0.406 } and stds { 0.229, 0.224, 0.225 } respectively).
So the pixel values (for each colour channel, of course) have to be transformed with those values to match the training images. But how can I achieve this in C#?
What I have tried so far:
int imageSize = 256;
double[] means = new double[] { 0.485, 0.456, 0.406 }; // used in fastai model
double[] stds = new double[] { 0.229, 0.224, 0.225 };
Bitmap bitmapImage = inputBitmap;
Image image = bitmapImage;
Color[] pixels = new Color[imageSize * imageSize];
for (int x = 0; x < bitmapImage.Width; x++)
{
for (int y = 0; y < bitmapImage.Height; y++)
{
Color pixel = bitmapImage.GetPixel(x, y);
pixels[x + y * bitmapImage.Width] = pixel;
double red = (pixel.R - (means[0] * 255)) / (stds[0] * 255); // *255 to scale the mean and std values to the Bitmap
double gre = (pixel.G - (means[1] * 255)) / (stds[1] * 255);
double blu = (pixel.B - (means[2] * 255)) / (stds[2] * 255);
Color pixel_n = Color.FromArgb(pixel.A, (int)red, (int)gre, (int)blu);
bitmapImage.SetPixel(x, y, pixel_n);
}
}
Of course it's not working, because the colour values can't be negative (which I only realised later).
But how can I achieve this normalisation between -1 and 1 for my model in C# with the ONNX model?
Is there a different way to feed the model, or to handle the normalisation?
Any help would be appreciated!
One way to solve this problem is to switch from an ONNX pipeline to an ONNX InferenceSession, which is in my view simpler and easier to understand:
public List<double> UseOnnxSession(Bitmap image, string onnxModelPath)
{
double[] means = new double[] { 0.485, 0.456, 0.406 };
double[] stds = new double[] { 0.229, 0.224, 0.225 };
using (var session = new InferenceSession(onnxModelPath))
{
List<double> scores = new List<double>();
Tensor<float> t1 = ConvertImageToFloatData(image, means, stds);
List<float> fl = new List<float>();
var inputMeta = session.InputMetadata;
var inputs = new List<NamedOnnxValue>()
{
NamedOnnxValue.CreateFromTensor<float>("input_1", t1)
};
using (var results = session.Run(inputs))
{
foreach (var r in results)
{
var x = r.AsTensor<float>().First();
var y = r.AsTensor<float>().Last();
var softmaxScore = Softmax(new double[] { x, y });
scores.Add(softmaxScore[0]);
scores.Add(softmaxScore[1]);
}
}
return scores;
}
}
// Create your Tensor and add transformations as you need.
public static Tensor<float> ConvertImageToFloatData(Bitmap image, double[] means, double[] std)
{
Tensor<float> data = new DenseTensor<float>(new[] { 1, 3, image.Width, image.Height });
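// note: ONNX vision models typically expect NCHW, i.e. { 1, 3, height, width };
// with the square inputs used here (width == height) the order happens not to matter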
for (int x = 0; x < image.Width; x++)
{
for (int y = 0; y < image.Height; y++)
{
Color color = image.GetPixel(x, y);
var red = (color.R - (float)means[0] * 255) / ((float)std[0] * 255);
var gre = (color.G - (float)means[1] * 255) / ((float)std[1] * 255);
var blu = (color.B - (float)means[2] * 255) / ((float)std[2] * 255);
data[0, 0, x, y] = red;
data[0, 1, x, y] = gre;
data[0, 2, x, y] = blu;
}
}
return data;
}
Also, I had to use my own Softmax method on these scores to get the real probabilities out of my model:
public double[] Softmax(double[] values)
{
double[] ret = new double[values.Length];
double maxExp = values.Select(Math.Exp).Sum();
for (int i = 0; i < values.Length; i++)
{
ret[i] = Math.Round((Math.Exp(values[i]) / maxExp), 4);
}
return ret;
}
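Hypothetical usage (the file paths are placeholders, and the two scores assume a binary classifier as in the code above):
var image = new Bitmap(@"C:\data\sample.png");
var scores = UseOnnxSession(image, @"C:\models\model.onnx");
Console.WriteLine($"P(class 0) = {scores[0]}, P(class 1) = {scores[1]}");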
Hope this helps someone who has a similar problem.
I am having a crash on the VertexBuffer line. If I comment out that line, my program iterates through this code 49 times. The code is from Riemer's XNA 4.0 tutorial and works in that solution. I have checked the using statements and don't see anything different between the two projects. I can't figure out why the VertexBuffer crashes the program while the IndexBuffer is fine. Does anyone have any ideas?
public void CopyToTerrainBuffers()
{
for (int x = 0; x < 7; x++)
for (int y = 0; y < 7; y++)
{
if (!gpu_buffer_std[x, y].initialized)
{
VertexBuffer temp = new VertexBuffer( device,
typeof(VertexMultitextured),
tiles_std[x + 1, y + 1].terrainVertices.Length * VertexMultitextured.SizeInBytes,
BufferUsage.WriteOnly);
// size = 3,698,744
gpu_buffer_std[x, y].terrainVertexBuffer = new VertexBuffer(device,
typeof(VertexMultitextured),
tiles_std[x + 1, y + 1].terrainVertices.Length * VertexMultitextured.SizeInBytes,
BufferUsage.WriteOnly);
gpu_buffer_std[x, y].terrainIndexBuffer = new IndexBuffer( device,
typeof(int),
tiles_std[x + 1, y + 1].indices.Length,
BufferUsage.WriteOnly);
gpu_buffer_std[x, y].initialized = true;
}
if (!tiles_std[x + 1, y + 1].toGPU)
{
//gpu_buffer_std[x, y].terrainVertexBuffer.SetData(tiles_std[x + 1, y + 1].terrainVertices);
gpu_buffer_std[x, y].terrainIndexBuffer.SetData(tiles_std[x + 1, y + 1].indices);
gpu_buffer_std[x, y].terrainVertexDeclaration = VertexMultitextured.VertexDeclaration;
tiles_std[x + 1, y + 1].toGPU = true;
}
}
}
The 'temp' buffer was to see whether it was the pointer causing the problem. This is the whole function (brute-force method). Here is the struct being used:
public struct VertexMultitextured : IVertexType
{
public Vector3 position;
public Vector3 normal;
public Vector4 textureCoordinate;
public Vector4 texWeights;
public static int SizeInBytes = sizeof(float) * (3 + 3 + 4 + 4);
public readonly static VertexDeclaration VertexDeclaration = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0),
new VertexElement(sizeof(float) * (3 + 3), VertexElementFormat.Vector4, VertexElementUsage.TextureCoordinate, 0),
new VertexElement(sizeof(float) * (3 + 3 + 4), VertexElementFormat.Vector4, VertexElementUsage.TextureCoordinate, 1)
);
public VertexMultitextured(Vector3 position, Vector3 normal, Vector4 textureCoordinate, Vector4 texWeights)
{
this.position = position;
this.normal = normal;
this.textureCoordinate = textureCoordinate;
this.texWeights = texWeights;
}
public Vector3 Position
{
get { return position; }
set { position = value; }
}
public Vector3 Normal
{
get { return normal; }
set { normal = value; }
}
public Vector4 TextureCoordinate
{
get { return textureCoordinate; }
set { textureCoordinate = value; }
}
public Vector4 TexWeights
{
get { return texWeights; }
set { texWeights = value; }
}
VertexDeclaration IVertexType.VertexDeclaration
{
get { return VertexDeclaration; }
}
}
Update: I have taken the code in question, placed it in Riemer's code, and produced the same results. The debugger shows the following:
System.OutOfMemoryException was unhandled
HResult=-2147024882
Message=Insufficient memory to continue the execution of the program.
Source=Microsoft.Xna.Framework
StackTrace:
at Microsoft.Xna.Framework.Helpers.GetExceptionFromResult(UInt32 result)
at Microsoft.Xna.Framework.Graphics.GraphicsHelpers.GetExceptionFromResult(UInt32 result)
at Microsoft.Xna.Framework.Graphics.VertexBuffer.CreateBuffer(VertexDeclaration vertexDeclaration, UInt32 dwVertexCount, UInt32 usage, _D3DPOOL pool)
at Microsoft.Xna.Framework.Graphics.VertexBuffer..ctor(GraphicsDevice graphicsDevice, Type vertexType, Int32 vertexCount, BufferUsage usage)
at Series3D4.Game1.CopyToTerrainBuffers(VertexMultitextured[] vertices, Int32[] indices) in D:\C#Programs\Series3D4\Series3D4\Series3D4\Game1.cs:line 494
at Series3D4.Game1.LoadVertices() in D:\C#Programs\Series3D4\Series3D4\Series3D4\Game1.cs:line 190
at Series3D4.Game1.LoadContent() in D:\C#Programs\Series3D4\Series3D4\Series3D4\Game1.cs:line 172
at Microsoft.Xna.Framework.Game.Initialize()
at Series3D4.Game1.Initialize() in D:\C#Programs\Series3D4\Series3D4\Series3D4\Game1.cs:line 154
at Microsoft.Xna.Framework.Game.RunGame(Boolean useBlockingRun)
at Microsoft.Xna.Framework.Game.Run()
at Series3D4.Program.Main(String[] args) in D:\C#Programs\Series3D4\Series3D4\Series3D4\Program.cs:line 13
InnerException:
Here is the class being used:
public class DisplayTileChunk
{
// Header info for tile control
public bool beenmoved; // x,z been updated after move
public bool isvisible; // render the tile on screen
public bool loaded; // tile loaded/processed in memory
public bool toGPU; // tile data transfered to GPU
// Terrain info
public int[] texture_index = new int[4];
public VertexMultitextured[] terrainVertices = new VertexMultitextured[4225]; // 65 x 65
public int[] indices = new int[24576]; // 64 x 64 x 6
public VertexBuffer terrainVertexBuffer;
public IndexBuffer terrainIndexBuffer;
public VertexDeclaration terrainVertexDeclaration;
public int noVertices; // = terrainVertexBuffer.VertexCount;
public int noTriangles; // = terrainIndexBuffer.IndexCount / 3;
// Water Info
public VertexBuffer waterVertexBuffer;
public VertexDeclaration waterVertexDeclaration;
// Vegetation Info
public VertexBuffer treeVertexBuffer;
public VertexDeclaration treeVertexDeclaration;
}
And the modified function from Riemer's code:
private void CopyToTerrainBuffers(VertexMultitextured[] vertices, int[] indices)
{
terrainVertexBuffer = new VertexBuffer(device, typeof(VertexMultitextured), vertices.Length * VertexMultitextured.SizeInBytes, BufferUsage.WriteOnly);
terrainVertexBuffer.SetData(vertices);
terrainIndexBuffer = new IndexBuffer(device, typeof(int), indices.Length, BufferUsage.WriteOnly);
terrainIndexBuffer.SetData(indices);
for (int x = 0; x < 36; x++)
for (int y = 0; y < 36; y++)
{
gpu_buffer_std[x, y] = new DisplayTileChunk();
VertexBuffer test = new VertexBuffer(device, typeof(VertexMultitextured), 4225 * VertexMultitextured.SizeInBytes, BufferUsage.None);
//gpu_buffer_std[x, y].terrainVertexBuffer = new VertexBuffer(device, typeof(VertexMultitextured), gpu_buffer_std[x, y].terrainVertices.Length * VertexMultitextured.SizeInBytes, BufferUsage.WriteOnly);
//gpu_buffer_std[x, y].terrainIndexBuffer = new IndexBuffer(device, typeof(int), gpu_buffer_std[x, y].indices.Length, BufferUsage.WriteOnly);
}
}
And some interesting numbers from the debugger:
vertices.Length = 16384
terrainVertexBuffer.VertexCount = 917504
terrainVertexBuffer._vertexCount = 917504
terrainVertexBuffer._size = 51380224
test.VertexCount = 236600
test._vertexCount = 236600
test._size = 13249600
Found the problem. Original line:
terrainVertexBuffer = new VertexBuffer(device,
typeof(VertexMultitextured),
vertices.Length * VertexMultitextured.SizeInBytes,
BufferUsage.WriteOnly);
Correct line:
terrainVertexBuffer = new VertexBuffer(device,
typeof(VertexMultitextured),
vertices.Length, // error was here
BufferUsage.WriteOnly);
I couldn't see the error for looking at it; took a few days off and 'boom', there it was. The constructor's third argument is a vertex count, not a size in bytes, so multiplying by SizeInBytes (56 bytes per vertex) requested 56 times as many vertices. That matches the debugger output above: 16384 * 56 = 917504 vertices, for a roughly 51 MB buffer.
I saved a group of points on my panel to List<MyVector> savedPoints, then I found the point with the lowest y coordinate:
public void searchLowest()
{
MyVector temp;
double ylon = savedPoints[0].getY();
for (int i = 0; i < savedPoints.Count; i++)
{
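// note: on a WinForms panel the y axis grows downward, so the largest
// getY() value is the lowest point on screen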
if (savedPoints[i].getY() > ylon)
{
ylon = savedPoints[i].getY();
lowest = i;
}
}
temp = savedPoints[lowest];
}
After this I made a method to calculate polar angles:
public static double angle(MyVector vec1, MyVector vec2)
{
double angle = Math.Atan2(vec1.getY() - vec2.getY(), vec1.getX() - vec2.getX());
return angle;
}
Now I don't know how to use the gift wrapping algorithm in my case. The pseudocode at the Wikipedia link is not really understandable to me, so I'm asking for help here.
I'm using C# and WinForms (.NET Framework 4.0).
Thanks for any help.
Using this as a reference, here is the code:
namespace GiftWrapping
{
using System.Drawing;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
class Program
{
static void Main(string[] args)
{
List<Point> test = new List<Point>(
new Point[]
{
new Point(200,200), new Point(300,100), new Point(200,50), new Point(100,100),
new Point(200, 100), new Point(300, 200), new Point(250, 100),
});
foreach (Point point in ConvexHull(test))
{
Console.WriteLine(point);
}
Console.ReadKey();
}
public static List<Point> ConvexHull(List<Point> points)
{
if (points.Count < 3)
{
throw new ArgumentException("At least 3 points required", "points");
}
List<Point> hull = new List<Point>();
// get leftmost point
Point vPointOnHull = points.Where(p => p.X == points.Min(min => min.X)).First();
Point vEndpoint;
do
{
hull.Add(vPointOnHull);
vEndpoint = points[0];
for (int i = 1; i < points.Count; i++)
{
if ((vPointOnHull == vEndpoint)
|| (Orientation(vPointOnHull, vEndpoint, points[i]) == -1))
{
vEndpoint = points[i];
}
}
vPointOnHull = vEndpoint;
}
while (vEndpoint != hull[0]);
return hull;
}
private static int Orientation(Point p1, Point p2, Point p)
{
// Determinant
int Orin = (p2.X - p1.X) * (p.Y - p1.Y) - (p.X - p1.X) * (p2.Y - p1.Y);
if (Orin > 0)
return -1; // (* Orientation is to the left-hand side *)
if (Orin < 0)
return 1; // (* Orientation is to the right-hand side *)
return 0; // (* Orientation is neutral aka collinear *)
}
}
}
Adapting it to your own classes is left as homework.
Here is an implementation using the System.Windows.Point class in WindowsBase:
public struct PolarVector {
public double Radius { get; set; }
public double Angle { get; set; }
public override string ToString() {
return "{" + Radius + "," + Angle + "}";
}
}
private static void Main(string[] args) {
var points = new[] {
new Point {X = 0, Y = 0},
//new Point {X = 2, Y = 0},
new Point {X = 0, Y = 2},
new Point {X = 1.5, Y = 0.5},
new Point {X = 2, Y = 2},
};
foreach(var point in ConvexHull(points)) {
Console.WriteLine(point);
}
Console.WriteLine();
if(Debugger.IsAttached) {
Console.WriteLine("Press any key to exit...");
Console.ReadKey();
}
}
public static IList<Point> ConvexHull(IList<Point> points) {
var pointOnHull = LeftMost(points);
var pointsOnHull = new List<Point>();
Point currentPoint;
do {
pointsOnHull.Add(pointOnHull);
currentPoint = points[0];
foreach(var nextPoint in points.Skip(1)) {
if (currentPoint == pointOnHull || IsLeft(nextPoint, pointOnHull, currentPoint)) {
currentPoint = nextPoint;
}
}
pointOnHull = currentPoint;
}
while (currentPoint != pointsOnHull[0]);
return pointsOnHull;
}
private static Point LeftMost(IEnumerable<Point> points) {
return points.Aggregate((v1, v2) => v2.X < v1.X ? v2 : v1);
}
private static bool IsLeft(Point nextPoint, Point lastPoint, Point currentPoint) {
var nextVector = ToPolar(nextPoint, lastPoint);
var currentVector = ToPolar(currentPoint, lastPoint);
return nextVector.Radius != 0 && Normalize(nextVector.Angle - currentVector.Angle) > 0;
}
private static PolarVector ToPolar(Point target, Point start) {
var vector = target - start;
return new PolarVector { Radius = Math.Sqrt((vector.Y * vector.Y) + (vector.X * vector.X)), Angle = Math.Atan2(vector.Y, vector.X)};
}
private static double Normalize(double radians) {
while(radians > Math.PI) {
radians -= 2*Math.PI;
}
while (radians < -Math.PI) {
radians += 2*Math.PI;
}
return radians;
}
For a gift wrapping algorithm implementation, it is advisable to use the left test technique:
// Left test implementation given by Petr
private static int Orientation(Point p1, Point p2, Point p)
{
// Determinant
int Orin = (p2.X - p1.X) * (p.Y - p1.Y) - (p.X - p1.X) * (p2.Y - p1.Y);
if (Orin > 0)
return -1; // (* Orientation is to the left-hand side *)
if (Orin < 0)
return 1; // (* Orientation is to the right-hand side *)
return 0; // (* Orientation is neutral aka collinear *)
}
Using this left test comparison helps us wrap the gift faster, so to speak.
Never use arc tangent calculations; they impact the run times in a big way.
Reference: Left test technique mentioned here - https://stackoverflow.com/a/1560510/1019673
Here is a link to a small Python app:
http://en.wikipedia.org/wiki/File:Beta-skeleton.svg
I think I've correctly converted it (source at the bottom of this post).
But Math.Acos always returns NaN. Is there a difference between the Python version of acos and Math.Acos?
private Random rnd = new Random();
private double scale = 5;
private double radius = 10;
private double beta1 = 1.1;
private double beta2 = 0.9;
private double theta1;
private double theta2;
private Point[] points = new Point[100];
public MainWindow()
{
InitializeComponent();
for (int i = 0; i < 100; i++ )
{
points[i] = new Point((rnd.NextDouble() * scale),
(rnd.NextDouble() * scale));
}
theta1 = Math.Asin(1/beta1);
theta2 = Math.PI - Math.Asin(beta2);
}
private double Dot(Point p, Point q, Point r)
{
var pr = new Point();
var qr = new Point();
//(p[0]-r[0])
pr.X = p.X-r.X;
//(p[1]-r[1])
pr.Y = p.Y-r.Y;
//(q[0]-r[0])
qr.X = q.X-r.X;
//(q[1]-r[1])
qr.Y = q.Y-r.Y;
return (pr.X*qr.X) + (pr.Y*qr.Y);
}
private double Sharp(Point p,Point q)
{
double theta = 0;
foreach(var pnt in points)
{
if(pnt!=p && pnt!=q)
{
var dotpq = Dot(p, q, pnt);
double t = Math.Acos(dotpq);
double u = Math.Pow((dotpq * dotpq), 0.5);
var tempVal = t/u;
theta = Math.Max(theta, tempVal);
}
}
return theta;
}
private void DrawPoint(Point p)
{
var e = new Ellipse
{
Width = radius/2,
Height = radius/2,
Stroke = Brushes.Red,
Visibility = Visibility.Visible
};
Canvas.SetTop(e, p.Y + radius);
Canvas.SetLeft(e, p.X + radius);
MyCanvas.Children.Add(e);
}
private void DrawEdge1(Point p,Point q)
{
var l = new Line
{
X1 = p.X,
Y1 = p.Y,
X2 = q.X,
Y2 = q.Y,
Stroke = Brushes.Black,
Width = 1,
Visibility = Visibility.Visible
};
MyCanvas.Children.Add(l);
}
private void DrawEdge2(Point p,Point q)
{
var l = new Line
{
X1 = p.X,
Y1 = p.Y,
X2 = q.X,
Y2 = q.Y,
Stroke = Brushes.Blue,
Width = 1,
Visibility = Visibility.Visible
};
MyCanvas.Children.Add(l);
}
private void Window_Loaded(object sender, RoutedEventArgs e)
{
foreach (var p in points)
{
foreach (var q in points)
{
var theta = Sharp(p, q);
if(theta < theta1) DrawEdge1(p, q);
else if(theta < theta2) DrawEdge2(p, q);
}
}
}
To get the angle from the dot product, you need to divide out the lengths before you take the acos.
What the Python has:
prq = acos(dot(p,q,r) / (dot(p,p,r)*dot(q,q,r))**0.5)
What you're doing is not dividing inside the Acos, but dividing after it.
So:
var r = pnt;
double ppr = Dot(p, p, r);
double qqr = Dot(q, q, r);
double pqr = Dot(p, q, r);
double u = Math.Acos(pqr / Math.Sqrt(ppr * qqr));
Of course, change the variables; I was just trying to keep it similar to the Python to help you understand :)
I think it's due to your translation of the Python expression acos(dot(p,q,r) / (dot(p,p,r) * dot(q,q,r))**0.5). Exponentiation in Python binds more tightly than division, so the square root is taken of the parenthesised product dot(p,p,r) * dot(q,q,r), and the division then happens inside the acos. In your C# version, when calculating the value of the double u, you take the square root of the wrong term, (dotpq * dotpq), and you call Acos before dividing rather than after.
The real question is what the value of dotpq is when the function gets called. Math.Acos requires a double value between -1 and 1, as stated in the docs; for anything outside that range it returns NaN, and an unnormalised dot product will usually fall outside it.
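Putting the two observations together, a corrected Sharp might look like this (a sketch reusing the Dot method and points array from the question; the clamp guarding against floating-point drift just outside [-1, 1] is my own addition):
private double Sharp(Point p, Point q)
{
    double theta = 0;
    foreach (var r in points)
    {
        if (r != p && r != q)
        {
            // acos(dot(p,q,r) / (dot(p,p,r)*dot(q,q,r))**0.5) from the Python source
            double cosAngle = Dot(p, q, r) / Math.Sqrt(Dot(p, p, r) * Dot(q, q, r));
            cosAngle = Math.Max(-1.0, Math.Min(1.0, cosAngle)); // keep inside Acos's domain
            theta = Math.Max(theta, Math.Acos(cosAngle));
        }
    }
    return theta;
}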