I am building a test 3D renderer in WinForms using the objects in System.Numerics such as Vector3 and Matrix4x4.
The object drawn is a point cloud, centered around (0,0,0), and rotated about the origin. Each node renders as dots on the screen. Here is what the 3D shape should look like
Fake Perspective
and more specifically when viewed from the front the perspective should be obvious with the blue dots that are further away from the eye to be at a smaller distance from the center
Fake Perspective
The pipeline is roughly as follows:
Rotation transformation
Matrix4x4 RY = Matrix4x4.CreateRotationY(ry);
Perspective transformation (fov=90, aspect=1.0f, near=1f, far=100f)
Matrix4x4 P = Matrix4x4.CreatePerspectiveFieldOfView(fov.Radians(), 1.0f, 1f, 100f);
Camera transformation
Matrix4x4 C = RY * P;
var node = Vector3.Transform(face.Nodes[i], C);
Project to 2D
Vector2 point = new Vector2(node.X, node.Y);
View transformation
Matrix3x2 S = Matrix3x2.CreateScale(height / scale, -height / scale);
Matrix3x2 T = Matrix3x2.CreateTranslation(width / 2f, height / 2f);
Matrix3x2 V = S * T;
point = Vector2.Transform(point, V);
Pixel Coordinates & Render
PointF pixel = new PointF(point.X, point.Y);
e.Graphics.FillEllipse(brush,pixel.X - 2, pixel.Y - 2, 4, 4);
So what I am seeing is an orthographic projection.
Program Output
The blue nodes further away are not smaller as expected. Somehow the perspective transformation is being ignored.
So my question is my usage of Matrix4x4.CreatePerspectiveFieldOfView() correct in step #2? And is the projection from 3D to 2D in step #4 correct?
Steps #1, #5 and #6 seem to be working exactly as intended, my issue is with steps #2-#4 somewhere.
Example code to reproduce the issue
Form1.cs
// WinForms form that renders a 3D point cloud ("Shape") with a rotate -> project -> view pipeline.
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
// The shape to render (faces of colored nodes); assigned in OnLoad.
public Shape Object { get; set; }
protected override void OnLoad(EventArgs e)
{
base.OnLoad(e);
this.Object = Shape.DemoShape1();
}
// Renders every face node as a small dot: model rotation, perspective projection,
// then a 2D view transform into pixel coordinates.
protected override void OnPaint(PaintEventArgs e)
{
base.OnPaint(e);
float width = ClientSize.Width, height = ClientSize.Height;
float scale = 40f, fov = 90f;
// NOTE(review): ry/rx are fields not visible in this listing — presumably the
// current rotation angles in radians; confirm where they are set.
Matrix4x4 RY = Matrix4x4.CreateRotationY(ry);
Matrix4x4 RX = Matrix4x4.CreateRotationX(rx);
// Perspective projection: fov converted from degrees, square aspect, near=1, far=100.
Matrix4x4 P = Matrix4x4.CreatePerspectiveFieldOfView(fov.Radians(), 1.0f, 1f, 100f);
// Combined transform (System.Numerics row-vector convention: rotations apply first).
Matrix4x4 C = RY * RX * P;
// View transform: scale to pixels (Y negated so +Y points up on screen)...
Matrix3x2 S = Matrix3x2.CreateScale(
height / scale, -height / scale);
// ...and translate the origin to the center of the client area.
Matrix3x2 T = Matrix3x2.CreateTranslation(
width / 2f, height / 2f);
Matrix3x2 V = S * T;
using (var pen = new Pen(Color.Black, 0))
{
var arrow = new AdjustableArrowCap(4f, 9.0f);
pen.CustomEndCap = arrow;
using (var brush = new SolidBrush(Color.Black))
{
// Draw coordinate triad (omitted)
// Each face has multiple nodes with the same color
foreach (var face in Object.Faces)
{
brush.Color = face.Color;
PointF[] points = new PointF[face.Nodes.Count];
for (int i = 0; i < points.Length; i++)
{
// transform nodes into draw points
var item = Vector4.Transform(face.Nodes[i], C);
// NOTE(review): Project() as defined below takes (X, Y) without dividing by W,
// so the perspective divide never happens and the result looks orthographic —
// this is the cause of the reported problem.
var point = Vector2.Transform(item.Project(), V);
points[i] = point.ToPoint();
}
// Draw points as dots
e.Graphics.SmoothingMode = SmoothingMode.HighQuality;
for (int i = 0; i < points.Length; i++)
{
e.Graphics.FillEllipse(brush,
points[i].X - 2, points[i].Y - 2,
4, 4);
}
}
}
}
}
}
GraphicsExtensions.cs
/// <summary>
/// Extension helpers for converting between vector/point types and angle units.
/// </summary>
public static class GraphicsExtensions
{
    /// <summary>Converts a 2D vector to a GDI+ point.</summary>
    public static PointF ToPoint(this Vector2 vector)
        => new PointF(vector.X, vector.Y);

    /// <summary>Drops the Z component of a 3D vector (orthographic projection).</summary>
    public static Vector2 Project(this Vector3 vector)
        => new Vector2(vector.X, vector.Y);

    /// <summary>
    /// Performs the perspective divide on a clip-space vector: (X/W, Y/W).
    /// BUG FIX: the previous version returned (X, Y) unscaled, discarding the W
    /// produced by CreatePerspectiveFieldOfView — that made the render orthographic.
    /// Dividing by W restores the foreshortening (distant points move toward center).
    /// </summary>
    public static Vector2 Project(this Vector4 vector)
        => new Vector2(vector.X / vector.W, vector.Y / vector.W);

    /// <summary>Converts degrees to radians.</summary>
    public static float Radians(this float degrees) => (float)(Math.PI / 180) * degrees;

    /// <summary>Converts radians to degrees.</summary>
    public static float Degrees(this float radians) => (float)(180 / Math.PI) * radians;
}
Related
With the code below I created a function to draw a 3D chain model in C# using the helix toolkit. This works exactly how I wanted to but... now I'm breaking my head around a good approach to draw the chainlinks in specific direction, from a startpoint to a endpoint, but I didn't come much further the last week. I know I need to work with vector multiplication or scalars but I need some guidance to right topic to solve my problem.
using HelixToolkit.SharpDX.Core;
using HelixToolkit.Wpf.SharpDX;
using SharpDX;
namespace RapiD.Geometry.Models
{
// Builds the geometry for a chain of interlocking oval links using HelixToolkit's
// MeshBuilder. Every second link is rotated 90 degrees so the links interlock, and
// successive links are shifted down by a fixed translation.
public partial class ChainLink3D : GeometryBase3D
{
// Radius of a link's semicircular ends (derived from width and diameter in DrawChainLink).
[ObservableProperty]
float radius;
// Outer width of a link.
[ObservableProperty]
float width;
// Thickness of the link's tube.
[ObservableProperty]
float diameter;
// Length of a link's straight section.
[ObservableProperty]
float length;
// Number of links to generate.
[ObservableProperty]
int copies;
// One Element3D per link is added below; consumers are not visible in this listing.
[ObservableProperty]
ObservableCollection<Element3D> elements;
public ChainLink3D(float diameter, float width, float length, int copies)
{
this.width = width;
this.length = length;
this.diameter = diameter;
this.copies = copies;
this.elements= new ObservableCollection<Element3D>();
OriginalMaterial = PhongMaterials.Chrome;
DrawChainLink();
}
// Generates the mesh for all links starting at startVector.
// NOTE(review): links are only translated by startVector; the computed 'direction'
// toward endVector is never applied (the 'vec *= direction' line is commented out),
// which is the drawing-direction problem described in the question.
public void DrawChainLink()
{
MeshBuilder meshBuilder = new MeshBuilder();
// Local 'radius' shadows the field of the same name.
float radius = (width - diameter) / 2;
float trans = 0f;
float translate = length + (radius * 2) - diameter;
float yoffset = 0;
int segments = 10;
// Integer division: exact only when 'segments' divides 180 (here 180/10 = 18).
float interval = 180 / segments;
int numOfCopies = copies;
float startPoint = radius - (diameter / 2);
float endPoint = -length -radius + (diameter / 2);
Vector3 startVector = new Vector3(-300, 200f, 0);
Vector3 endVector = new Vector3(300, 500, 0);
Vector3 direction = Vector3.Normalize (endVector - startVector);
//The for loop is drawing the chainlink
for (int j = 0; j < numOfCopies; j++)
{
List<Vector3> single_chain_link = new List<Vector3>();
// Walk the oval outline: upper semicircle (0-180) at yoffset 0, lower
// semicircle (180-360) shifted down by -length.
for (float i = 0; i <= 360; i += interval)
{
if (i > 180)
yoffset = -length;
else
yoffset = 0;
float a = i * MathF.PI / 180;
float x = radius * MathF.Cos(a);
float y = radius * MathF.Sin(a) + yoffset + trans;
Vector3 vec = new Vector3(x, y, 0);
//Rotates every second chainlink
if (j % 2 == 1)
vec =new Vector3(0, y, x);
vec += startVector;
//vec *= direction;
single_chain_link.Add(vec);
}
// this three are a reference for a new example direction in which I want to draw the chain link to
meshBuilder.AddSphere(Vector3.Zero, 5, 10, 10);
meshBuilder.AddSphere(startVector, 5, 10, 10);
meshBuilder.AddSphere(endVector, 5, 10, 10);
meshBuilder.AddTube(single_chain_link, diameter, 10, true);
meshBuilder.AddArrow(new Vector3(0, startPoint + trans, 0), new Vector3(0, endPoint + trans, 0), 2, 10);
elements.Add(new Element3D(new Vector3(0, startPoint + trans, 0), new Vector3(0, endPoint + trans, 0)));
//single_chain_link.OrderByDescending(x => x.X);
// NOTE(review): MeshGeometry is overwritten on every iteration; only the final
// assignment (containing everything added so far) survives the loop.
MeshGeometry = meshBuilder.ToMeshGeometry3D();
trans -= translate;
}
}
}
}
I did successfully draw the chain form a specific startpoint, but I want to draw the elements from the given startpoint to a endposition.
You should be using a transformation to rotate and/or move your model to the correct orientation.
To create a rotation matrix from a direction it is useful to know some linear algebra. Notably that the cross product between two vectors result in a vector orthogonal to both. And that a rotation matrix is not really anything more than three ortogonal axes. So you can do something like the following pseudo code
var x = myDirection;
Vector3 y;
if (x.AlmostEqual(Vector3.UnitY)) {
    y = x.CrossProduct(Vector3.UnitZ);
}
else {
    y = x.CrossProduct(Vector3.UnitY);
}
var z = y.CrossProduct(x);
// Create a matrix from the x, y, z axes
If you are using System.Numerics there is the Matrix4x4.CreateLookAt that does more or less this.
Once you have a matrix you can just transform your model to rotate it in whatever direction you want. Note that it is common, at least for me, to mix up directions and end up with something that is of by 90 degrees, or some other error. It does not help that different libraries can use some different conventions. My best solution is to do things in small steps and verify that the result is as you expect it to be.
I have been working on a 2D physics engine using polygons.
And I am having trouble implementing the actual physics part. For a bit of background, I am not experienced at all when it comes to physics, and therefore even if I found how to do the entire physics thing online, I would not be able to implement it into my project.
My goal is:
To have polygons fall with gravity.
Have weight drag etc.
Collision between multiple polygons.
What i have already made:
A way of displaying and creating multiple polygons.
Moving and rotating specified object(polygon).
Coeffients for drag, gravity and weight.
Hit boxes and visual boxes. (Visual boxes are what gets displayed and hit boxes are for physics)
A center point for every object. (So far is used for rotation)
A tick for when everything gets calculated. (Gametick/tickrate or whatever you wanna call it)
What i was not able to add / looking for:
Actual gravity.
Collision detection
Velocity for each object.
Collision between object.
Code snippets / how stuff works so far:
Beware that my code is janky and could be made better or more efficient.
Efficiency is not what I'm looking for!
Function for creating object:
// Builds a new Object from the given hit box, visual box, rotation, color,
// gravity flag and position, then computes its center from the offset polygon.
public Object CreateNew(PointF[] hb, PointF[] vb, float rt, Color cl, bool gr, PointF ps)
{
    var result = new Object
    {
        pos = ps,
        rotation = rt,
        offsets = vb,
        hitBox = hb,
        visBox = vb,
        gravity = gr,
        clr = cl,
    };
    #region center
    // Convert the offsets to vectors and take their centroid as the object's center.
    var offsetVectors = new List<Vector2>();
    for (int k = 0; k < result.offsets.Length; k++)
    {
        offsetVectors.Add(new Vector2(result.offsets[k].X, result.offsets[k].Y));
    }
    PointF centroid = ToPoint(Centroid(offsetVectors));
    result.center = new PointF(centroid.X, centroid.Y);
    #endregion
    return result;
}
Function for changing position of object:
// Moves the object to a new position and re-applies its rotation.
// Both boxes are rebuilt from the stored offsets, the centroids are recomputed,
// then the polygons are un-rotated by the previous angle and rotated by the new one.
public Object ChangePosition(PointF pos, double rot, Object obj)
{
    // Rebuild the visual and hit boxes by translating the offsets to the new position.
    // (The old per-iteration minPosX/minPosY sqrt computations were dead code and
    // have been removed.)
    for (int i = 0; i < obj.visBox.Length; i++)
    {
        obj.visBox[i] = new PointF(obj.offsets[i].X + pos.X, obj.offsets[i].Y + pos.Y);
    }
    for (int i = 0; i < obj.hitBox.Length; i++)
    {
        obj.hitBox[i] = new PointF(obj.offsets[i].X + pos.X, obj.offsets[i].Y + pos.Y);
    }
    obj.pos = pos;
    // Recompute the centroid of the offset polygon.
    List<Vector2> offsetPoints = new List<Vector2>();
    foreach (PointF p in obj.offsets)
    {
        offsetPoints.Add(new Vector2(p.X, p.Y));
    }
    obj.center = ToPoint(Centroid(offsetPoints));
    // Recompute the centroid of the hit box polygon.
    List<Vector2> hitBoxPoints = new List<Vector2>();
    foreach (PointF p in obj.hitBox)
    {
        hitBoxPoints.Add(new Vector2(p.X, p.Y));
    }
    obj.centerHitBox = ToPoint(Centroid(hitBoxPoints));
    // Undo the previously applied rotation, then apply the new one.
    obj.hitBox = RotatePolygon(obj.hitBox, obj.center, rotation * -1);
    obj.visBox = RotatePolygon(obj.visBox, obj.center, rotation * -1);
    obj.offsets = RotatePolygon(obj.offsets, obj.center, rotation * -1);
    obj.hitBox = RotatePolygon(obj.hitBox, obj.center, rot);
    obj.visBox = RotatePolygon(obj.visBox, obj.center, rot);
    obj.offsets = RotatePolygon(obj.offsets, obj.center, rot);
    rotation = rot;
    return obj;
}
Pastebin link to object script:
https://pastebin.com/9SnG4vyj
I will provide more information or scripts if anybody needs it!
I'm new to 3D programming and am having a terrible time getting my texture to fill my meshes properly. I've got it sizing correctly on the walls but the texture on the roof is running on an angle and is stretched out too far.
I have several methods to create the mesh but they are all eventually sent to AddTriangle method, where the TextureCoordinates are set.
// Appends one triangle to the mesh, generating texture coordinates by a fixed
// planar projection (X+Z, -Y) of each vertex.
public static void AddTriangle(this MeshGeometry3D mesh, Point3D[] pts)
{
// Create the points.
int index = mesh.Positions.Count;
foreach (Point3D pt in pts)
{
mesh.Positions.Add(pt);
mesh.TriangleIndices.Add(index++);
// NOTE(review): the (X+Z, -Y) mapping only looks right on axis-aligned vertical
// walls; on sloped faces (e.g. a roof) the texture skews and stretches — this is
// the problem the corrected, normal-based version later in the text solves.
mesh.TextureCoordinates.Add(new Point(pt.X + pt.Z, 0 - pt.Y));
}
}
Here is how my material is set up.
imageBrush.ImageSource = new BitmapImage(new Uri("pack://application:,,,/Textures/shingles1.jpg"));
imageBrush.TileMode = TileMode.Tile;
imageBrush.ViewportUnits = BrushMappingMode.Absolute;
imageBrush.Viewport = new Rect(0, 0, 25, 25);
SidingColor = new DiffuseMaterial(imageBrush);
SidingColor.Color = RGB(89, 94, 100);
My texture looks like this:
And here is the results I'm getting.
That's as close as I could get after hours of fooling around and googling.
Whew that was a little more difficult than I anticipated.
Here are few resources that helped me find a solution.
How to convert a 3D point on a plane to UV coordinates?
From the link below I realized the above formula was correct, but for a right-handed coordinate system. I converted it and that was the final step.
http://www.math.tau.ac.il/~dcor/Graphics/cg-slides/geom3d.pdf
Here is the code that works in case someone else has this question.
// Appends one triangle to the mesh, generating UV texture coordinates that are
// independent of the triangle's orientation: a tangent basis (u, v) is built from
// the face normal and each vertex is projected onto it, so the texture is not
// skewed on sloped surfaces such as a roof.
public static void AddTriangle(this MeshGeometry3D mesh, Point3D[] pts)
{
    // A triangle needs exactly three vertices; other input is silently ignored,
    // matching the original contract. (Length replaces the LINQ Count() call,
    // which enumerated the array just to count it.)
    if (pts.Length != 3) return;
    //use the three point of the triangle to calculate the normal (angle of the surface)
    Vector3D normal = CalculateNormal(pts[0], pts[1], pts[2]);
    normal.Normalize();
    // Build the first tangent axis; the special case avoids a degenerate (zero)
    // axis when the normal points straight along Y.
    Vector3D u;
    if (normal.X == 0 && normal.Z == 0) u = new Vector3D(normal.Y, -normal.X, 0);
    else u = new Vector3D(normal.X, -normal.Z, 0);
    u.Normalize();
    // n is the normal with components cycled into the (Z, X, Y) order used below;
    // v completes the basis via the cross product.
    Vector3D n = new Vector3D(normal.Z, normal.X, normal.Y);
    Vector3D v = Vector3D.CrossProduct(n, u);
    int index = mesh.Positions.Count;
    foreach (Point3D pt in pts)
    {
        //add the points to create the triangle
        mesh.Positions.Add(pt);
        mesh.TriangleIndices.Add(index++);
        //apply the uv texture positions by projecting each vertex onto (u, v)
        double u_coor = Vector3D.DotProduct(u, new Vector3D(pt.Z, pt.X, pt.Y));
        double v_coor = Vector3D.DotProduct(v, new Vector3D(pt.Z, pt.X, pt.Y));
        mesh.TextureCoordinates.Add(new Point(u_coor, v_coor));
    }
}
// Computes the (unnormalized) face normal of the triangle as the cross product
// of its two edge vectors e = first - second and f = second - third.
private static Vector3D CalculateNormal(Point3D firstPoint, Point3D secondPoint, Point3D thirdPoint)
{
    double ex = firstPoint.X - secondPoint.X;
    double ey = firstPoint.Y - secondPoint.Y;
    double ez = firstPoint.Z - secondPoint.Z;
    double fx = secondPoint.X - thirdPoint.X;
    double fy = secondPoint.Y - thirdPoint.Y;
    double fz = secondPoint.Z - thirdPoint.Z;
    // Cross product e x f, written out component by component.
    return new Vector3D(
        ey * fz - ez * fy,
        ez * fx - ex * fz,
        ex * fy - ey * fx);
}
I have drawn regular polygons and divided those into equal parts.
It's like this :
but I want to fill it with 2 colors like this :
How do I implement this?
Code how to draw polygons is below:
Graphics g = e.Graphics;
nPoints = CalculateVertices(sides, radius, angle, center);
g.DrawPolygon(navypen, nPoints);
g.FillPolygon(BlueBrush, nPoints);
Point center = new Point(ClientSize.Width / 2, ClientSize.Height / 2);
for(int i = 0; i < sides; i++) {
g.DrawLine(new Pen(Color.Navy), center.X, center.Y, nPoints[i].X, nPoints[i].Y);
}
// Returns the vertices of a regular polygon with the given number of sides,
// circumradius and starting angle (degrees), centered on the given point.
// Fewer than 3 sides is clamped to 3 (a triangle) rather than throwing,
// matching the original behavior.
private PointF[] CalculateVertices(int sides, int radius, float startingAngle, Point center)
{
    if (sides < 3) {
        sides = 3;
    }
    float step = 360.0f / sides;
    var points = new PointF[sides];
    for (int i = 0; i < sides; i++)
    {
        // Compute each angle from the index instead of accumulating a float
        // (the old code stepped two redundant counters and could drift), so
        // rounding error does not build up across vertices.
        points[i] = DegreesToXY(startingAngle + i * step, radius, center);
    }
    return points;
}
// Converts a polar coordinate (angle in degrees, radius) around the given origin
// to screen coordinates. Y uses -radians because screen Y grows downward while
// the mathematical convention grows upward.
private PointF DegreesToXY(float degrees, float radius, Point origin)
{
    double radians = degrees * Math.PI / 180.0;
    return new PointF(
        // BUG FIX: the old (int) casts truncated to whole pixels even though
        // PointF stores floats, producing visibly uneven polygon vertices.
        (float)(Math.Cos(radians) * radius + origin.X),
        (float)(Math.Sin(-radians) * radius + origin.Y));
}
There are several ways but the most straight-forward is to draw the polygons (triangles) of different colors separately.
Assumig a List<T> for colors:
List<Color> colors = new List<Color> { Color.Yellow, Color.Red };
You can add this before the DrawLine call:
using (SolidBrush brush = new SolidBrush(colors[i%2]))
g.FillPolygon(brush, new[] { center, nPoints[i], nPoints[(i+1)% sides]});
Note how I wrap around both the nPoints and the colors using the % operator!
I'd like to update a list of points (PointFs) by performing a rotation (around a new origin) and translating each point by an amount that is proportional to its current distance from the origin (so not an absolute translation).
I currently do this for each point in turn but performance is poor when moving more than a handful of points.
I'd like to make the transformation more efficient so wanted to use a matrix. The rotation is no problem, but I don't know how to do the proportional translation.
Can I do this with an affine matrix? Is there some other way to do the transformation more efficiently?
UPDATED
Here's my current code. I've changed it a little so at least it does use a matrix for the rotation. Note the translation is based on a ratio, so points closer to the centre won't move as far as points further away:
// Rotates and radially scales otherPoints around centre so that they follow the
// same motion that moved the dragged point from priorLocation to newLocation.
private void DragPointsAroundCentre(PointF centre, PointF priorLocation, PointF newLocation, PointF[] otherPoints)
{
    // Express the drag as a rotation plus a change of distance from the centre.
    var priorLength = Maths.Distance(centre, priorLocation);
    var newLength = Maths.Distance(centre, newLocation);
    var lengthRatio = newLength / priorLength;
    var rotationAngle = (float)Maths.Angle(centre, priorLocation, newLocation);
    // Rotate every point by the same angle...
    Rotate(otherPoints, rotationAngle, centre);
    // ...then scale each point's distance from the centre by the same ratio
    // (proportional translation: nearer points move less than farther ones).
    int index = 0;
    while (index < otherPoints.Length)
    {
        PointF scaled = GetPointOnLine(centre, otherPoints[index], (float)lengthRatio);
        otherPoints[index] = scaled;
        index++;
    }
}
// Rotates all points in place by the given angle (degrees) about center,
// using a GDI+ matrix so the whole array is transformed in one call.
private static void Rotate(PointF[] points, float angle, PointF center)
{
    using (var rotation = new Matrix())
    {
        rotation.RotateAt(angle, center);
        rotation.TransformPoints(points);
    }
}
// gets a point from a relative position on a line using the specified ratio
// Returns the point located at 'ratio' of the way along the ray from origin
// through point (ratio 0 = origin, 1 = point, >1 = beyond point).
private static PointF GetPointOnLine(PointF origin, PointF point, float ratio)
{
    float dx = point.X - origin.X;
    float dy = point.Y - origin.Y;
    return new PointF(origin.X + dx * ratio, origin.Y + dy * ratio);
}
This is the code I use for transformations. I hope this helps you:
// Demonstrates rotating and then translating an array of points
// using GDI+ matrices.
class Program
{
    static void Main(string[] args)
    {
        PointF[] samplePoints =
        {
            new PointF(1, 0),
            new PointF(0, 1)
        };
        float angle = 90; // in degrees
        PointF center = new PointF(1, 1);
        // First rotate about the pivot...
        Rotate(samplePoints, angle, center);
        // ...then shift by 10 units along the (1, 1) direction.
        float offset = 10;
        PointF vector = new PointF(1, 1);
        Translate(samplePoints, offset, vector);
    }
    // Rotates all points in place by 'angle' degrees about 'center'.
    static void Rotate(PointF[] points, float angle, PointF center)
    {
        using (var rotation = new Matrix())
        {
            rotation.RotateAt(angle, center);
            rotation.TransformPoints(points);
        }
    }
    // Translates point along the specified vector.
    static void Translate(PointF[] points, float offset, PointF vector)
    {
        // Normalize the direction, then scale it by the requested offset.
        float magnitude = (float)Math.Sqrt((vector.X * vector.X) + (vector.Y * vector.Y)); // = length
        float dirX = vector.X / magnitude;
        float dirY = vector.Y / magnitude;
        using (var translation = new Matrix())
        {
            translation.Translate(offset * dirX, offset * dirY);
            translation.TransformPoints(points);
        }
    }
}
If you need the transformation to be very efficient you can combine both transformation matrices into one and transform all points only once.
EDIT:
You can use, for example, a simple parallel loop to make it a little bit faster. But even for 30,000,000 points the difference is not too big in this case (my case: 4 CPU cores). But of course it depends on how often you process them.
// Benchmarks the radial-scaling translation over a large random point set,
// comparing a single-threaded loop against Parallel.For.
class Program
{
    static void Main(string[] args)
    {
        // Build a large randomized data set (Point converts implicitly to PointF).
        int count = 30000000;
        PointF[] points = new PointF[count];
        Random random = new Random();
        for (int n = 0; n < count; n++)
        {
            points[n] = new Point(random.Next(), random.Next());
        }
        PointF pivot = new PointF(3, 3);
        float ratio = 7.3f;
        // Single-threaded pass.
        Stopwatch timer = Stopwatch.StartNew();
        for (int n = 0; n < points.Length; n++)
        {
            points[n] = GetPointOnLine(pivot, points[n], ratio);
        }
        timer.Stop();
        Console.WriteLine("Single thread: {0} sec.", timer.Elapsed.TotalSeconds);
        // Parallel pass over the same array (each index is touched by one task only).
        timer.Restart();
        Parallel.For(0, count, n =>
        {
            points[n] = GetPointOnLine(pivot, points[n], ratio);
        });
        timer.Stop();
        Console.WriteLine("Multi thread: {0} sec.", timer.Elapsed.TotalSeconds);
        Console.ReadKey();
    }
    // gets a point from a relative position on a line using the specified ratio
    private static PointF GetPointOnLine(PointF origin, PointF point, float ratio)
    {
        float dx = point.X - origin.X;
        float dy = point.Y - origin.Y;
        return new PointF(origin.X + dx * ratio, origin.Y + dy * ratio);
    }
}
EDIT-2:
I found a transformation that is exacly the same as yours and transforms the points in only one loop using a single matrix. Here's the code for both the old and the new transformation:
// Demonstrates that the rotate-then-radially-scale transformation can be expressed
// as a single GDI+ matrix, so all points are transformed in one pass.
class Program
{
static void Main(string[] args)
{
// Two identical point sets: one for the old two-step transform and one for the
// new single-matrix transform, so the results can be compared.
PointF[] points1 = new PointF[]
{
new PointF(1f, 0f),
new PointF(0f, 1f),
new PointF(1f, 1f),
new PointF(2f, 2f),
};
PointF[] points2 = new PointF[]
{
new PointF(1f, 0f),
new PointF(0f, 1f),
new PointF(1f, 1f),
new PointF(2f, 2f),
};
PointF center = new PointF(2f, 2f);
float priorLength = 4f;
float newLength = 5f;
float lengthRatio = newLength / priorLength;
float rotationAngle = 45f;
Transformation_old(points1, rotationAngle, center, lengthRatio);
Transformation_new(points2, rotationAngle, center, lengthRatio);
Console.ReadKey();
}
// Original approach: rotate with a matrix, then scale each point's distance
// from the center in a separate per-point loop.
static void Transformation_old(PointF[] points, float rotationAngle, PointF center, float lengthRatio)
{
Rotate(points, rotationAngle, center);
for (int i = 0; i < points.Length; i++)
{
var translation = GetPointOnLine(center, points[i], lengthRatio);
points[i].X = translation.X;
points[i].Y = translation.Y;
}
}
// Rotates all points in place about 'center' by 'angle' degrees.
static void Rotate(PointF[] points, float angle, PointF center)
{
using (Matrix m = new Matrix())
{
m.RotateAt(angle, center);
m.TransformPoints(points);
}
}
// Returns the point at 'ratio' of the way along the line from origin through point.
private static PointF GetPointOnLine(PointF origin, PointF point, float ratio)
{
return new PointF(
origin.X + (point.X - origin.X) * ratio,
origin.Y + (point.Y - origin.Y) * ratio);
}
// Uses only a single matrix and a single transformation:
// With MatrixOrder.Prepend the LAST prepended step is applied FIRST to each point,
// so the effective order is: translate(-center) -> scale(ratio) ->
// translate(+center) -> rotate about center — equivalent to Transformation_old.
static void Transformation_new(PointF[] points, float rotationAngle, PointF center, float lengthRatio)
{
using (Matrix m = new Matrix())
{
m.RotateAt(rotationAngle, center, MatrixOrder.Prepend);
// Replaces GetPointOnLine
m.Translate(center.X, center.Y, MatrixOrder.Prepend);
m.Scale(lengthRatio, lengthRatio, MatrixOrder.Prepend);
m.Translate(-center.X, -center.Y, MatrixOrder.Prepend);
m.TransformPoints(points);
}
}
}