Hi everyone,
this is my first time posting here, because I'm well and truly stuck...
The further away a mesh is from the origin at (0, 0, 0), the more it "jumps"/"flickers" when rotating or moving the camera. The effect is hard to describe: the mesh jitters/shivers/trembles slightly, and the trembling grows larger the further the mesh is from the origin.
For me, it becomes observable at around 100,000 units from the origin, e.g. at (0, 0, 100000). Neither the axis of translation nor the type of mesh (a default mesh created from Mesh.Create... or a 3ds mesh imported with assimp.NET) has any influence on the effect. The position value of the mesh does not change while the effect occurs; I verified this by logging the position.
If I'm not missing something, this narrows it down to two possibilities:
My camera code
The DirectX device
As for the DirectX device, this is my device initialization code:
private void InitializeDevice()
{
    //Initialize D3D
    _d3dObj = new D3D9.Direct3D();
    //Set presentation parameters
    _presParams = new D3D9.PresentParameters();
    _presParams.Windowed = true;
    _presParams.SwapEffect = D3D9.SwapEffect.Discard;
    _presParams.AutoDepthStencilFormat = D3D9.Format.D16;
    _presParams.EnableAutoDepthStencil = true;
    _presParams.PresentationInterval = D3D9.PresentInterval.One;
    _presParams.BackBufferFormat = _d3dObj.Adapters.DefaultAdapter.CurrentDisplayMode.Format;
    _presParams.BackBufferHeight = _d3dObj.Adapters.DefaultAdapter.CurrentDisplayMode.Height;
    _presParams.BackBufferWidth = _d3dObj.Adapters.DefaultAdapter.CurrentDisplayMode.Width;
    //Set form width and height to current backbuffer width and height
    this.Width = _presParams.BackBufferWidth;
    this.Height = _presParams.BackBufferHeight;
    //Check device capabilities
    D3D9.Capabilities caps = _d3dObj.GetDeviceCaps(0, D3D9.DeviceType.Hardware);
    D3D9.CreateFlags devFlags = D3D9.CreateFlags.SoftwareVertexProcessing;
    D3D9.DeviceType devType = D3D9.DeviceType.Reference;
    //Set device flags according to device capabilities
    if ((caps.VertexShaderVersion >= new Version(2, 0)) && (caps.PixelShaderVersion >= new Version(2, 0)))
    {
        //If the device supports vertex and pixel shaders >= 2.0,
        //then use the hardware device
        devType = D3D9.DeviceType.Hardware;
        if (caps.DeviceCaps.HasFlag(D3D9.DeviceCaps.HWTransformAndLight))
        {
            devFlags = D3D9.CreateFlags.HardwareVertexProcessing;
        }
        if (caps.DeviceCaps.HasFlag(D3D9.DeviceCaps.PureDevice))
        {
            devFlags |= D3D9.CreateFlags.PureDevice;
        }
    }
    //Initialize the device
    _device = new D3D9.Device(_d3dObj, 0, devType, this.Handle, devFlags, _presParams);
    //Set culling
    _device.SetRenderState(D3D9.RenderState.CullMode, D3D9.Cull.Counterclockwise);
    //Set texture wrapping (needed for seamless sphere mapping)
    _device.SetRenderState(D3D9.RenderState.Wrap0, D3D9.TextureWrapping.All);
    //Set lighting
    _device.SetRenderState(D3D9.RenderState.Lighting, false);
    //Enable the z-buffer
    _device.SetRenderState(D3D9.RenderState.ZEnable, D3D9.ZBufferType.UseZBuffer);
    //...and set write access explicitly to true.
    //I'm a little paranoid about this since I had to struggle for a few days with weirdly overlapping meshes.
    _device.SetRenderState(D3D9.RenderState.ZWriteEnable, true);
}
Am I missing a flag or renderstate? Is there something that could cause such a weird/distorted behaviour?
My camera class is based on Michael Silverman's C++ quaternion camera:
//Every variable prefixed with an underscore is
//a private static variable initialized beforehand
public static class Camera
{
    //Gets called every frame
    public static void Update()
    {
        if (_filter)
        {
            _filteredPos = Vector3.Lerp(_filteredPos, _pos, _filterAlpha);
            _filteredRot = Quaternion.Slerp(_filteredRot, _rot, _filterAlpha);
        }
        _device.SetTransform(D3D9.TransformState.Projection, Matrix.PerspectiveFovLH(_fov, _screenAspect, _nearClippingPlane, _farClippingPlane));
        _device.SetTransform(D3D9.TransformState.View, GetViewMatrix());
    }

    public static void Move(Vector3 delta)
    {
        _pos += delta;
    }

    public static void RotationYaw(float theta)
    {
        _rot = Quaternion.Multiply(Quaternion.RotationAxis(_up, -theta), _rot);
    }

    public static void RotationPitch(float theta)
    {
        _rot = Quaternion.Multiply(_rot, Quaternion.RotationAxis(_right, theta));
    }

    public static void SetTarget(Vector3 target, Vector3 up)
    {
        SetPositionAndTarget(_pos, target, up);
    }

    public static void SetPositionAndTarget(Vector3 position, Vector3 target, Vector3 upVec)
    {
        _pos = position;
        Vector3 up, right, lookAt = target - _pos;
        lookAt = Vector3.Normalize(lookAt);
        right = Vector3.Cross(upVec, lookAt);
        right = Vector3.Normalize(right);
        up = Vector3.Cross(lookAt, right);
        up = Vector3.Normalize(up);
        SetAxis(lookAt, up, right);
    }

    public static void SetAxis(Vector3 lookAt, Vector3 up, Vector3 right)
    {
        Matrix rot = Matrix.Identity;
        rot.M11 = right.X;
        rot.M12 = up.X;
        rot.M13 = lookAt.X;
        rot.M21 = right.Y;
        rot.M22 = up.Y;
        rot.M23 = lookAt.Y;
        rot.M31 = right.Z;
        rot.M32 = up.Z;
        rot.M33 = lookAt.Z;
        _rot = Quaternion.RotationMatrix(rot);
    }

    public static void ViewScene(BoundingSphere sphere)
    {
        SetPositionAndTarget(sphere.Center - new Vector3((sphere.Radius + 150) / (float)Math.Sin(_fov / 2), 0, 0), sphere.Center, new Vector3(0, 1, 0));
    }

    public static Vector3 GetLookAt()
    {
        Matrix rot = Matrix.RotationQuaternion(_rot);
        return new Vector3(rot.M13, rot.M23, rot.M33);
    }

    public static Vector3 GetRight()
    {
        Matrix rot = Matrix.RotationQuaternion(_rot);
        return new Vector3(rot.M11, rot.M21, rot.M31);
    }

    public static Vector3 GetUp()
    {
        Matrix rot = Matrix.RotationQuaternion(_rot);
        return new Vector3(rot.M12, rot.M22, rot.M32);
    }

    public static Matrix GetViewMatrix()
    {
        Matrix viewMatrix, translation = Matrix.Identity;
        Vector3 position;
        Quaternion rotation;
        if (_filter)
        {
            position = _filteredPos;
            rotation = _filteredRot;
        }
        else
        {
            position = _pos;
            rotation = _rot;
        }
        translation = Matrix.Translation(-position.X, -position.Y, -position.Z);
        viewMatrix = Matrix.Multiply(translation, Matrix.RotationQuaternion(rotation));
        return viewMatrix;
    }
}
Do you spot anything in the camera code which could cause this behaviour?
I just can't imagine that DirectX can't handle distances greater than 100k. I'm supposed to render solar systems, and I'm using 1 unit = 1 km, so the Earth at its maximum distance from the sun would be rendered at (0, 0, 152100000) (just as an example). This becomes impossible if these "jumps" keep occurring.
Finally, I thought about scaling everything down so that a system never goes beyond 100k/-100k distance from the origin, but I don't think this will work: the "jittering" grows with distance from the origin, so scaling everything down would, I think, scale down the jumping behaviour too.
Just so this question doesn't remain unanswered (credit to #jcoder, see the comments on the question):
The weird behaviour of the meshes comes from the limited precision of DirectX's 32-bit floating-point math. The bigger your world gets, the less precision is left to calculate positions accurately.
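You can see the limit directly with plain C# floats (my illustration, not from the original thread); a float's 24-bit mantissa means its resolution shrinks as the magnitude grows:

Console.WriteLine(100f + 0.001f);     // 100.001   - the offset is still representable
Console.WriteLine(100000f + 0.001f);  // 100000    - spacing between floats here is ~0.0078
Console.WriteLine(152100000f + 1f);   // 1.521E+08 - spacing between floats here is 16 units

So at 100,000 units the positions a transform can actually represent already form a coarse grid, and every camera movement re-rounds vertices onto slightly different grid points, which is the trembling you see.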
There are two possibilities to solve this problem:
Downscaling the whole world
this may be problematic in a "galactic-style" world, where you have really big position offsets as well as really small ones (e.g. the distance from a planet to its sun is huge, while the distance from a spaceship orbiting a planet may be tiny)
Dividing the world into smaller chunks
this way you either have to express all positions relative to something else (see stackoverflow.com/questions/1930421 and the sketch below) or make multiple worlds and somehow move between them
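For the relative-positions route, here is a minimal sketch of camera-relative rendering (a "floating origin"), assuming SlimDX-style Matrix types; RenderObject and its fields are invented for illustration:

// Store absolute positions as doubles on the CPU; only the small
// camera-relative offset is narrowed to float and sent to the GPU.
struct RenderObject
{
    public double X, Y, Z; // absolute world position, double precision
}

Matrix GetWorldMatrix(RenderObject obj, double camX, double camY, double camZ)
{
    // Subtract in double precision FIRST, then narrow to float.
    // The difference is small for anything near the camera, so the
    // float result keeps full precision where it matters.
    return Matrix.Translation(
        (float)(obj.X - camX),
        (float)(obj.Y - camY),
        (float)(obj.Z - camZ));
}

The view matrix is then built with the camera sitting at the origin (rotation only), so no large number ever enters the 32-bit pipeline.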
I'm trying to project a texture onto a simple cube's MeshFilter using only C#, but I'm having a hard time understanding what to do. I almost got it working for rotation around the X axis, but there is a lot of bad warping for Y/Z. Basically, I update the UVs whenever the position/rotation of the camera changes; here is my code:
[ExecuteInEditMode]
public class ObjectEditor : MonoBehaviour {

    public GameObject Model;

    public void UpdateTexture(Camera camera) {
        MeshFilter[] meshFilters = Model.GetComponentsInChildren<MeshFilter>();
        foreach (MeshFilter meshFilter in meshFilters) {
            int size = meshFilter.sharedMesh.vertices.Length;
            Vector2[] uvs = new Vector2[size];
            for (int i = 0; i < size; i++) {
                uvs[i] = vertexToUVPosition(camera, meshFilter, i);
            }
            meshFilter.sharedMesh.SetUVs(0, uvs);
        }
    }

    private Vector2 vertexToUVPosition(Camera camera, MeshFilter meshFilter, int index) {
        Vector3 vertex = meshFilter.sharedMesh.vertices[index];
        Matrix4x4 VP = camera.projectionMatrix * camera.worldToCameraMatrix;
        Vector4 worldPos = new Vector4(vertex.x, vertex.y, vertex.z, 1f);
        Vector4 clipPos = VP * worldPos;
        clipPos *= 1f / clipPos.w;
        return camera.WorldToScreenPoint(clipPos);
    }
}
Everything regarding the projection happens in vertexToUVPosition.
And here is what I have right now (the projected texture is a simple black/white checkerboard):
Can someone experienced in projections explain to me what I'm doing wrong and maybe provide a sample C# code that works correctly? Thank you.
I found a solution which solves the problem completely, but I guess it is not mathematically correct since I don't really know much about matrices and projections. It is, however, a good starting point for whoever wants to do something similar without any experience.
Before showing the code, here are some things you should set up to make it easier to debug potential problems:
Make sure your texture is in clamp mode, it will be easier to see if the projection works correctly.
Position your object at (0,0,0).
Position your camera at (0,0,0) and make sure it is in perspective mode.
Use another camera in orthographic mode to look at the projected texture and validate it is shown correctly.
The algorithm:
// Calculate the VP matrix from the perspective camera
VP = camera.projectionMatrix * camera.worldToCameraMatrix
foreach vertex in mesh
    // Replace the "w" component with 1.
    worldPosition = new Vector4(vertex.x, vertex.y, vertex.z, 1f);
    clipPosition = VP * worldPosition;
    // Small correction on the Y axis (maybe someone can explain why I need this?).
    clipPosition.Scale(new Vector3(1, 0.5f, 1));
    // Use the clipPosition as the UV coordinates for that vertex.
    ...
My implementation:
[ExecuteInEditMode]
public class ObjectEditor : MonoBehaviour {

    public GameObject Model;

    public void UpdateTexture(Camera camera) {
        Matrix4x4 vp = camera.projectionMatrix * camera.worldToCameraMatrix;
        MeshFilter[] meshFilters = Model.GetComponentsInChildren<MeshFilter>();
        foreach (MeshFilter meshFilter in meshFilters) {
            int size = meshFilter.sharedMesh.vertices.Length;
            Vector2[] uvs = new Vector2[size];
            for (int i = 0; i < size; i++) {
                uvs[i] = vertexToUVPosition(vp, meshFilter, i);
            }
            meshFilter.sharedMesh.SetUVs(0, uvs);
        }
    }

    private Vector2 vertexToUVPosition(Matrix4x4 vp, MeshFilter meshFilter, int index) {
        Vector3 vertex = meshFilter.sharedMesh.vertices[index];
        Vector4 worldPos = new Vector4(vertex.x, vertex.y, vertex.z, 1f);
        Vector4 clipPos = vp * worldPos;
        clipPos.Scale(new Vector3(1, 0.5f, 1));
        return clipPos;
    }
}
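As for the correction the code comments ask about: after the perspective divide, clip-space x and y both run from -1 to 1, while UVs run from 0 to 1, so the textbook remap scales by 0.5 and offsets by 0.5 on both axes. A hedged sketch of the same method using that conventional remap (my assumption of the standard math, untested against this scene):

private Vector2 vertexToUVPosition(Matrix4x4 vp, MeshFilter meshFilter, int index) {
    Vector3 vertex = meshFilter.sharedMesh.vertices[index];
    Vector4 clipPos = vp * new Vector4(vertex.x, vertex.y, vertex.z, 1f);
    clipPos /= clipPos.w; // perspective divide -> normalized device coordinates in [-1, 1]
    // Remap NDC [-1, 1] to UV [0, 1] on both axes.
    return new Vector2(clipPos.x * 0.5f + 0.5f, clipPos.y * 0.5f + 0.5f);
}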
The result, tested with rotation on two axes (40, -45, 0):
The orthographic camera view:
I've looked through what seems like every question and resource there is for Triangle.NET, trying to find out how to insert a vertex into an existing triangulation. The closest I've gotten was in the discussion archives for Triangle.Net, where someone asked a similar question (discussion id 632458), but unfortunately the answer was not what I was looking for.
My goal here is to make a destructible wall in Unity where, when the player shoots the wall, it will create a hole in the wall (like in Rainbow Six Siege).
Here's what I did for my original implementation:
Create initial triangulation using the four corners of the wall.
When the player shoots, perform a raycast, if the raycast intersects with the wall then add the point of intersection to the polygon variable and re-triangulate the entire mesh using that variable.
Draw new triangulation on the wall as a texture to visualise what's happening.
Repeat.
As you can see, step 2 is the problem.
Because I re-triangulate the entire mesh every time the player hits the wall, the triangulation gets slower and slower as the number of vertices rises. That could be fine, I guess, but I want destructible walls to play a major role in my game, so it's not acceptable.
So, digging through the Triangle.Net source code, I find an internal method called InsertVertex. The summary for this method states:
Insert a vertex into a Delaunay triangulation, performing flips as necessary to maintain the Delaunay property.
This would mean I wouldn't have to re-triangulate every time the player shoots!
So I get to implementing this method, and...it doesn't work. I get an error like the one below:
NullReferenceException: Object reference not set to an instance of an object
TriangleNet.TriangleLocator.PreciseLocate (TriangleNet.Geometry.Point searchpoint, TriangleNet.Topology.Otri& searchtri, System.Boolean stopatsubsegment) (at Assets/Triangle.NET/TriangleLocator.cs:146)
I have been stuck on this problem for days and cannot solve it for the life of me! If anyone knowledgeable about the Triangle.NET library is willing to help, I would be very grateful. Along with that, if there is a better alternative to either the implementation or the library I'm using (for the purpose I outlined above), that would also be awesome!
Currently, how I've set up the scene is really simple, I just have a quad which I scaled up and added the script below to it as a component. I then linked that component to a shoot raycast script attached to the Main Camera:
How the scene is set up.
What it looks like in Play Mode.
The exact Triangle.Net repo I cloned is this one.
My code is posted below:
using UnityEngine;
using TriangleNet.Geometry;
using TriangleNet.Topology;
using TriangleNet.Meshing;

public class Delaunay : MonoBehaviour
{
    [SerializeField]
    private int randomPoints = 150;
    [SerializeField]
    private int width = 512;
    [SerializeField]
    private int height = 512;

    private TriangleNet.Mesh mesh;
    Polygon polygon = new Polygon();
    Otri otri = default(Otri);
    Osub osub = default(Osub);
    ConstraintOptions constraintOptions = new ConstraintOptions() { ConformingDelaunay = true };
    QualityOptions qualityOptions = new QualityOptions() { MinimumAngle = 25 };

    void Start()
    {
        osub.seg = null;
        Mesh objMesh = GetComponent<MeshFilter>().mesh;

        // Add four corners of wall (quad in this case) to polygon.
        //foreach (Vector3 vert in objMesh.vertices)
        //{
        //    Vector2 temp = new Vector2();
        //    temp.x = map(vert.x, -0.5f, 0.5f, 0, 512);
        //    temp.y = map(vert.y, -0.5f, 0.5f, 0, 512);
        //    polygon.Add(new Vertex(temp.x, temp.y));
        //}

        // Generate random points and add to polygon.
        for (int i = 0; i < randomPoints; i++)
        {
            polygon.Add(new Vertex(Random.Range(0.0f, width), Random.Range(0.0f, height)));
        }

        // Triangulate polygon.
        delaunayTriangulation();
    }

    // When left click is pressed, a raycast is sent out. If that raycast hits the wall,
    // updatePoints() is called and is passed the location of the hit (hit.point).
    public void updatePoints(Vector3 pos)
    {
        // Convert pos to local coords of wall.
        pos = transform.InverseTransformPoint(pos);
        Vertex newVert = new Vertex(pos.x, pos.y);

        //// Give new vertex a unique id.
        //if (mesh != null)
        //{
        //    newVert.id = mesh.NumberOfInputPoints;
        //}

        // Insert new vertex into existing triangulation.
        otri.tri = mesh.dummytri;
        mesh.InsertVertex(newVert, ref otri, ref osub, false, false);

        // Draw result as a texture onto the wall to visualise what is happening.
        draw();
    }

    private void delaunayTriangulation()
    {
        mesh = (TriangleNet.Mesh)polygon.Triangulate(constraintOptions, qualityOptions);
        draw();
    }

    void draw()
    {
        Texture2D tx = new Texture2D(width, height);

        // Draw triangulation.
        if (mesh.Edges != null)
        {
            foreach (Edge edge in mesh.Edges)
            {
                Vertex v0 = mesh.vertices[edge.P0];
                Vertex v1 = mesh.vertices[edge.P1];
                DrawLine(new Vector2((float)v0.x, (float)v0.y), new Vector2((float)v1.x, (float)v1.y), tx, Color.black);
            }
        }
        tx.Apply();
        this.GetComponent<Renderer>().sharedMaterial.mainTexture = tx;
    }

    // Bresenham line algorithm
    private void DrawLine(Vector2 p0, Vector2 p1, Texture2D tx, Color c, int offset = 0)
    {
        int x0 = (int)p0.x;
        int y0 = (int)p0.y;
        int x1 = (int)p1.x;
        int y1 = (int)p1.y;
        int dx = Mathf.Abs(x1 - x0);
        int dy = Mathf.Abs(y1 - y0);
        int sx = x0 < x1 ? 1 : -1;
        int sy = y0 < y1 ? 1 : -1;
        int err = dx - dy;
        while (true)
        {
            tx.SetPixel(x0 + offset, y0 + offset, c);
            if (x0 == x1 && y0 == y1) break;
            int e2 = 2 * err;
            if (e2 > -dy)
            {
                err -= dy;
                x0 += sx;
            }
            if (e2 < dx)
            {
                err += dx;
                y0 += sy;
            }
        }
    }

    // Linearly remaps a value from one range to another.
    private float map(float from, float fromMin, float fromMax, float toMin, float toMax)
    {
        float fromAbs = from - fromMin;
        float fromMaxAbs = fromMax - fromMin;
        float normal = fromAbs / fromMaxAbs;
        float toMaxAbs = toMax - toMin;
        float toAbs = toMaxAbs * normal;
        float to = toAbs + toMin;
        return to;
    }
}
Great news! I've managed to fix the issue. InsertVertex() doesn't actually add the new vertex to the mesh's list of vertices! That means that when it tried to triangulate, it wanted to point at the new vertex but couldn't (because the vertex wasn't in the list). To solve this, I manually add my new vertex to the mesh's vertex list before calling InsertVertex(). Note: when you do this, you also need to set the vertex's id manually. I set the id to the size of the vertex list, because I always add new vertices to the end of the list.
// When left click is pressed, a raycast is sent out. If that raycast hits the wall,
// updatePoints() is called and is passed the location of the hit (hit.point).
public void updatePoints(Vector3 pos)
{
    // Convert pos to local coords of wall. You don't need to do this; I do it because of
    // my draw() method, where I map everything onto a texture and display it.
    pos = transform.InverseTransformPoint(pos);
    pos.x = map(pos.x, -0.5f, 0.5f, 0, 512);
    pos.y = map(pos.y, -0.5f, 0.5f, 0, 512);
    Vertex newVert = new Vertex(pos.x, pos.y);

    // Manually add the new vertex to the mesh's list of vertices.
    newVert.id = mesh.vertices.Count;
    mesh.vertices.Add(newVert.id, newVert);

    // Doing just the first line gave me a null pointer exception.
    // Adding the two extra lines below it fixed it for me.
    otri.tri = mesh.dummytri;
    otri.orient = 0;
    otri.Sym();

    // Insert new vertex into existing triangulation.
    mesh.InsertVertex(newVert, ref otri, ref osub, false, false);

    // Draw result as a texture onto the wall to visualise what is happening.
    draw();
}
Hope this will help someone down the road!
I'm creating a basic simulator in Unity for my A-level Computer Science project. At the moment the user can draw a box (crate) object by selecting the associated tool and clicking and dragging to set two opposite corners of the box, thus determining its dimensions.
The box consists of a single prefab which is instantiated and has its size changed accordingly. The code for it is as follows:
void Start () {
    boxAnim = boxButton.GetComponent<Animator>();
}

// Update is called once per frame
void Update()
{
    //sets the mouseDown and mouseHeld bools and the mouse position Vector3
    mouseDown = Input.GetMouseButtonDown(0);
    mouseHeld = Input.GetMouseButton(0);
    mousePosition = Input.mousePosition;

    //checks if the user has started to draw
    if (mouseDown && !draw)
    {
        draw = true;
        originalMousePosition = mousePosition;
    }

    //checks if the user has released the mouse
    if (draw && !mouseHeld)
    {
        finalMousePosition = mousePosition;
        draw = false;
        if (boxAnim.GetBool("Pressed") == true) //if the box draw button is pressed
        {
            boxDraw(originalMousePosition, finalMousePosition); //draws crate
        }
    }
}

void boxDraw(Vector3 start, Vector3 end)
{
    //assigns world coordinates for the start and end mouse positions
    worldStart = Camera.main.ScreenToWorldPoint(start);
    worldEnd = Camera.main.ScreenToWorldPoint(end);
    if (worldStart.y >= -3.2f && worldEnd.y >= -3.2f)
    {
        //determines the size of box to be drawn
        boxSize.x = Mathf.Abs(worldStart.x - worldEnd.x);
        boxSize.y = Mathf.Abs(worldStart.y - worldEnd.y);

        //crate sprite is 175px wide, 175/50 = 3.5 (50px per unit), so the scale factor must be the size divided by 3.5
        boxScaleFactor.x = boxSize.x / 3.5f;
        boxScaleFactor.y = boxSize.y / 3.5f;

        //initial scale of the box is 1 (this isn't necessary but makes reading the program easier)
        boxScale.x = 1 * boxScaleFactor.x;
        boxScale.y = 1 * boxScaleFactor.y;

        //creates a new crate under the name newBox and alters its size
        GameObject newBox = Instantiate(box, normalCoords(start, end), box.transform.rotation) as GameObject;
        newBox.transform.localScale = boxScale;
    }
}

Vector3 normalCoords(Vector3 start, Vector3 end)
{
    //takes start and end screen positions and returns a world coordinate for the box's centre
    if (end.x > start.x)
    {
        start.x = start.x + (Mathf.Abs(start.x - end.x) / 2f);
    }
    else
    {
        start.x = start.x - (Mathf.Abs(start.x - end.x) / 2f);
    }
    if (end.y > start.y)
    {
        start.y = start.y + (Mathf.Abs(start.y - end.y) / 2f);
    }
    else
    {
        start.y = start.y - (Mathf.Abs(start.y - end.y) / 2f);
    }
    start = Camera.main.ScreenToWorldPoint(new Vector3(start.x, start.y, 0f));
    return start;
}
In a similar manner, I want the user to be able to create a 'ramp' object by clicking and dragging to determine the base width, then clicking again to determine the angle of elevation/height (the ramp will always be a right-angled triangle). The problem is that I want the ramp to use a sprite I have created, rather than just a basic block colour. A single sprite, however, has a single fixed angle of elevation, and no transform can alter this (as far as I'm aware). Obviously I don't want to have to create a different sprite for each angle, so is there anything I can do?
The solution I was thinking of was some sort of feature that would let me alter the nodes of a vector image in code, but I'm pretty sure that doesn't exist.
EDIT: Just to clarify, this is a 2D environment; the code includes Vector3s just because that's what I'm used to.
You mention Sprite, which is a 2D object (well, it's actually very much like a Quad, which counts as 3D), but you reference full 3D in other parts of your question and in your code, which I think was confusing people, because creating a texture for a sprite is a very different problem. I am assuming you mentioned Sprite by mistake and actually want a 3D object (Unity is 3D internally most of the time anyway); it can have only one side if you want.
You can create 3D shapes from code, no problem, although you do need to get familiar with the Mesh class, and mastering creating triangles on the fly takes some practice.
Here are a couple of good starting points:
https://docs.unity3d.com/Manual/Example-CreatingaBillboardPlane.html
https://docs.unity3d.com/ScriptReference/Mesh.html
I have a solution to part of the problem using meshes and a polygon collider. I now have a function that will create a right angled triangle with a given width and height and a collider in the shape of that triangle:
using UnityEngine;
using System.Collections;

public class createMesh : MonoBehaviour {

    public float width = 5f;
    public float height = 5f;
    public PolygonCollider2D polyCollider;

    void Start()
    {
        polyCollider = GetComponent<PolygonCollider2D>();
    }

    // Update is called once per frame
    void Update () {
        TriangleMesh(width, height);
    }

    void TriangleMesh(float width, float height)
    {
        MeshFilter mf = GetComponent<MeshFilter>();
        Mesh mesh = new Mesh();
        mf.mesh = mesh;

        //Vertices
        Vector3[] vertices = new Vector3[3]
        {
            new Vector3(0, 0, 0), new Vector3(width, 0, 0), new Vector3(0, height, 0)
        };

        //Triangles
        int[] tri = new int[3];
        tri[0] = 0;
        tri[1] = 2;
        tri[2] = 1;

        //Normals
        Vector3[] normals = new Vector3[3];
        normals[0] = -Vector3.forward;
        normals[1] = -Vector3.forward;
        normals[2] = -Vector3.forward;

        //UVs
        Vector2[] uv = new Vector2[3];
        uv[0] = new Vector2(0, 0);
        uv[1] = new Vector2(1, 0);
        uv[2] = new Vector2(0, 1);

        //Initialise
        mesh.vertices = vertices;
        mesh.triangles = tri;
        mesh.normals = normals;
        mesh.uv = uv;

        //Set up the collider
        polyCollider.pathCount = 1;
        Vector2[] path = new Vector2[3]
        {
            new Vector2(0, 0), new Vector2(0, height), new Vector2(width, 0)
        };
        polyCollider.SetPath(0, path);
    }
}
I just need to plug this function into code very similar to my box-drawing code, so that the user can specify the width and height.
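A minimal sketch of that hookup, under the assumption that TriangleMesh() is made public and that a rampPrefab field carrying the createMesh component exists (illustrative, untested):

void rampDraw(Vector3 start, Vector3 end)
{
    // Same pattern as boxDraw: convert the drag's endpoints to world space.
    Vector3 worldStart = Camera.main.ScreenToWorldPoint(start);
    Vector3 worldEnd = Camera.main.ScreenToWorldPoint(end);
    float rampWidth = Mathf.Abs(worldStart.x - worldEnd.x);
    float rampHeight = Mathf.Abs(worldStart.y - worldEnd.y);

    // Spawn the ramp at the bottom-left corner of the drag, since
    // TriangleMesh() builds its triangle up and to the right of the origin.
    Vector3 corner = new Vector3(Mathf.Min(worldStart.x, worldEnd.x),
                                 Mathf.Min(worldStart.y, worldEnd.y), 0f);
    GameObject newRamp = Instantiate(rampPrefab, corner, Quaternion.identity);
    newRamp.GetComponent<createMesh>().TriangleMesh(rampWidth, rampHeight);
}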
I am trying to make a scrolling level for a test game I am doing as a learning exercise. I have created a map containing lots of tiles which are drawn according to their position in an array. I basically want the camera to scroll down the level, but at the moment it just frantically shakes up and down a little bit.
I have a camera class, which is just a blank static class containing a static Vector2 for the camera location. It is initially set to (50, 50), as all the tiles are 50 by 50.
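For reference, from that description the class amounts to something like this (my reconstruction, not the poster's actual code; XNA's Vector2 assumed):

public static class Camera
{
    // Static location; starts at (50, 50) since the tiles are 50x50.
    public static Vector2 Location = new Vector2(50, 50);
}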
Then, in my map's update method, I have the following:
public void Update(GameTime gameTime) {
    Camera.Location.Y = MathHelper.Clamp(Camera.Location.Y + (float)speed, 0, (300 - 18) * 50);
}
The 300 and 18 are the total number of tiles and the number of tiles on screen (vertically).
I am completely lost, so I would appreciate any help or advice.
Here's a simple 2D camera class that I use in my games.
public class Camera2D
{
    public Camera2D()
    {
        Zoom = 1;
        Position = Vector2.Zero;
        Rotation = 0;
        Origin = Vector2.Zero;
    }

    public float Zoom { get; set; }
    public Vector2 Position { get; set; }
    public float Rotation { get; set; }
    public Vector2 Origin { get; set; }

    public void Move(Vector2 direction)
    {
        Position += direction;
    }

    public Matrix GetTransform()
    {
        var translationMatrix = Matrix.CreateTranslation(new Vector3(Position.X, Position.Y, 0));
        var rotationMatrix = Matrix.CreateRotationZ(Rotation);
        var scaleMatrix = Matrix.CreateScale(new Vector3(Zoom, Zoom, 1));
        var originMatrix = Matrix.CreateTranslation(new Vector3(Origin.X, Origin.Y, 0));
        return translationMatrix * rotationMatrix * scaleMatrix * originMatrix;
    }
}
The idea is to apply it as a transform when you are about to draw your sprite batch, like so:
var screenScale = GetScreenScale();
var viewMatrix = Camera.GetTransform();
_spriteBatch.Begin(SpriteSortMode.Deferred, BlendState.NonPremultiplied,
    null, null, null, null, viewMatrix * Matrix.CreateScale(screenScale));
You probably also noticed the use of a screen scale. Although not directly related to the camera, it's a way to make sure your screen is scaled to the desired resolution. This way you can draw your scene independent of resolution and scale it at the end. Here's the screen scale method for reference:
public Vector3 GetScreenScale()
{
    var scaleX = (float)_graphicsDevice.Viewport.Width / (float)_width;
    var scaleY = (float)_graphicsDevice.Viewport.Height / (float)_height;
    return new Vector3(scaleX, scaleY, 1.0f);
}
My friend and I were messing around in XNA 4.0, making a 2D racing game, kind of like Ivan “Ironman” Stewart’s Super Off Road. The problem we are having is knowing which direction our car is facing so we can move it appropriately. We could track the direction with an enum value (North, South, East, West), but we don't want to do that for a number of reasons.
We were wondering if there was a way to accomplish this with math. Maybe by designating an anchor point at the hood of the car, having the car always move towards that spot, and then moving the anchor point. We aren't sure. Or maybe there is a way using a 2D vector.
I figured since we hit a hard spot, we should ask the coding community for help!
Just to be clear. I'm not looking for code; I just want to discuss some concepts of 2D movement in all directions without having to track a direction enum. I know that can't be the only way to do it.
Car physics is actually a very difficult subject. I wish you well, but you're embarking on a difficult quest.
As for the answer: you can store the direction angle in radians and use the atan2 function to get the relation between angles.
Or you can use a Vector2D and use vector math to determine angles; atan2 will be your friend there too.
A snippet of my code on the issue:
public class RotatingImage implements RotatingImageVO {

    protected double x, y; // center position
    protected double facingx, facingy;
    protected double angle;

    protected void calculateAngle() {
        angle = Math.atan2(x - facingx, facingy - y);
    }
}
Remember that calculating atan2 is expensive. When I did it on every draw iteration for every object (tower defense; the towers were rotating) it took ~30% of my computing power. Do it only if you detect a noticeable angle change, like this:
public void setFacingPoint(double facingx, double facingy) {
    if (Math.abs((facingx - this.facingx) / (facingx + this.facingx)) > 0.002
            || Math.abs((facingy - this.facingy) / (facingy + this.facingy)) > 0.002) {
        this.facingx = facingx;
        this.facingy = facingy;
        calculateAngle();
    }
}
You can represent the direction using a normalized vector. This is just a hard-coded example, but you could easily use the input from a gamepad thumbstick.
Vector2 north = new Vector2(0, -1);
And later in your code move your sprite like this (assuming there is a position vector).
float speed = 100; // 100 pixels per second.
Vector2 direction = north;
position += direction * ((float)gameTime.ElapsedGameTime.TotalSeconds * speed);
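To steer without any enum, you can also rotate that direction vector in place each frame. A small sketch on top of the previous snippet's variables (my addition; turnRate is an assumed field in radians per second):

// Rotate the unit direction vector by this frame's steering amount.
float turn = turnRate * (float)gameTime.ElapsedGameTime.TotalSeconds;
direction = Vector2.Transform(direction, Matrix.CreateRotationZ(turn));
direction.Normalize(); // keep it a unit vector despite floating-point drift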
Describing the direction using a unit vector and the position using a point makes sense. Then, when the car moves forward, you do:
currentPosition = currentPosition + distance * directionVector; // (pseudocode)
When changing the direction, it is good to use a rotation matrix to transform the direction vector.
(I'm not familiar with XNA.)
Wrote some dummy code to illustrate:
[Test]
public void MoveForwardTest()
{
    var position = new Point(0, 0);
    var direction = new Vector(1, 0);
    double distance = 5;

    //Update position, moving forward distance along direction
    position = position + distance * direction;

    Assert.AreEqual(5.0, position.X, 1e-3);
    Assert.AreEqual(0, position.Y, 1e-3);
}

[Test]
public void RotateThenMoveTest()
{
    var position = new Point(0, 0);
    var direction = new Vector(1, 0);
    double distance = 5;

    //Create the rotation matrix
    var rotateTransform = new RotateTransform(90);
    //Apply the rotation to the direction
    direction = Vector.Multiply(direction, rotateTransform.Value);

    //Update position, moving forward distance along direction
    position = position + distance * direction;

    Assert.AreEqual(0, position.X, 1e-3);
    Assert.AreEqual(5.0, position.Y, 1e-3);
}

[Test]
public void CheckIfOtherCarIsInFrontTest()
{
    var position = new Point(0, 0);
    var direction = new Vector(1, 0);
    var otherCarPosition = new Point(1, 0);

    //Create a vector from the current car to the other car
    Vector vectorTo = Point.Subtract(otherCarPosition, position);

    //If the dot product is > 0, the other car is in front
    Assert.IsTrue(Vector.Multiply(direction, vectorTo) > 0);
}

[Test]
public void AngleToNorthTest()
{
    var direction = new Vector(1, 0);
    var northDirection = new Vector(0, 1);
    Assert.AreEqual(90, Vector.AngleBetween(direction, northDirection), 1e-3);
}