Box2D & XNA: Rendering the data - C#

So I have been finding that, for Box2D, your physics information should not be your rendering information, so you can't do things like:
spriteBatch.Draw(mazeBox, mazeBody.Position / 0.01f, Color.White)
Instead, you should create transforms of the physics info and use those for rendering.
So what exactly does that mean? I have been trying to find info on how to use transforms for rendering, but I am drawing blanks.

That means the physics can be rendered however you wish: bigger, smaller, rotated, translated, and so on. You just need to work out the proportions at which your renderer (in our case XNA) will draw your physics bodies. Try the following: draw a line at the ground position and a 1x1 box at the ball/box position in a "hello box2d" application (no such sample ships with Box2D, but you can create a minimal application that does nothing except simulate a ball/box falling onto the floor). And do not forget to step your physics!
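As a rough illustration in XNA terms (a sketch only: PixelsPerMeter and the body/texture names are assumptions, not part of the original answer):

// Pick a calibration constant; Box2D works in meters, the screen in pixels.
const float PixelsPerMeter = 64f;

// Convert a physics position (meters, Y up) to a screen position (pixels, Y down).
Vector2 ToScreen(Vector2 physicsPosition, float screenHeight)
{
    return new Vector2(
        physicsPosition.X * PixelsPerMeter,
        screenHeight - physicsPosition.Y * PixelsPerMeter);
}

// In Draw(): step the world first, then render the 1x1 box where physics says it is.
// world.Step(1f / 60f, 8, 3);
// spriteBatch.Draw(boxTexture,
//     ToScreen(boxBody.Position, GraphicsDevice.Viewport.Height),
//     Color.White);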
If you're interested, here's my SFML application with Box2D and some character-controller basics:
#include <stdio.h>
#include <cmath> // for std::fabs in the velocity checks below
#include <Box2D/Box2D.h>
#include <SFML/Window.hpp>
#include <SFML/Graphics.hpp>
#include "Animation.h"

#pragma comment(lib, "Box2D.lib")
#pragma comment(lib, "sfml-system.lib")
#pragma comment(lib, "sfml-window-s.lib")
#pragma comment(lib, "sfml-graphics.lib")

#define M_PI 3.14f
#define PIXELS_PER_METER 64.f
#define METERS_PER_PIXEL (1.f / PIXELS_PER_METER)
#define PPM PIXELS_PER_METER
#define MPP METERS_PER_PIXEL
#define x_cor (2.f * METERS_PER_PIXEL)
#define y_cor METERS_PER_PIXEL

// Thanks to bobasaurus =)
class DebugDraw : public b2DebugDraw
{
public:
    DebugDraw(sf::RenderWindow *renderWindow)
    {
        window = renderWindow;
    }

    void DrawPolygon(const b2Vec2 *vertices, int32 vertexCount, const b2Color &color)
    {
        sf::Shape polygon;
        for (int32 i = 0; i < vertexCount; i++)
        {
            b2Vec2 vertex = vertices[i];
            // Flip the Y axis: Box2D's Y points up, SFML's points down.
            polygon.AddPoint(vertex.x * PIXELS_PER_METER, window->GetHeight() - (vertex.y * PIXELS_PER_METER), sf::Color(0, 0, 0, 0), B2SFColor(color));
        }
        window->Draw(polygon);
    }

    void DrawSolidPolygon(const b2Vec2 *vertices, int32 vertexCount, const b2Color &color)
    {
        sf::Shape polygon;
        for (int32 i = 0; i < vertexCount; i++)
        {
            b2Vec2 vertex = vertices[i];
            polygon.AddPoint(vertex.x * PIXELS_PER_METER, window->GetHeight() - (vertex.y * PIXELS_PER_METER), B2SFColor(color)); // need transparent outline?
        }
        window->Draw(polygon);
    }

    void DrawCircle(const b2Vec2 &center, float32 radius, const b2Color &color)
    {
        sf::Shape circle = sf::Shape::Circle(center.x * PPM, window->GetHeight() - (center.y * PPM), radius * PPM, sf::Color(0, 0, 0, 0), 1.0f, B2SFColor(color));
        window->Draw(circle);
    }

    void DrawSolidCircle(const b2Vec2 &center, float32 radius, const b2Vec2 &axis, const b2Color &color)
    {
        sf::Shape circle = sf::Shape::Circle(center.x * PPM, window->GetHeight() - (center.y * PPM), radius * PPM, B2SFColor(color));
        window->Draw(circle);
    }

    void DrawSegment(const b2Vec2 &p1, const b2Vec2 &p2, const b2Color &color) {}
    void DrawTransform(const b2Transform &xf) {}

private:
    sf::RenderWindow *window;

    // Convert a Box2D color (floats in [0, 1]) to an SFML color (bytes).
    sf::Color B2SFColor(const b2Color &color)
    {
        sf::Color result((sf::Uint8)(color.r * 255), (sf::Uint8)(color.g * 255), (sf::Uint8)(color.b * 255));
        return result;
    }
};

int main()
{
    sf::RenderWindow *App = new sf::RenderWindow(sf::VideoMode(800, 600, 32), "SFML + Box2D Test");
    App->UseVerticalSync(true);

    // ================= Init Physics ====================
    b2World *world = new b2World(b2Vec2(0.0f, -10.0f), true);
    DebugDraw *debugDraw = new DebugDraw(App);
    debugDraw->SetFlags(b2DebugDraw::e_shapeBit);
    world->SetDebugDraw(debugDraw);

    // Define the ground body.
    b2BodyDef groundBodyDef;
    groundBodyDef.position.Set(0.0f * x_cor, 0.0f * y_cor);
    b2Body *groundBody = world->CreateBody(&groundBodyDef);
    b2PolygonShape groundBox;
    groundBox.SetAsBox(500.f * x_cor, 10.0f * y_cor);
    groundBody->CreateFixture(&groundBox, 0.0f);
    // ====================================================

    // ====================================
    /*b2PolygonShape shape;
    shape.SetAsBox(5.f * x_cor, 5.f * x_cor);
    b2FixtureDef fd;
    fd.shape = &shape;
    fd.density = 1.0f;
    fd.friction = 0.3f;
    fd.restitution = 0.7f;
    b2BodyDef bd;
    bd.type = b2_dynamicBody;
    bd.angle = M_PI / 4.f;
    bd.position.Set(10.f * x_cor, 80.f * x_cor);
    b2Body* body = world->CreateBody(&bd);
    body->CreateFixture(&fd);*/
    b2BodyDef bd;
    bd.position.Set(3.0f, 5.0f);
    bd.type = b2_dynamicBody;
    bd.fixedRotation = true;
    bd.allowSleep = false;
    b2Body *body = world->CreateBody(&bd);
    b2PolygonShape shape;
    shape.SetAsBox(0.25f, 0.25f);
    b2FixtureDef fd;
    fd.shape = &shape;
    fd.friction = 20.0f;
    fd.density = 20.0f;
    body->CreateFixture(&fd);
    // ====================================

    sf::Image Image;
    if (!Image.LoadFromFile("moo.jpg"))
        return 1;
    //Image.Copy(Image, 0, 0, sf::IntRect(0, 0, 67 * 5, 68));
    sf::Animation Sprite(Image, 45, 50, 5);
    Sprite.SetLoopSpeed(20);
    Sprite.Play(0, 4);
    Sprite.SetBlendMode(sf::Blend::Alpha);
    Sprite.SetCenter(Sprite.GetSize().x / 2, Sprite.GetSize().y / 2);

    while (App->IsOpened())
    {
        sf::Event Event;
        static std::vector<sf::Vector2f> points;
        static sf::Color cl;
        bool nonConvex = false;

        while (App->GetEvent(Event))
        {
            if (Event.Type == sf::Event::Closed)
                App->Close();
            if (Event.Type == sf::Event::KeyPressed)
            {
                if (Event.Key.Code == sf::Key::Escape)
                    App->Close();
                // Jump only when there is no significant vertical velocity.
                if (Event.Key.Code == sf::Key::W && std::fabs(body->GetLinearVelocity().y) < 1.0f)
                    body->ApplyLinearImpulse(b2Vec2(0, 5 * body->GetMass()), body->GetWorldCenter());
            }
        }

        {
            if (App->GetInput().IsKeyDown(sf::Key::A) && std::fabs(body->GetLinearVelocity().x) < 5.0f)
            {
                body->ApplyForce(b2Vec2(-30 * body->GetMass(), 0), body->GetPosition());
            }
            if (App->GetInput().IsKeyDown(sf::Key::D) && std::fabs(body->GetLinearVelocity().x) < 5.0f)
            {
                body->ApplyForce(b2Vec2(30 * body->GetMass(), 0), body->GetPosition());
            }
            if (App->GetInput().IsKeyDown(sf::Key::D))
            {
                //if (Sprite.IsStopped())
                {
                    Sprite.FlipX(false);
                    Sprite.Play(0, 5);
                }
            }
            else if (App->GetInput().IsKeyDown(sf::Key::A))
            {
                //if (Sprite.IsStopped())
                {
                    Sprite.FlipX(true);
                    Sprite.Play(0, 5);
                }
            }
            else
            //if (!Sprite.IsStopped())
            {
                Sprite.Play(12, 22);
            }
        }

        // (1024/1024 iterations is far beyond Box2D's recommended 8/3, but kept as written.)
        world->Step(App->GetFrameTime(), 1024, 1024);
        world->ClearForces();

        App->Clear();
        // And draw all the stuff
        world->DrawDebugData();
        Sprite.Update();
        Sprite.SetPosition(body->GetPosition().x * PPM, App->GetHeight() - (body->GetPosition().y * PPM));
        App->Draw(Sprite);
        App->Display();
    }

    return 0;
}

Given a Body that represents a rectangle, the following C# code will render a texture according to the Body's physics:
spritebatch.Begin();
spritebatch.Draw(texture,
    Body.Position * Scale,
    textureSourceRectangle,
    Color.White,
    Body.Rotation,
    new Vector2(textureWidth / 2f, textureHeight / 2f),
    1f,
    SpriteEffects.None,
    0);
spritebatch.End();
Scale is defined for me as 100.0f, meaning that a Body with Height set to 0.1f is drawn as 0.1f * 100.0f = 10 pixels tall.
The same goes for Body.Position: (0.1f, 0.1f) in Box2D equals (10, 10) in screen coordinates.
It's also important to set the Origin to the center of the rectangle when drawing; this way the rotation happens around the center of your texture.
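If it helps, the same conversion can live in a small helper. This is just a sketch with hypothetical names, assuming a Farseer-style Body whose Position is an XNA Vector2, and the same Scale of 100:

const float Scale = 100f; // 1 physics meter = 100 pixels (as above)

void DrawBody(SpriteBatch spriteBatch, Texture2D texture, Body body)
{
    spriteBatch.Draw(
        texture,
        body.Position * Scale,                                 // meters -> pixels
        null,                                                  // whole texture
        Color.White,
        body.Rotation,                                         // radians in both worlds
        new Vector2(texture.Width / 2f, texture.Height / 2f),  // rotate around the center
        1f,
        SpriteEffects.None,
        0f);
}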

Related

Unity: detect click event on UIVertex

I am drawing lines on a canvas using the UIVertex struct, and I would like to be able to detect click events on the lines I have drawn.
Here is how I draw the lines (largely inspired by this tutorial => https://www.youtube.com/watch?v=--LB7URk60A):
void DrawVerticesForPoint(Vector2 point, float angle, VertexHelper vh)
{
    vertex = UIVertex.simpleVert;
    //vertex.color = Color.red;
    vertex.position = Quaternion.Euler(0, 0, angle) * new Vector3(-thickness / 2, 0);
    vertex.position += new Vector3(unitWidth * point.x, unitHeight * point.y);
    vh.AddVert(vertex);

    vertex.position = Quaternion.Euler(0, 0, angle) * new Vector3(thickness / 2, 0);
    vertex.position += new Vector3(unitWidth * point.x, unitHeight * point.y);
    vh.AddVert(vertex);
}
Any idea?
Here is the solution I have found thanks to this post:
public bool PointIsOnLine(Vector3 point, UILineRenderer line)
{
    Vector3 point1 = line.points[0];
    Vector3 point2 = line.points[1];

    // Project the point onto the segment and clamp to the segment's ends.
    var dirNorm = (point2 - point1).normalized;
    var t = Vector2.Dot(point - point1, dirNorm);
    var tClamped = Mathf.Clamp(t, 0, (point2 - point1).magnitude);
    var closestPoint = point1 + dirNorm * tClamped;

    // The point is "on" the line if it is within half the line's thickness.
    var dist = Vector2.Distance(point, closestPoint);
    return dist < line.thickness / 2;
}
The UILineRenderer class is my class representing the lines.
line.points[0] and line.points[1] contain the coordinates of the two points that determine the line's length and position. line.thickness is the... thickness of the line :O
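To wire the test up to an actual click, something along these lines should work. A sketch only: lineRect and line are assumed fields, and the click point is assumed to be in the same local space the line's points use (if you scale by unitWidth/unitHeight when building vertices, apply the same scaling before the test):

void Update()
{
    if (Input.GetMouseButtonDown(0))
    {
        // Convert the screen-space click into the local space of the line's RectTransform.
        // (Pass the canvas camera instead of null for Screen Space - Camera canvases.)
        Vector2 local;
        if (RectTransformUtility.ScreenPointToLocalPointInRectangle(
                lineRect, Input.mousePosition, null, out local))
        {
            if (PointIsOnLine(local, line))
                Debug.Log("Line clicked");
        }
    }
}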

Why isn't my perspective transform working

I am building a test 3D renderer in WinForms using the objects in System.Numerics, such as Vector3 and Matrix4x4.
The object drawn is a point cloud, centered around (0,0,0) and rotated about the origin. Each node renders as a dot on the screen. Here is what the 3D shape should look like:
[image: Fake Perspective]
More specifically, when viewed from the front, the perspective should be obvious: the blue dots that are further away from the eye should sit at a smaller distance from the center:
[image: Fake Perspective]
The pipeline is roughly as follows:
1. Rotation transformation
   Matrix4x4 RY = Matrix4x4.CreateRotationY(ry);
2. Perspective transformation (fov=90, aspect=1.0f, near=1f, far=100f)
   Matrix4x4 P = Matrix4x4.CreatePerspectiveFieldOfView(fov.Radians(), 1.0f, 1f, 100f);
3. Camera transformation
   Matrix4x4 C = RY * P;
   var node = Vector3.Transform(face.Nodes[i], C);
4. Project to 2D
   Vector2 point = new Vector2(node.X, node.Y);
5. View transformation
   Matrix3x2 S = Matrix3x2.CreateScale(height / scale, -height / scale);
   Matrix3x2 T = Matrix3x2.CreateTranslation(width / 2f, height / 2f);
   Matrix3x2 V = S * T;
   point = Vector2.Transform(point, V);
6. Pixel coordinates & render
   PointF pixel = new PointF(point.X, point.Y);
   e.Graphics.FillEllipse(brush, pixel.X - 2, pixel.Y - 2, 4, 4);
What I am seeing instead is an orthographic projection:
[image: Program Output]
The blue nodes further away are not smaller as expected; somehow the perspective transformation is being ignored.
So my question is: is my usage of Matrix4x4.CreatePerspectiveFieldOfView() correct in step #2, and is the projection from 3D to 2D in step #4 correct?
Steps #1, #5 and #6 seem to be working exactly as intended; my issue is somewhere in steps #2-#4.
Example code to reproduce the issue
Form1.cs
public partial class Form1 : Form
{
    public Form1()
    {
        InitializeComponent();
    }

    public Shape Object { get; set; }

    protected override void OnLoad(EventArgs e)
    {
        base.OnLoad(e);
        this.Object = Shape.DemoShape1();
    }

    protected override void OnPaint(PaintEventArgs e)
    {
        base.OnPaint(e);
        float width = ClientSize.Width, height = ClientSize.Height;
        float scale = 40f, fov = 90f;

        Matrix4x4 RY = Matrix4x4.CreateRotationY(ry);
        Matrix4x4 RX = Matrix4x4.CreateRotationX(rx);
        Matrix4x4 P = Matrix4x4.CreatePerspectiveFieldOfView(fov.Radians(), 1.0f, 1f, 100f);
        Matrix4x4 C = RY * RX * P;

        Matrix3x2 S = Matrix3x2.CreateScale(
            height / scale, -height / scale);
        Matrix3x2 T = Matrix3x2.CreateTranslation(
            width / 2f, height / 2f);
        Matrix3x2 V = S * T;

        using (var pen = new Pen(Color.Black, 0))
        {
            var arrow = new AdjustableArrowCap(4f, 9.0f);
            pen.CustomEndCap = arrow;
            using (var brush = new SolidBrush(Color.Black))
            {
                // Draw coordinate triad (omitted)
                // Each face has multiple nodes with the same color
                foreach (var face in Object.Faces)
                {
                    brush.Color = face.Color;
                    PointF[] points = new PointF[face.Nodes.Count];
                    for (int i = 0; i < points.Length; i++)
                    {
                        // transform nodes into draw points
                        var item = Vector4.Transform(face.Nodes[i], C);
                        var point = Vector2.Transform(item.Project(), V);
                        points[i] = point.ToPoint();
                    }
                    // Draw points as dots
                    e.Graphics.SmoothingMode = SmoothingMode.HighQuality;
                    for (int i = 0; i < points.Length; i++)
                    {
                        e.Graphics.FillEllipse(brush,
                            points[i].X - 2, points[i].Y - 2,
                            4, 4);
                    }
                }
            }
        }
    }
}
GraphicsExtensions.cs
public static class GraphicsExtensions
{
    public static PointF ToPoint(this Vector2 vector)
        => new PointF(vector.X, vector.Y);

    public static Vector2 Project(this Vector3 vector)
        => new Vector2(vector.X, vector.Y);

    public static Vector2 Project(this Vector4 vector)
        => new Vector2(vector.X, vector.Y);

    public static float Radians(this float degrees) => (float)(Math.PI / 180) * degrees;
    public static float Degrees(this float radians) => (float)(180 / Math.PI) * radians;
}

C# - Use of compute shaders

I'm trying to implement a ray/mesh intersection method on the GPU using SharpDX (Direct3D 11). I've seen from an older post (Older post) that this can be done using a compute shader, but I need help creating and defining the buffers outside the .hlsl code.
My HLSL code is the following:
struct rayHit
{
    float3 intersection;
};

cbuffer cbRaySettings : register(b0)
{
    float3 rayFrom;
    float3 rayDir;
    uint TriangleCount;
};

StructuredBuffer<float3> positionBuffer : register(t0);
StructuredBuffer<uint3> indexBuffer : register(t1);
AppendStructuredBuffer<rayHit> appendRayHitBuffer : register(u0);

void TestTriangle(float3 p1, float3 p2, float3 p3, out bool hit, out float3 intersection)
{
    // Perform ray/triangle intersection.
    // Compute vectors along two edges of the triangle.
    float3 edge1, edge2;

    // Edge 1
    edge1.x = p2.x - p1.x;
    edge1.y = p2.y - p1.y;
    edge1.z = p2.z - p1.z;

    // Edge 2
    edge2.x = p3.x - p1.x;
    edge2.y = p3.y - p1.y;
    edge2.z = p3.z - p1.z;

    // Cross product of ray direction and edge2 - first part of determinant.
    float3 directioncrossedge2;
    directioncrossedge2.x = (rayDir.y * edge2.z) - (rayDir.z * edge2.y);
    directioncrossedge2.y = (rayDir.z * edge2.x) - (rayDir.x * edge2.z);
    directioncrossedge2.z = (rayDir.x * edge2.y) - (rayDir.y * edge2.x);

    // Compute the determinant: dot product of edge1 and the first part of the determinant.
    float determinant;
    determinant = (edge1.x * directioncrossedge2.x) + (edge1.y * directioncrossedge2.y) + (edge1.z * directioncrossedge2.z);

    // If the ray is parallel to the triangle plane, there is no collision.
    // This also means that we are not culling; the ray may hit both the
    // back and the front of the triangle.
    if (determinant == 0)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return; // early-out added: otherwise 'hit' was overwritten at the end
    }

    float inversedeterminant = 1.0f / determinant;

    // Calculate the U parameter of the intersection point.
    float3 distanceVector;
    distanceVector.x = rayFrom.x - p1.x;
    distanceVector.y = rayFrom.y - p1.y;
    distanceVector.z = rayFrom.z - p1.z;

    float triangleU;
    triangleU = (distanceVector.x * directioncrossedge2.x) + (distanceVector.y * directioncrossedge2.y) + (distanceVector.z * directioncrossedge2.z);
    triangleU = triangleU * inversedeterminant;

    // Make sure it is inside the triangle.
    if (triangleU < 0.0f || triangleU > 1.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    // Calculate the V parameter of the intersection point.
    float3 distancecrossedge1;
    distancecrossedge1.x = (distanceVector.y * edge1.z) - (distanceVector.z * edge1.y);
    distancecrossedge1.y = (distanceVector.z * edge1.x) - (distanceVector.x * edge1.z);
    distancecrossedge1.z = (distanceVector.x * edge1.y) - (distanceVector.y * edge1.x);

    float triangleV;
    triangleV = ((rayDir.x * distancecrossedge1.x) + (rayDir.y * distancecrossedge1.y)) + (rayDir.z * distancecrossedge1.z);
    triangleV = triangleV * inversedeterminant;

    // Make sure it is inside the triangle.
    if (triangleV < 0.0f || triangleU + triangleV > 1.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    // Compute the distance along the ray to the triangle.
    float raydistance;
    raydistance = (edge2.x * distancecrossedge1.x) + (edge2.y * distancecrossedge1.y) + (edge2.z * distancecrossedge1.z);
    raydistance = raydistance * inversedeterminant;

    // Is the triangle behind the ray origin?
    if (raydistance < 0.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    // Fixed: use the computed ray distance (the original read an unset 'distance').
    intersection = rayFrom + (rayDir * raydistance);
    hit = true;
}

[numthreads(64, 1, 1)]
void CS_RayAppend(uint3 tid : SV_DispatchThreadID)
{
    if (tid.x >= TriangleCount)
        return;

    uint3 indices = indexBuffer[tid.x];
    float3 p1 = positionBuffer[indices.x];
    float3 p2 = positionBuffer[indices.y];
    float3 p3 = positionBuffer[indices.z];

    bool hit;
    float3 p;
    TestTriangle(p1, p2, p3, hit, p);
    if (hit)
    {
        rayHit hitData;
        hitData.intersection = p;
        appendRayHitBuffer.Append(hitData);
    }
}
The following is part of my C# implementation, but I'm not able to understand how to load the buffers for the compute shader.
int count = obj.Mesh.Triangles.Count;
int size = 8; // int + float for every hit

BufferDescription bufferDesc = new BufferDescription()
{
    BindFlags = BindFlags.UnorderedAccess | BindFlags.ShaderResource,
    Usage = ResourceUsage.Default,
    CpuAccessFlags = CpuAccessFlags.None,
    OptionFlags = ResourceOptionFlags.BufferStructured,
    StructureByteStride = size,
    SizeInBytes = size * count
};
Buffer buffer = new Buffer(device, bufferDesc);

UnorderedAccessViewDescription uavDescription = new UnorderedAccessViewDescription()
{
    Buffer = new UnorderedAccessViewDescription.BufferResource() { FirstElement = 0, Flags = UnorderedAccessViewBufferFlags.None, ElementCount = count },
    Format = SharpDX.DXGI.Format.Unknown,
    Dimension = UnorderedAccessViewDimension.Buffer
};
UnorderedAccessView uav = new UnorderedAccessView(device, buffer, uavDescription);
context.ComputeShader.SetUnorderedAccessView(0, uav);

var code = HLSLCompiler.CompileFromFile(@"Shaders\TestTriangle.hlsl", "CS_RayAppend", "cs_5_0");
ComputeShader _shader = new ComputeShader(device, code);

Buffer positionsBuffer = new Buffer(device, Utilities.SizeOf<Vector3>(), ResourceUsage.Default, BindFlags.None, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
context.UpdateSubresource(ref data, positionsBuffer);

context.ComputeShader.Set(_shader);
In my C# implementation I'm considering only one ray (with its origin and direction), and I would like to use the shader to check the intersection with all the triangles of the mesh. I'm already able to do that on the CPU, but for 20k+ triangles the computation takes too long, even though I'm already using parallel code.
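For the input side, here is a hedged sketch of how the buffers from the HLSL above are usually wired up in SharpDX. The positions (Vector3[]) and indices (uint[], three per triangle) arrays, plus the from/dir ray values, are assumptions standing in for the mesh data; the calls themselves (Buffer.Create, ShaderResourceView, Dispatch) are standard SharpDX Direct3D 11 API. Needed namespaces: SharpDX, SharpDX.Direct3D11, System.Runtime.InteropServices.

// Constant buffer matching cbRaySettings: HLSL packs each float3 into a
// 16-byte register, so rayDir starts at offset 16 and TriangleCount at 28.
[StructLayout(LayoutKind.Explicit, Size = 32)]
struct RaySettings
{
    [FieldOffset(0)] public Vector3 RayFrom;
    [FieldOffset(16)] public Vector3 RayDir;
    [FieldOffset(28)] public uint TriangleCount;
}

// StructuredBuffer<float3> positionBuffer : register(t0)
var positionsBuffer = SharpDX.Direct3D11.Buffer.Create(device,
    BindFlags.ShaderResource, positions, 0, ResourceUsage.Immutable,
    CpuAccessFlags.None, ResourceOptionFlags.BufferStructured,
    Utilities.SizeOf<Vector3>());
// A null view description covers the whole structured buffer.
var positionsView = new ShaderResourceView(device, positionsBuffer);

// StructuredBuffer<uint3> indexBuffer : register(t1), stride = 3 uints
var indicesBuffer = SharpDX.Direct3D11.Buffer.Create(device,
    BindFlags.ShaderResource, indices, 0, ResourceUsage.Immutable,
    CpuAccessFlags.None, ResourceOptionFlags.BufferStructured,
    sizeof(uint) * 3);
var indicesView = new ShaderResourceView(device, indicesBuffer);

// cbuffer cbRaySettings : register(b0)
var settings = new RaySettings { RayFrom = from, RayDir = dir, TriangleCount = (uint)count };
var settingsBuffer = SharpDX.Direct3D11.Buffer.Create(device, BindFlags.ConstantBuffer, ref settings);

// Bind everything and dispatch one thread per triangle (64 per group).
// Note: for an AppendStructuredBuffer, the UAV should be created with
// UnorderedAccessViewBufferFlags.Append, and its counter reset here via
// the initial-count argument.
context.ComputeShader.Set(_shader);
context.ComputeShader.SetConstantBuffer(0, settingsBuffer);
context.ComputeShader.SetShaderResources(0, positionsView, indicesView);
context.ComputeShader.SetUnorderedAccessView(0, uav, 0);
context.Dispatch((count + 63) / 64, 1, 1);
// (Read hits back via a staging copy; CopyStructureCount retrieves the appended count.)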

How to Draw Circle in OpenTK Xamarin.Forms

I need to know how to draw a transparent circle/ellipse using OpenTK in Xamarin.Forms.
I have tried creating vertex and fragment shaders with reference to the following link: How to draw circle on OpenGL ES 2.0 cross platform? But I do not get anything on screen; it just shows blank.
Below is the sample code I use to draw the circle with OpenTK in Xamarin.Forms.
GL.ClearColor(0.0f, 0.0f, 0.0f, 1.0f);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.UseProgram(programID);

// The locations where we pass in our color and vertex data
positionInput = GL.GetAttribLocation(programID, "Position");
colorInput = GL.GetAttribLocation(programID, "SourceColor");

// The locations where we pass in unchanging data
projectionInput = GL.GetUniformLocation(programID, "Projection");
modelviewInput = GL.GetUniformLocation(programID, "Modelview");

Matrix4 modelview = Matrix4.CreateRotationX(rotation) * Matrix4.CreateRotationY(rotation) * Matrix4.CreateRotationZ(rotation) * Matrix4.CreateTranslation(xTranslation, yTranslation, -7f);
GL.UniformMatrix4(modelviewInput, false, ref modelview);

float h = 4.0f * (height / width);
Matrix4 projection = Matrix4.CreatePerspectiveOffCenter(-2, 2, -h / 2f, h / 2f, 4, 10);
GL.Viewport(0, 0, (int)width, (int)height);
GL.UniformMatrix4(projectionInput, false, ref projection);

GL.BindBuffer(BufferTarget.ArrayBuffer, colorDataBuffer);
GL.EnableVertexAttribArray(colorInput);
GL.VertexAttribPointer(colorInput, 4, VertexAttribPointerType.Float, false, 0, 0);

float DEGREE_TO_RAD = (float)(3.14 / 180);
int M_IN_DEGREE = 370;
int N_IN_DEGREE = 100;
int nCount = 1;
int index = 0;
int size = 2;

float[] stVertexArray = new float[2 * 360];
stVertexArray[0] = 0;
stVertexArray[1] = 0;
for (int nR = N_IN_DEGREE; nR < M_IN_DEGREE; nR++)
{
    float fX = (float)System.Math.Sin((float)nR * DEGREE_TO_RAD);
    float fY = (float)System.Math.Cos((float)nR * DEGREE_TO_RAD);
    stVertexArray[nCount * 2] = fX;
    stVertexArray[nCount * 2 + 1] = fY;
    nCount++;
}

GL.BindBuffer(BufferTarget.ArrayBuffer, stVertexArray.Length);
GL.EnableVertexAttribArray(0);
GL.VertexAttribPointer(index, size, VertexAttribPointerType.Float, false, stVertexArray.Length, IntPtr.Zero);
GL.DrawElements(BeginMode.LineLoop, stVertexArray.Length, DrawElementsType.UnsignedByte, stVertexArray);
GL.Finish();
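Two likely culprits in the snippet above: GL.BindBuffer is called with stVertexArray.Length in place of a buffer handle, and GL.DrawElements is handed the vertex array as if it were an index list. Below is a hedged sketch of the usual VBO path (OpenTK enum names vary slightly between the desktop and ES20 bindings; positionInput and nCount come from the code above):

// Upload the fan vertices into a real vertex buffer object once.
int vbo;
GL.GenBuffers(1, out vbo);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo);
GL.BufferData(BufferTarget.ArrayBuffer,
    (IntPtr)(stVertexArray.Length * sizeof(float)),
    stVertexArray,
    BufferUsage.StaticDraw); // BufferUsageHint.StaticDraw in the desktop bindings

// Two floats per vertex, tightly packed (stride 0), starting at the buffer's origin.
GL.EnableVertexAttribArray(positionInput);
GL.VertexAttribPointer(positionInput, 2, VertexAttribPointerType.Float, false, 0, IntPtr.Zero);

// For the transparency part: enable blending and give SourceColor an alpha < 1.
GL.Enable(EnableCap.Blend);
GL.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.OneMinusSrcAlpha);

// Center + rim points: draw directly as a filled fan, no index buffer needed
// (use BeginMode.LineLoop over the rim points for just an outline).
GL.DrawArrays(BeginMode.TriangleFan, 0, nCount);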

XNA Loading Models

I am pretty new to the 3D side of XNA and am converting a 2D game.
It's basically designed as a grid, and I am drawing with the code below. This works but is a bit laggy; am I doing this completely wrong? Presumably there shouldn't be much lag at this point, when we are only talking about a few small models.
protected override void Draw(GameTime gameTime)
{
    fpsTimer += gameTime.ElapsedGameTime;
    fpsCount++;
    if (fpsTimer >= TimeSpan.FromSeconds(1))
    {
        fpsTimer = TimeSpan.FromSeconds(0);
        fps = fpsCount;
        fpsCount = 0;
    }

    GraphicsDevice.Clear(Color.CornflowerBlue);
    projection = Matrix.CreatePerspectiveFieldOfView(MathHelper.PiOver4, device.Viewport.AspectRatio, 1.0f, 1000.0f);
    world = Matrix.CreateTranslation(new Vector3(0, 0, 0));
    view = Matrix.CreateLookAt(new Vector3(xPos, yPos, zHeight), new Vector3(xPos, yPos + zRotation, 0), new Vector3(0, 5, 0));

    // DRAW OBJECTS ON MAP
    drawMap(view, world, projection);

    spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.AlphaBlend);
    spriteBatch.DrawString(font, "" + fps, new Vector2(100, 100), Color.Black);
    spriteBatch.End();

    base.Draw(gameTime);
}

private void drawMap(Matrix view, Matrix world, Matrix projection)
{
    GraphicsDevice.BlendState = BlendState.Opaque;
    GraphicsDevice.DepthStencilState = DepthStencilState.Default;
    GraphicsDevice.SamplerStates[0] = SamplerState.LinearWrap;

    int scale = Globals.scale;
    int screenWidthScale = Globals.screenwidth / scale;
    int screenHeightScale = Globals.screenheight / scale;
    int mapXtile = Convert.ToInt32(xPos / 2);
    int mapYtile = Convert.ToInt32(yPos / 2);

    for (int i = Convert.ToInt32(xPos / 2) - 30; i < Convert.ToInt32(xPos / 2) + 30; i++)
    {
        for (int a = Convert.ToInt32(yPos / 2); a < Convert.ToInt32(yPos / 2) + 50; a++)
        {
            if (mapXtile > 0 && mapYtile > 0 && mapXtile < Globals.mapsizex && mapYtile < Globals.mapsizey)
            {
                int currentTile = Globals.levelArray[mapXtile, mapYtile].TyleType;
                // DRAW TREES
                if (currentTile == tyleType.tree)
                {
                    if (Globals.levelArray[mapXtile, mapYtile].typeModifier == 1)
                    {
                        Matrix worldMatrix = Matrix.CreateScale(0.8f, 0.8f, 0.8f) * Matrix.CreateRotationX(1.5f) * Matrix.CreateTranslation(new Vector3(i * 2 + 0.2f, a * 2 - 0.4f, -0.1f));
                        tree.Draw(worldMatrix, view, projection);
                    }
                    if (Globals.levelArray[mapXtile, mapYtile].typeModifier == 2)
                    {
                        Matrix worldMatrix = Matrix.CreateScale(0.8f, 0.8f, 0.8f) * Matrix.CreateRotationX(1.5f) * Matrix.CreateTranslation(new Vector3(i * 2 + 0.2f, a * 2 - 0.4f, -0.1f));
                        tree2.Draw(worldMatrix, view, projection);
                    }
                }
            }
            mapYtile = mapYtile + 1;
            //mapYtile++;
        }
        mapXtile = mapXtile + 1;
        mapYtile = Convert.ToInt32(yPos / 2);
    }
}
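Not a definitive fix, but one common XNA suggestion for this pattern: Model.Draw sets up effect state and bone transforms on every call, so when the same tree model is drawn for dozens of tiles it can be cheaper to cache the bone array once and set only the three matrices per mesh. A sketch, where tree is the loaded Model:

// Cache once (e.g. in LoadContent):
Matrix[] treeBones = new Matrix[tree.Bones.Count];
tree.CopyAbsoluteBoneTransformsTo(treeBones);

// Reusable draw helper, called per tile instead of tree.Draw(...):
void DrawModel(Model model, Matrix[] bones, Matrix world, Matrix view, Matrix projection)
{
    foreach (ModelMesh mesh in model.Meshes)
    {
        foreach (BasicEffect effect in mesh.Effects)
        {
            effect.World = bones[mesh.ParentBone.Index] * world;
            effect.View = view;
            effect.Projection = projection;
        }
        mesh.Draw();
    }
}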
