Is there any way to make a primitive and use it over and over again? For example, if I make one cube, can I create 100 of them and arrange them in a 10x10 grid? I've tried using a for loop and updating the x and z coordinates on each pass, but it only moves the single cube that's created at the beginning. My class was created from an example in a book. I know how to move the cube around the area by changing the coordinates in the PositionCube method. What can I do in my main game class that will allow me to create a simple 10x10 grid?
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Graphics;
namespace Cube_Chaser
{
class Cube
{
private GraphicsDevice device;
private Texture2D texture;
public Vector3 location;
private Vector3 position;
private VertexBuffer cubeVertexBuffer;
private List<VertexPositionTexture> vertices = new List<VertexPositionTexture>();
public Cube(GraphicsDevice graphicsDevice, Vector3 playerLocation, float minDistance, Texture2D texture)
{
device = graphicsDevice;
this.texture = texture;
PositionCube(playerLocation, minDistance);
BuildFace(new Vector3(0, 0, 0), new Vector3(0, 1, 1));
BuildFace(new Vector3(0, 0, 1), new Vector3(1, 1, 1));
BuildFace(new Vector3(1, 0, 1), new Vector3(1, 1, 0));
BuildFace(new Vector3(1, 0, 0), new Vector3(0, 1, 0));
BuildFaceHorizontal(new Vector3(0, 1, 0), new Vector3(1, 1, 1));
BuildFaceHorizontal(new Vector3(0, 0, 1), new Vector3(1, 0, 0));
cubeVertexBuffer = new VertexBuffer(device, VertexPositionTexture.VertexDeclaration, vertices.Count, BufferUsage.WriteOnly);
cubeVertexBuffer.SetData<VertexPositionTexture>(vertices.ToArray());
this.position = position; // note: this assigns the field to itself and has no effect
}
private void BuildFace(Vector3 p1, Vector3 p2)
{
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p2.Y, p1.Z, 1, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p1.Y, p2.Z, 0, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 1, 0));
}
private void BuildFaceHorizontal(Vector3 p1, Vector3 p2)
{
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p1.Y, p1.Z, 1, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p2.Z, 0, 0));
}
private VertexPositionTexture BuildVertex(float x, float y, float z, float u, float v)
{
return new VertexPositionTexture(new Vector3(x, y, z), new Vector2(u, v));
}
public void PositionCube(Vector3 playerLocation, float minDistance)
{
location = new Vector3(.5f, .5f, .5f); // note: the parameters are ignored, so every cube gets the same location
}
public void Draw(Camera camera, BasicEffect effect)
{
effect.VertexColorEnabled = false;
effect.TextureEnabled = true;
effect.Texture = texture;
Matrix center = Matrix.CreateTranslation(new Vector3(-0.5f, -0.5f, -0.5f));
Matrix scale = Matrix.CreateScale(0.05f);
Matrix translate = Matrix.CreateTranslation(location);
effect.World = center * scale * translate;
effect.View = camera.View;
effect.Projection = camera.Projection;
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(cubeVertexBuffer);
device.DrawPrimitives(PrimitiveType.TriangleList, 0, cubeVertexBuffer.VertexCount / 3);
}
}
}
}
I took @nico-schetler's answer and created the classes for you.
Cube.cs
class Cube
{
private GraphicsDevice device;
private VertexBuffer cubeVertexBuffer;
public Cube(GraphicsDevice graphicsDevice)
{
device = graphicsDevice;
var vertices = new List<VertexPositionTexture>();
BuildFace(vertices, new Vector3(0, 0, 0), new Vector3(0, 1, 1));
BuildFace(vertices, new Vector3(0, 0, 1), new Vector3(1, 1, 1));
BuildFace(vertices, new Vector3(1, 0, 1), new Vector3(1, 1, 0));
BuildFace(vertices, new Vector3(1, 0, 0), new Vector3(0, 1, 0));
BuildFaceHorizontal(vertices, new Vector3(0, 1, 0), new Vector3(1, 1, 1));
BuildFaceHorizontal(vertices, new Vector3(0, 0, 1), new Vector3(1, 0, 0));
cubeVertexBuffer = new VertexBuffer(device, VertexPositionTexture.VertexDeclaration, vertices.Count, BufferUsage.WriteOnly);
cubeVertexBuffer.SetData<VertexPositionTexture>(vertices.ToArray());
}
private void BuildFace(List<VertexPositionTexture> vertices, Vector3 p1, Vector3 p2)
{
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p2.Y, p1.Z, 1, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p1.Y, p2.Z, 0, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 1, 0));
}
private void BuildFaceHorizontal(List<VertexPositionTexture> vertices, Vector3 p1, Vector3 p2)
{
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p1.Y, p1.Z, 1, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p1.Z, 0, 1));
vertices.Add(BuildVertex(p2.X, p2.Y, p2.Z, 1, 0));
vertices.Add(BuildVertex(p1.X, p1.Y, p2.Z, 0, 0));
}
private VertexPositionTexture BuildVertex(float x, float y, float z, float u, float v)
{
return new VertexPositionTexture(new Vector3(x, y, z), new Vector2(u, v));
}
public void Draw( BasicEffect effect)
{
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(cubeVertexBuffer);
device.DrawPrimitives(PrimitiveType.TriangleList, 0, cubeVertexBuffer.VertexCount / 3);
}
}
}
CubeDrawable.cs
public class DrawableList<T> : DrawableGameComponent
{
private BasicEffect effect;
private Camera camera;
private class Entity
{
public Vector3 Position { get; set; }
public Matrix Orientation { get; set; }
public Texture2D Texture { get; set; }
}
private Cube cube;
private List<Entity> entities = new List<Entity>();
public DrawableList (Game game, Camera camera, BasicEffect effect)
: base( game )
{
this.effect = effect;
cube = new Cube (game.GraphicsDevice);
this.camera = camera;
}
public void Add( Vector3 position, Matrix orientation, Texture2D texture )
{
entities.Add (new Entity() {
Position = position,
Orientation = orientation,
Texture = texture
});
}
public override void Draw (GameTime gameTime )
{
base.Draw (gameTime);
foreach (var item in entities) {
effect.VertexColorEnabled = false;
effect.TextureEnabled = true;
effect.Texture = item.Texture;
Matrix center = Matrix.CreateTranslation(new Vector3(-0.5f, -0.5f, -0.5f));
Matrix scale = Matrix.CreateScale(0.05f);
Matrix translate = Matrix.CreateTranslation(item.Position);
effect.World = center * scale * translate;
effect.View = camera.View;
effect.Projection = camera.Projection;
cube.Draw (effect);
}
}
}
Usage
camera = new Camera (graphics.GraphicsDevice);
effect = new BasicEffect (graphics.GraphicsDevice);
cubes = new DrawableList<Cube> (this, camera, effect);
Components.Add (cubes);
for (int i=0 ; i < 50; i++)
{
cubes.Add (new Vector3( i*0.5f, 50.0f, 50.0f), Matrix.Identity, logoTexture);
}
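To get the 10x10 grid from the original question, add one entry per grid cell instead. The spacing of 1.0f below is just an illustrative choice; since DrawableList.Draw scales each cube by 0.05, tighten the spacing if the cubes should sit next to each other.
for (int x = 0; x < 10; x++)
{
    for (int z = 0; z < 10; z++)
    {
        // one shared Cube, drawn 100 times at different positions
        cubes.Add(new Vector3(x * 1.0f, 0.0f, z * 1.0f), Matrix.Identity, logoTexture);
    }
}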
You should separate the logical representation of the cube from the physical one. The physical representation is the vertex buffer and so on; it is the same for every cube, and you can vary each instance's rendering through, e.g., its world transform (as you already did).
The logical representation is your cube class. You will need 100 instances of it (each with its own position, scale, etc.). The logical representation can reference the physical one, so there is no need for more than one vertex buffer.
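A minimal sketch of that split, with illustrative names (CubeGeometry and CubeInstance are not from the code above): the shared geometry is built once, while each logical cube only stores its own per-instance data.
// Physical representation: one vertex buffer shared by every cube.
class CubeGeometry
{
    public VertexBuffer Vertices;   // built once, exactly like the Cube constructor above
}
// Logical representation: one object per cube in the grid.
class CubeInstance
{
    public CubeGeometry Geometry;   // reference to the shared buffer
    public Vector3 Position;        // per-cube data only
    public float Scale = 0.05f;

    public Matrix GetWorld()
    {
        return Matrix.CreateScale(Scale) * Matrix.CreateTranslation(Position);
    }
}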
Related
I have a little problem with rendering in my SharpDX Direct3D 11 app.
I have been testing rendering the scene to a texture and then drawing that texture on the back buffer, but unfortunately the render texture does not contain the primitives that should be drawn; the texture is only filled with a solid color.
The whole project is on GitHub: https://github.com/Kordi3112/SharpDXTest11
Main code part with rendering methods:
public override void Render()
{
//Camera
var proj = Matrix.OrthoLH(3 * Form.Bounds.Width / Form.Bounds.Height, 3, 0.01f, 100f);
var view = Matrix.LookAtLH(new Vector3(0, 0, -10), new Vector3(0, 0, 20), Vector3.UnitY);
var viewProj = Matrix.Multiply(view, proj);
var world = Matrix.Identity;
var worldViewProj = world * viewProj;
worldViewProj.Transpose();
//Update wvp matrix
Context.UpdateSubresource(ref worldViewProj, ContantBuffer);
DrawOnTexture();
//Set BackBuffer as render target
Context.OutputMerger.SetTargets(depthView, renderView);
// Clear views
Context.ClearDepthStencilView(depthView, DepthStencilClearFlags.Depth, 1.0f, 0);
Context.ClearRenderTargetView(renderView, Color.Pink);
//Set TextureColor Shader
Effect2.ApplyShader(Context);
//Set Buffers
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(VertexBuffer2, Utilities.SizeOf<VertexPositionColorTexture>(), 0));
Context.InputAssembler.SetIndexBuffer(IndexBuffer, Format.R32_UInt, 0);
//Set Texture to Shader
Context.PixelShader.SetShaderResource(0, RenderTexture.ShaderResourceView);
//Draw
Context.DrawIndexed(6, 0, 0);
// Present!
SwapChain.Present(0, PresentFlags.None);
}
private void DrawOnTexture()
{
//Set Color Shader
Effect1.ApplyShader(Context);
//Set Buffers
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(VertexBuffer, Utilities.SizeOf<VertexPositionColor>(), 0));
Context.InputAssembler.SetIndexBuffer(IndexBuffer, Format.R32_UInt, 0);
//Set Target
RenderTexture.SetRenderTarget(Context, depthView);
//Clear Targets - Green Bgound
RenderTexture.ClearRenderTarget(Context, depthView, 0, 1, 0, 1);
//Draw on RenderTarget
Context.DrawIndexed(6, 0, 0);
}
After the call to Context.DrawIndexed(6, 0, 0) in DrawOnTexture(), the primitive should be drawn.
(Screenshot: what the code above actually produces)
(Screenshot: what I wanted to get)
What's wrong with my code?
I'm sure the problem is not the matrix or the camera: if I modify the code to render the primitive directly to the back buffer, it draws normally.
public override void Render()
{
//Camera
var proj = Matrix.OrthoLH(3 * Form.Bounds.Width / Form.Bounds.Height, 3, 0.01f, 100f);
var view = Matrix.LookAtLH(new Vector3(0, 0, -10), new Vector3(0, 0, 20), Vector3.UnitY);
var viewProj = Matrix.Multiply(view, proj);
var world = Matrix.Identity;
var worldViewProj = world * viewProj;
worldViewProj.Transpose();
//Update wvp matrix
Context.UpdateSubresource(ref worldViewProj, ContantBuffer);
//DrawOnTexture();
//Set BackBuffer as render target
Context.OutputMerger.SetTargets(depthView, renderView);
// Clear views
Context.ClearDepthStencilView(depthView, DepthStencilClearFlags.Depth, 1.0f, 0);
Context.ClearRenderTargetView(renderView, Color.Pink);
//Set Color Shader
Effect1.ApplyShader(Context);
//Set Buffers
Context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(VertexBuffer, Utilities.SizeOf<VertexPositionColor>(), 0));
Context.InputAssembler.SetIndexBuffer(IndexBuffer, Format.R32_UInt, 0);
//Set Texture to Shader
//Context.PixelShader.SetShaderResource(0, RenderTexture.ShaderResourceView);
//Draw
Context.DrawIndexed(6, 0, 0);
// Present!
SwapChain.Present(0, PresentFlags.None);
}
(Screenshot: output)
Vertex Buffers declaration:
//Position Color
VertexBuffer = Buffer.Create(Device, BindFlags.VertexBuffer, new[] {
new VertexPositionColor(new Vector4(-1, -1, 0, 1), Color.Red.ToVector4()),
new VertexPositionColor(new Vector4(-1, 1, 0, 1), Color.Green.ToVector4()),
new VertexPositionColor(new Vector4(1, 1, 0, 1), Color.Blue.ToVector4()),
new VertexPositionColor(new Vector4(1, -1, 0, 1), Color.Yellow.ToVector4())
});
//Position Color Texture
VertexBuffer2 = Buffer.Create(Device, BindFlags.VertexBuffer, new[] {
new VertexPositionColorTexture(new Vector4(-1, -1, 0, 1), Color.White.ToVector4(), new Vector2(0,1)),
new VertexPositionColorTexture(new Vector4(-1, 1, 0, 1), Color.White.ToVector4(),new Vector2(0,0)),
new VertexPositionColorTexture(new Vector4(1, 1, 0, 1), Color.White.ToVector4(),new Vector2(1,0)),
new VertexPositionColorTexture(new Vector4(1, -1, 0, 1), Color.White.ToVector4(),new Vector2(1,1))
});
IndexBuffer = Buffer.Create(Device, BindFlags.IndexBuffer, new[] {
0,1,2,
0,2,3
});
I am working on a renderer, and I am having some trouble with the perspective projection matrix.
The following is my perspective projection matrix.
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4(1/(_tanHalffov * _ar), 0 , 0 , 0),
new Vector4(0 , 1 / _tanHalffov , 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange , 2*_zNear*_zFar / _zRange),
new Vector4(0 , 0 , 1 , 0));
}
I then multiplied it with the camera's transform matrix and the model's transform matrix.
It works, but the z direction seems a bit stretched, and if I make zFar larger the stretching becomes even more obvious. I figured it might have something to do with zRange, but I already divide by zRange in the matrix, so shouldn't it be rescaled?
The following is the result of my program:
(Screenshot: the 1:1:1 cube looks distorted after the projection)
(Screenshot: even more so in the corner)
---Update---
This is the vertex shader
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
uniform vec3 cam_pos;
uniform mat4 transform;
uniform mat4 nptransform;
vec4 temp_Pos;
out vec3 normal0;
out vec2 texCoord0;
out vec3 cam_angle;
out vec3 position0;
void main()
{
temp_Pos = nptransform * vec4(position, 1.0);
position0 = vec3(temp_Pos.x,temp_Pos.y,temp_Pos.z);
cam_angle = normalize(cam_pos - position0);
normal0 = normal;
texCoord0 = texCoord;
gl_Position = transform * vec4(position, 1.0);//the bug is about this line
}
The following is the complete code of my matrix
public Matrix4 GetTransform(Vector3 _OffSet)
{
return Matrix4.Translation(Position - _OffSet) * Matrix4.RotateX(Rotation.x) * Matrix4.RotateY(Rotation.y) * Matrix4.RotateZ(Rotation.z) * Matrix4.Scale(Scale.x, Scale.y, Scale.z);
}
public Matrix4 GetProjectdTransform(Vector3 _OffSet)//This is the one I sent to the shader.
{
Transform CameraTransform = Core.The_Camera.Attaching_GameObject.transform;
return Matrix4.Projection(Core.MainCamera.zNear, Core.MainCamera.zFar, Core.MainCamera.Width, Core.MainCamera.Height, Core.MainCamera.fov) * Matrix4.RotateX(CameraTransform.Rotation.x) * Matrix4.RotateY(CameraTransform.Rotation.y) * Matrix4.RotateZ(CameraTransform.Rotation.z) * Matrix4.CameraTranslation(CameraTransform.Position) * GetTransform(_OffSet);
}
Here are the details of the matrix functions; there shouldn't be any problem with them, as I have tested them many times.
public static Matrix4 CameraTranslation(Vector3 _CameraPosition)
{
return new Matrix4(new Vector4(1, 0, 0, -_CameraPosition.x),
new Vector4(0, 1, 0, -_CameraPosition.y),
new Vector4(0, 0, 1, -_CameraPosition.z),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Translation(Vector3 _Position)
{
return new Matrix4(new Vector4(1, 0, 0, _Position.x),
new Vector4(0, 1, 0, _Position.y),
new Vector4(0, 0, 1, _Position.z),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Scale(float _x, float _y, float _z)
{
return new Matrix4(new Vector4(_x, 0, 0, 0),
new Vector4(0, _y, 0, 0),
new Vector4(0, 0, _z, 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateX(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4(1, 0, 0, 0),
new Vector4(0, (float)Math.Cos(_Radians), (float)Math.Sin(_Radians), 0),
new Vector4(0, -(float)Math.Sin(_Radians), (float)Math.Cos(_Radians), 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateY(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4((float)Math.Cos(_Radians), 0, -(float)Math.Sin(_Radians), 0),
new Vector4(0, 1, 0, 0),
new Vector4((float)Math.Sin(_Radians), 0, (float)Math.Cos(_Radians), 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateZ(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4((float)Math.Cos(_Radians), -(float)Math.Sin(_Radians), 0, 0),
new Vector4((float)Math.Sin(_Radians), (float)Math.Cos(_Radians), 0, 0),
new Vector4(0, 0, 1, 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4((_tanHalffov ) , 0 , 0 , 0),
new Vector4(0 , _tanHalffov , 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange , 2*_zNear*_zFar / _zRange),
new Vector4(0 , 0 , 1 , 0));
}
You need to transpose the matrix and invert some components if you want to do the same as Matrix4.CreatePerspectiveFieldOfView.
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4(1/(_tanHalffov * _ar), 0 , 0 , 0),
new Vector4(0 , 1 / _tanHalffov, 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange, -1),
new Vector4(0 , 0 , -2*_zNear*_zFar / _zRange , 0));
}
OpenGL matrices are stored in column-major order. The 1st column is the x-axis, followed by the y- and z-axes. The fourth column is the translation.
This means that each line (Vector4) represents one column of the matrix.
The usual OpenGL coordinate system is a right handed system. In view space the z-axis points against the line of sight. The Normalized Device Space is a left handed system. Therefore the z-axis is inverted by the projection matrix. See Left- vs. Right-handed coordinate systems.
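For reference, writing t = tan(fov/2), a = aspect ratio, n = zNear and f = zFar, the matrix the corrected code builds is the standard right-handed OpenGL perspective matrix, in conventional row/column notation:
[ 1/(a*t)   0       0              0            ]
[ 0         1/t     0              0            ]
[ 0         0      -(f+n)/(f-n)   -2*f*n/(f-n)  ]
[ 0         0      -1              0            ]
Because of the column-major layout, each Vector4 in the code above is one column of this matrix, which is why the -1 and the -2*f*n/(f-n) term appear swapped relative to this notation.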
You can find a working C#/OpenTK example at 3D Geometry
@Rabbid76 Thank you very much for your help. I solved the question, and the answer is... I was never wrong. I ran through tons of tests and found that the matrix was indeed correct; the output is simply supposed to look that distorted. If you think about it, the projection matrix makes x and y smaller as the fov gets larger (a wider view means smaller objects), but z is only the input of a linear function,
(_zFar + _zNear) / (_zFar - _zNear) * z + -2 * _zNear * _zFar / (_zFar - _zNear)
so when the fov changes, a cube's projected length along z never changes while its x and y get smaller; that's why it looks weird.
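A quick numeric illustration of that point (the x/y scale factor is 1/tan(fov/2), while the z mapping above does not depend on fov at all):
fov =  90 degrees:  1/tan(45) = 1.00
fov = 120 degrees:  1/tan(60) is about 0.58
So widening the fov shrinks the cube's projected width and height but leaves its projected depth mapping unchanged, which is exactly the apparent stretching along z.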
As @Rabbid76 kindly hinted, I think it is also because my game engine is a left-handed system, so the matrix is different.
Proof: (Screenshot: a cube in Unity also looks distorted when the fov is 90)
(Screenshot: and so does mine)
I have generated a 2D hectogon in my scene view, but I am now confused about how to make the shape three-dimensional. Any help with the maths or the method used to calculate this would be greatly appreciated. I have only just started with C#, and I feel this is a tall order given the lack of up-to-date OpenTK material: most of the calls used in most tutorials are now obsolete.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Drawing;
using System.IO;
using OpenTK;
using OpenTK.Input;
using OpenTK.Graphics.OpenGL;
using OpenTK.Graphics;
namespace SimpleGame
{
class Game : GameWindow
{
public Game() : base(1280, 720, new GraphicsMode(32, 24, 0, 4)) // screen resolution
{
}
int pgmID;
int vsID;
int fsID;
int attribute_vcol;
int attribute_vpos;
int uniform_mview;
int vbo_position;
int vbo_color;
int vbo_mview;
int ibo_elements;
Vector3[] vertdata;
Vector3[] coldata;
Matrix4[] mviewdata;
int[] indicedata;
float time = 0.0f;
void initProgram()
{
pgmID = GL.CreateProgram();
loadShader("F:/Year 1/Semester 2/Simulation In Games/SimpleGame/SimpleGame/vs.glsl", ShaderType.VertexShader, pgmID, out vsID);
loadShader("F:/Year 1/Semester 2/Simulation In Games/SimpleGame/SimpleGame/fs.glsl", ShaderType.FragmentShader, pgmID, out fsID);
GL.LinkProgram(pgmID);
Console.WriteLine(GL.GetProgramInfoLog(pgmID));
attribute_vpos = GL.GetAttribLocation(pgmID, "vPosition");
attribute_vcol = GL.GetAttribLocation(pgmID, "vColor");
uniform_mview = GL.GetUniformLocation(pgmID, "modelview");
GL.GenBuffers(1, out vbo_position);
GL.GenBuffers(1, out vbo_color);
GL.GenBuffers(1, out vbo_mview);
GL.GenBuffers(1, out ibo_elements);
}
protected override void OnLoad(EventArgs e)
{
base.OnLoad(e);
initProgram();
vertdata = new Vector3[] {
//new Vector3(0.0f,0.0f,0.0f), // center
//new Vector3(2.0f, 0f,0f), // right hand side
//new Vector3(0f,2f,0f), // up
new Vector3(0.0f,0.0f,-0.8f), // center point
new Vector3(2.0f,0.0f,-0.8f), // right hand side
new Vector3(1.0f,1.7f,-0.8f), // right hand top
new Vector3(-1.0f,1.7f,-0.8f), // left hand top
new Vector3(-2.0f,0.0f,-0.8f), // left hand side
new Vector3(-1.0f,-1.7f,-0.8f), // left hand bottom
new Vector3(1.0f,-1.7f,-0.8f), // right hand bottom
};
indicedata = new int[]{
//front
0, 1, 2,
0, 2, 3,
//back
0, 3, 4,
0, 4, 5,
//left
0, 5, 6,
0, 6, 1,
};
coldata = new Vector3[] { new Vector3(1f, 0f, 0f),
new Vector3( 0f, 0f, 1f),
new Vector3( 0f, 1f, 0f),new Vector3(1f, 0f, 0f),
new Vector3( 0f, 0f, 1f),
new Vector3( 0f, 1f, 0f),new Vector3(1f, 0f, 0f),
new Vector3( 0f, 0f, 1f)};
mviewdata = new Matrix4[]{
Matrix4.Identity
};
Title = "Hello OpenTK!";
GL.ClearColor(Color.DarkTurquoise);
GL.PointSize(5f);
}
void loadShader(String filename, ShaderType type, int program, out int address)
{
address = GL.CreateShader(type);
using (StreamReader sr = new StreamReader(filename))
{
GL.ShaderSource(address, sr.ReadToEnd());
}
GL.CompileShader(address);
GL.AttachShader(program, address);
Console.WriteLine(GL.GetShaderInfoLog(address));
}
protected override void OnRenderFrame(FrameEventArgs e)
{
base.OnRenderFrame(e);
GL.Viewport(0, 0, Width, Height);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.EnableVertexAttribArray(attribute_vpos);
GL.EnableVertexAttribArray(attribute_vcol);
GL.DrawElements(BeginMode.Triangles, indicedata.Length, DrawElementsType.UnsignedInt, 0);
GL.DisableVertexAttribArray(attribute_vpos);
GL.DisableVertexAttribArray(attribute_vcol);
GL.Flush();
SwapBuffers();
}
protected override void OnUpdateFrame(FrameEventArgs e)
{
base.OnUpdateFrame(e);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo_position);
GL.BufferData<Vector3>(BufferTarget.ArrayBuffer, (IntPtr)(vertdata.Length * Vector3.SizeInBytes), vertdata, BufferUsageHint.StaticDraw);
GL.VertexAttribPointer(attribute_vpos, 3, VertexAttribPointerType.Float, false, 0, 0);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo_color);
GL.BufferData<Vector3>(BufferTarget.ArrayBuffer, (IntPtr)(coldata.Length * Vector3.SizeInBytes), coldata, BufferUsageHint.StaticDraw);
GL.VertexAttribPointer(attribute_vcol, 3, VertexAttribPointerType.Float, true, 0, 0);
time += (float)e.Time;
mviewdata[0] = Matrix4.CreateRotationY(0.2f * time) * Matrix4.CreateRotationX(0.0f * time) * Matrix4.CreateTranslation(0.0f, -1.0f, -4.0f) *
Matrix4.CreatePerspectiveFieldOfView(1.3f, ClientSize.Width / (float)ClientSize.Height, 1.0f, 40.0f); // rotation, translation and projection
GL.UniformMatrix4(uniform_mview, false, ref mviewdata[0]);
GL.UseProgram(pgmID);
GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, ibo_elements);
GL.BufferData(BufferTarget.ElementArrayBuffer, (IntPtr)(indicedata.Length * sizeof(int)), indicedata, BufferUsageHint.StaticDraw);
}
}
}
I don't think there is a built-in method for creating prisms. A prism based on a heptagon (7-sided polygon) is made up of two heptagons (one on the bottom, one on the top) plus 7 vertical 4-sided polygons. So the algorithm for extruding a prism from a horizontal polygon would be (in pseudocode; a C# sketch follows below):
create_prism(bottom : polygon, height : float) : body
var top : polygon
top = bottom.Clone()
for all vertices v of top
v.z = v.z + height
end
var b = new body
b.Add(bottom)
b.Add(top)
for i : integer = 0 to bottom.Count - 1
var j : integer
j = (i + 1) modulo bottom.Count
var side = new polygon[4]
side[0] = bottom[i]
side[1] = bottom[j]
side[2] = top[j]
side[3] = top[i]
b.Add(side)
end
return b
end
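A minimal C# sketch of the same algorithm, assuming OpenTK's Vector3 and a convex, ordered outline (the name BuildPrism and the fan triangulation are my own choices, not from the pseudocode above):
// Extrude a flat polygon (lying in a plane of constant z) into a prism.
// Returns a combined vertex list plus triangle indices suitable for GL.DrawElements.
static void BuildPrism(IList<Vector3> bottom, float height,
                       out List<Vector3> verts, out List<int> indices)
{
    int n = bottom.Count;
    verts = new List<Vector3>(bottom);                       // 0 .. n-1   : bottom ring
    for (int i = 0; i < n; i++)
        verts.Add(bottom[i] + new Vector3(0, 0, height));    // n .. 2n-1  : top ring

    indices = new List<int>();
    for (int i = 1; i < n - 1; i++)                          // caps, as triangle fans
    {
        indices.AddRange(new[] { 0, i + 1, i });             // bottom cap
        indices.AddRange(new[] { n, n + i, n + i + 1 });     // top cap
    }
    for (int i = 0; i < n; i++)                              // one quad (two triangles) per side
    {
        int j = (i + 1) % n;
        indices.AddRange(new[] { i, j, n + j });
        indices.AddRange(new[] { i, n + j, n + i });
    }
}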
I'm having a problem with XNA 4.0 and couldn't even google it. It occurs in my main project as well as in my test project, which is a very stripped-down version with unnecessary code removed.
I need to use my own custom vertex declaration and use it to draw textured primitives (or, why not, models too).
Drawing and texturing work fine with BasicEffect and any of the built-in vertex declarations (like VertexPositionColorTexture)... but what on earth is wrong when textures aren't drawn properly if I use BasicEffect with my custom vertex declaration? I'd love to keep all combinations of the built-in types in one vertex declaration. My only idea for a fix is writing a new vertex/pixel shader, but would that help? And if so, how should I do it?
I tried to upload images to describe the problem, but I'd need at least 10 reputation, so I'll explain in words:
With my custom vertex declaration, the texture on my square (and any other shape) appears tiled instead of scaled to fit. Also, textures won't rotate when I rotate the object.
Here's my custom vertex declaration:
namespace WindowsGame2
{
public struct VertexPositionNormalColorTexture : IVertexType
{
public Vector3 Position;
public Vector3 Normal;
public Color Color;
public Vector2 TextureCoordinate;
VertexDeclaration IVertexType.VertexDeclaration
{
get { return VertexDeclaration; }
}
public readonly static VertexDeclaration VertexDeclaration =
new VertexDeclaration(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0),
new VertexElement((sizeof(float) * 3) * 2, VertexElementFormat.Color, VertexElementUsage.Color, 0),
new VertexElement((sizeof(float) * 3) * 3, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0)
);
public VertexPositionNormalColorTexture(Vector3 p)
{
Position = p;
Normal = Vector3.Zero;
Color = Color.White;
TextureCoordinate = Vector2.Zero;
}
public VertexPositionNormalColorTexture(Vector3 p, Color c)
{
Position = p;
Normal = Vector3.Zero;
Color = c;
TextureCoordinate = Vector2.Zero;
}
public VertexPositionNormalColorTexture(Vector3 p, Vector2 t)
{
Position = p;
Normal = Vector3.Zero;
Color = Color.White;
TextureCoordinate = t;
}
public VertexPositionNormalColorTexture(Vector3 p, Color c, Vector2 t)
{
Position = p;
Normal = Vector3.Zero;
Color = c;
TextureCoordinate = t;
}
public VertexPositionNormalColorTexture(Vector3 p, Vector3 n, Color c)
{
Position = p;
Normal = n;
Color = c;
TextureCoordinate = Vector2.Zero;
}
public VertexPositionNormalColorTexture(Vector3 p, Vector3 n, Vector2 t)
{
Position = p;
Normal = n;
Color = Color.White;
TextureCoordinate = t;
}
public VertexPositionNormalColorTexture(Vector3 p, Vector3 n, Color c, Vector2 t)
{
Position = p;
Normal = n;
Color = c;
TextureCoordinate = t;
}
}
}
And the game class:
using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.Xna.Framework;
using Microsoft.Xna.Framework.Audio;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework.GamerServices;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Input;
using Microsoft.Xna.Framework.Media;
namespace WindowsGame2
{
public class Game1 : Microsoft.Xna.Framework.Game
{
GraphicsDeviceManager graphics;
SpriteBatch spriteBatch;
ViewerManager viewer;
List<VertexPositionNormalColorTexture> vertices;
List<short> indices;
Texture2D thumbnail;
VertexBuffer vertexBuf;
IndexBuffer indexBuf;
RasterizerState rasterizerState;
BasicEffect basicEffect;
Matrix worldMatrix;
Matrix viewMatrix;
Matrix projectionMatrix;
public Game1()
{
graphics = new GraphicsDeviceManager(this);
Content.RootDirectory = "Content";
}
protected override void Initialize()
{
viewer = new ViewerManager(graphics, new Vector3(0.0f, 0.0f, 5.0f), new Vector3(0.0f, 0.0f, 0.0f), 500);
vertices = new List<VertexPositionNormalColorTexture>() {
new VertexPositionNormalColorTexture(new Vector3(-1, -1, 0), Color.Yellow, new Vector2(0, 1)),
new VertexPositionNormalColorTexture(new Vector3(-1, 1, 0), Color.Yellow, new Vector2(0, 0)),
new VertexPositionNormalColorTexture(new Vector3(1, 1, 0), Color.Yellow, new Vector2(1, 0)),
new VertexPositionNormalColorTexture(new Vector3(-1, -1, 0), Color.Yellow, new Vector2(0, 1)),
new VertexPositionNormalColorTexture(new Vector3(1, 1, 0), Color.Yellow, new Vector2(1, 0)),
new VertexPositionNormalColorTexture(new Vector3(1, -1, 0), Color.Yellow, new Vector2(1, 1)),
};
indices = new List<short>() {
0, 1, 2, 3, 4, 5
};
basicEffect = new BasicEffect(graphics.GraphicsDevice);
worldMatrix = Matrix.CreateTranslation(0.0f, 0.0f, 0.0f) * Matrix.CreateScale(3);
viewMatrix = Matrix.CreateLookAt(new Vector3(0.0f, 0.0f, 5.0f), new Vector3(0.0f, 0.0f, 0.0f), Vector3.Up);
projectionMatrix = Matrix.CreatePerspectiveFieldOfView(MathHelper.ToRadians(90), graphics.GraphicsDevice.Viewport.AspectRatio, 1f, 50f);
vertexBuf = new VertexBuffer(graphics.GraphicsDevice, VertexPositionNormalColorTexture.VertexDeclaration, 500, BufferUsage.WriteOnly);
indexBuf = new IndexBuffer(graphics.GraphicsDevice, IndexElementSize.SixteenBits, 500, BufferUsage.WriteOnly);
rasterizerState = new RasterizerState();
rasterizerState.CullMode = CullMode.None;
base.Initialize();
}
protected override void LoadContent()
{
spriteBatch = new SpriteBatch(GraphicsDevice);
thumbnail = this.Content.Load<Texture2D>("GameThumbnail");
}
protected override void UnloadContent()
{
this.Content.Unload();
}
protected override void Update(GameTime gameTime)
{
if (Keyboard.GetState().IsKeyDown(Keys.Escape))
this.Exit();
base.Update(gameTime);
}
protected override void Draw(GameTime gameTime)
{
GraphicsDevice.Clear(Color.CornflowerBlue);
graphics.GraphicsDevice.RasterizerState = rasterizerState;
basicEffect.World = worldMatrix;
basicEffect.View = viewMatrix;
basicEffect.Projection = projectionMatrix;
basicEffect.VertexColorEnabled = true;
basicEffect.TextureEnabled = true;
basicEffect.Texture = thumbnail;
vertexBuf.SetData<VertexPositionNormalColorTexture>(vertices.ToArray());
indexBuf.SetData<short>(indices.ToArray());
graphics.GraphicsDevice.SetVertexBuffer(vertexBuf);
graphics.GraphicsDevice.Indices = indexBuf;
foreach (EffectPass pass in basicEffect.CurrentTechnique.Passes)
{
pass.Apply();
graphics.GraphicsDevice.DrawUserIndexedPrimitives(
PrimitiveType.TriangleList,
vertices.ToArray(),
0,
vertices.Count,
indices.ToArray(),
0,
2,
VertexPositionNormalColorTexture.VertexDeclaration);
}
graphics.GraphicsDevice.Indices = null;
graphics.GraphicsDevice.SetVertexBuffer(null);
base.Draw(gameTime);
}
}
}
I found the problem. I was using too large an offset for the texture coordinate element, which resulted in the weird texturing. It's this part of the vertex declaration:
new VertexElement((sizeof(float) * 3) * 2, VertexElementFormat.Color, VertexElementUsage.Color, 0),
new VertexElement((sizeof(float) * 3) * 3, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0)
The Color type isn't actually stored as floats or ints but packed into 4 bytes (one byte per channel), so the texture coordinate element has to start 4 bytes after the color's offset:
new VertexElement((sizeof(float) * 3) * 2 + 4, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0)
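Put together, the corrected declaration with explicit byte offsets (Position at 0, Normal at 12, Color at 24, TextureCoordinate at 28, for a 36-byte vertex) would look like this:
public readonly static VertexDeclaration VertexDeclaration =
    new VertexDeclaration(
        new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
        new VertexElement(12, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0),
        new VertexElement(24, VertexElementFormat.Color, VertexElementUsage.Color, 0),
        new VertexElement(28, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0));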
I've been working with OpenGL using the OpenTK library for .NET, writing my own engine. I placed 3 different objects: one spinning cube and 2 adjacent cubes. Everything seemed to work fine until I changed the color of the quad on top of the objects.
I'm rendering cubes with a green top, and on the left the block in the back is being rendered over the block in the front. I can't figure out where I'm going wrong with this; when the camera is set to look from the other side, it renders correctly.
The following is the related code in classes with irrelevant or unrelated methods, properties and attributes omitted:
GameState.cs
class GameState : State
{
// TEMP: Test Block
SimpleBlock block;
int i = 0;
public override void Render()
{
base.Render();
// Set OpenGL Settings
GL.Viewport(0, 0, 1024, 768);
GL.Enable(EnableCap.CullFace);
// Reset the Matrices
Matrices.ClearMatrices();
// Set Camera Settings (Field of view in radians)
Matrices.ProjectionMatrix = Matrix4.CreatePerspectiveFieldOfView((float)Math.PI / 2, (1024.0f / 768.0f), 1, 1000);
// Create the Camera
// this has to be in reverse
Matrix4 viewMatrix = Matrix4.CreateRotationX((float)Math.PI/8);
viewMatrix = viewMatrix.Translate(0, -2, -4);
// Multiply it with the ModelView (Which at this point is set to a value that we can just use = and it has the same result)
Matrices.ModelViewMatrix = viewMatrix;
// Render the Block
Matrices.Push();
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Translate(2, 0, 0);
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Translate(0.5f, 0, 0.5f);
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Rotate(0, i / 40.0f, 0);
block.Render();
Matrices.Pop();
// Render the Block Again Twice
Matrices.Push();
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Translate(-2, 0, 0);
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Translate(0.5f, 0, 0.5f);
block.Render();
Matrices.ModelViewMatrix = Matrices.ModelViewMatrix.Translate(0, 0, -1);
block.Render();
Matrices.Pop();
// Increment Rotation Test Variable
i++;
}
}
SimpleBlock.cs
class SimpleBlock : IBlock
{
public void Render()
{
// Send the Shader Parameters to the GPU
Shader.Bind();
Shader.SendMatrices();
// Begin Rendering the Polys
GL.Begin(BeginMode.Triangles);
// Front Quad
Shader.SetColor(Color4.SaddleBrown);
GL.Normal3(0, 0, 1);
GLUtils.QuadVertices(
new Vector3(-0.5f, 1, 0.5f),
new Vector3(-0.5f, 0, 0.5f),
new Vector3( 0.5f, 1, 0.5f),
new Vector3( 0.5f, 0, 0.5f));
// Right Quad
GL.Normal3(1, 0, 0);
GLUtils.QuadVertices(
new Vector3(0.5f, 1, 0.5f),
new Vector3(0.5f, 0, 0.5f),
new Vector3(0.5f, 1, -0.5f),
new Vector3(0.5f, 0, -0.5f));
// Back Quad
GL.Normal3(0, 0, -1);
GLUtils.QuadVertices(
new Vector3( 0.5f, 1, -0.5f),
new Vector3( 0.5f, 0, -0.5f),
new Vector3(-0.5f, 1, -0.5f),
new Vector3(-0.5f, 0, -0.5f));
// Left Quad
GL.Normal3(-1, 0, 0);
GLUtils.QuadVertices(
new Vector3(-0.5f, 1, -0.5f),
new Vector3(-0.5f, 0, -0.5f),
new Vector3(-0.5f, 1, 0.5f),
new Vector3(-0.5f, 0, 0.5f));
// Bottom Quad
GL.Normal3(0, -1, 0);
GLUtils.QuadVertices(
new Vector3(-0.5f, 0, 0.5f),
new Vector3(-0.5f, 0, -0.5f),
new Vector3( 0.5f, 0, 0.5f),
new Vector3( 0.5f, 0, -0.5f));
// Top Quad
Shader.SetColor(Color4.Green);
GL.Normal3(0, 1, 0);
GLUtils.QuadVertices(
new Vector3(-0.5f, 1, -0.5f),
new Vector3(-0.5f, 1, 0.5f),
new Vector3(0.5f, 1, -0.5f),
new Vector3(0.5f, 1, 0.5f));
// Done!
GL.End();
}
}
BasicFragment.glfs
#version 130
// MultiColor Attribute
in vec4 multiColor;
// Output color
out vec4 gl_FragColor;
void main()
{
// Set fragment
gl_FragColor = multiColor;
}
BasicVertex.glvs
#version 130
// Transformation Matrices
uniform mat4 ProjectionMatrix;
uniform mat4 ModelViewMatrix;
// Vertex Position Attribute
in vec3 VertexPos;
// MultiColor Attributes
in vec4 MultiColor;
out vec4 multiColor;
void main()
{
// Process Colors
multiColor = MultiColor;
// Process Vertex
gl_Position = ProjectionMatrix * ModelViewMatrix * vec4(VertexPos.x, VertexPos.y, VertexPos.z, 1);
}
MainWindow.cs
// Extends OpenTK's GameWindow Class
class MainWindow : GameWindow
{
public MainWindow()
: base(1024, 768, new GraphicsMode(32, 0, 0, 4))
{
this.Title = "Trench Wars";
this.WindowBorder = WindowBorder.Fixed;
this.ClientSize = new Size(1024, 768);
// Set VSync On
this.VSync = VSyncMode.Adaptive;
}
protected override void OnRenderFrame(FrameEventArgs e)
{
base.OnRenderFrame(e);
// Clear Screen
GL.ClearColor(Color4.CornflowerBlue);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
// Do State-Specific Rendering
StateEngine.Render();
// Pull a Wicked Bluffing move in Poker
GL.Flush();
// Swap Buffers
this.SwapBuffers();
}
}
It seems like you forgot to enable depth testing. glEnable(GL_DEPTH_TEST) before rendering the geometry is your friend (or, given the language bindings you're using, GL.Enable(EnableCap.DepthTest);).
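In the GameState.Render method above, that means enabling it alongside the other GL settings, as in the sketch below (it could equally be enabled once at startup). Note also that the MainWindow constructor passes new GraphicsMode(32, 0, 0, 4), i.e. zero depth bits, so the window should also request a depth buffer (for example new GraphicsMode(32, 24, 0, 4), as in the Game constructor of the OpenTK question earlier on this page) for the depth test to have anything to compare against.
// Set OpenGL Settings
GL.Viewport(0, 0, 1024, 768);
GL.Enable(EnableCap.CullFace);
GL.Enable(EnableCap.DepthTest);  // compare incoming fragments against the depth buffer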