Remove area other than textures from OpenGL control - C# OpenTK

Using the shader code below, I can display frames from three cameras on a single OpenGL control. The control should start at the vertical center of the screen and span from the left edge across the full screen width; that is, the control's width is the screen width and its height is half the screen height. The problem is that there is an area not covered by the textures, and it shows up as the ClearColor (which is set to blue).
if (uv.y > 1.0)
discard;
Can I remove/delete this extra area from GLControl?
int y = Screen.PrimaryScreen.Bounds.Height - this.PreferredSize.Height;
glControl1.Location = new Point(0, y/2);
private void OpenGL_SizeChanged(object sender, EventArgs e)
{
glControl1.Width = this.Width;
glControl1.Height = this.Height/2;
}
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Nearest);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapS, (int)TextureWrapMode.ClampToBorder);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapT, (int)TextureWrapMode.ClampToBorder);
private void CreateShaders()
{
/***********Vert Shader********************/
vertShader = GL.CreateShader(ShaderType.VertexShader);
GL.ShaderSource(vertShader, #"attribute vec3 a_position;
varying vec2 vTexCoordIn;
//uniform float aspect;
void main() {
vTexCoordIn=( a_position.xy+1)/2;
gl_Position = vec4(a_position,1);
}");
GL.CompileShader(vertShader);
/***********Frag Shader ****************/
fragShader = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(fragShader, #"
uniform sampler2D sTexture;
uniform sampler2D sTexture1;
uniform sampler2D sTexture2;
uniform vec2 sTexSize;
uniform vec2 sTexSize1;
uniform vec2 sTexSize2;
varying vec2 vTexCoordIn;
void main ()
{
vec2 vTexCoord=vec2(vTexCoordIn.x,vTexCoordIn.y);
if ( vTexCoord.x < 1.0/3.0 )
{
vec2 uv = vec2(vTexCoord.x * 3.0, vTexCoord.y);
uv.y *= sTexSize.x / sTexSize.y;
if (uv.y > 1.0)
discard;
else
gl_FragColor = texture2D(sTexture, uv);
}
else if ( vTexCoord.x >= 1.0/3.0 && vTexCoord.x < 2.0/3.0 )
{
vec2 uv = vec2(vTexCoord.x * 3.0 - 1.0, vTexCoord.y);
uv.y *= sTexSize1.x / sTexSize1.y;
if (uv.y > 1.0)
discard;
else
gl_FragColor = texture2D(sTexture1, uv);
}
else if ( vTexCoord.x >= 2.0/3.0 )
{
vec2 uv = vec2(vTexCoord.x * 3.0 - 2.0, vTexCoord.y);
uv.y *= sTexSize2.x / sTexSize2.y;
if (uv.y > 1.0)
discard;
else
gl_FragColor = texture2D(sTexture2, uv);
}
}");
GL.CompileShader(fragShader);
}

I think you should do the resizing on the CPU side instead of in the shaders ...
1. resize all frames to a common height
2. sum the resized frame widths
3. rescale all frames so the summed width equals your window/desktop width
4. resize the window height to match the new common height after #3
It looks like you are doing #1, #2, #3 inside your shaders but ignoring #4, which results in that empty area. You cannot match both the x and y size of the window without breaking the aspect ratio of the camera images. So leave the x size alone and change the y size of the window to remedy your problem.
Here is a small VCL/C++/legacy GL example (sorry, I do not code in C# and am too lazy to code the new-style GL for this):
//---------------------------------------------------------------------------
#include <vcl.h>
#include <gl\gl.h>
#pragma hdrstop
#include "Unit1.h"
//---------------------------------------------------------------------------
#pragma package(smart_init)
#pragma resource "*.dfm"
TForm1 *Form1; // VCL Application window object
int xs,ys; // window resolution
HDC hdc=NULL; // device context for GL
HGLRC hrc=NULL; // rendering context for GL
//---------------------------------------------------------------------------
const int camera_res[]= // (x,y) resolutions of each camera
{
320,200,
640,480,
352,288,
0,0
};
float frame_pos[128]; // (x0,x1) position of each frame
void frame_resize(int xs,int &ys) // position/resize frames and change ys to common height so xs fit without breaking aspect ratio
{
int i,j;
float dx,dy,x,y;
// common height placement
for (x=0.0,i=0,j=0;camera_res[i];)
{
dx=camera_res[i]; i++;
dy=camera_res[i]; i++;
dx*=1000.0/dy; // any non zero common height for example 1000
frame_pos[j]=x; x+=dx; j++;
frame_pos[j]=x-1.0; j++;
}
frame_pos[j]=-1.0; j++;
frame_pos[j]=-1.0; j++;
// rescale summed width x to match xs
x=float(xs)/x; // scale
ys=float(1000.0*x); // common height
for (j=0;frame_pos[j]>-0.1;)
{
frame_pos[j]*=x; j++;
frame_pos[j]*=x; j++;
}
}
//---------------------------------------------------------------------------
void gl_draw()
{
if (hrc==NULL) return;
glClearColor(0.0,0.0,0.0,0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
// view in pixel units
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glTranslatef(-1.0,+1.0,0.0);
glScalef(2.0/float(xs),-2.0/float(ys),1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// render rectangle
int i; float x0,y0,x1,y1;
y0=0.0; y1=ys-1;
glColor3f(1.0,1.0,1.0);
for (i=0;frame_pos[i]>-0.1;)
{
x0=frame_pos[i]; i++;
x1=frame_pos[i]; i++;
// here bind (i/2) camera frame as texture (only one texture at a time)
glBegin(GL_LINE_LOOP);
glTexCoord2f(0.0,0.0); glVertex2f(x0,y0);
glTexCoord2f(0.0,1.0); glVertex2f(x0,y1);
glTexCoord2f(1.0,1.0); glVertex2f(x1,y1);
glTexCoord2f(1.0,0.0); glVertex2f(x1,y0);
glEnd();
}
glFlush();
SwapBuffers(hdc);
}
//---------------------------------------------------------------------------
__fastcall TForm1::TForm1(TComponent* Owner):TForm(Owner)
{
// desktop resolution (you can hardcode xs,ys instead)
TCanvas *scr=new TCanvas();
scr->Handle=GetDC(NULL);
xs=scr->ClipRect.Width(); // desktop width
ys=scr->ClipRect.Height(); // desktop height
delete scr;
// window placement/resize (this is important)
int ys0=ys; // remember original height
frame_resize(xs,ys); // compute sizes and placements
SetBounds(0,(ys0-ys)>>1,xs,ys); // resize window and place in the center of screen
// GL init (most likely you can ignore this, you already have GL set up)
hdc = GetDC(Handle); // get device context for this App window
PIXELFORMATDESCRIPTOR pfd;
ZeroMemory( &pfd, sizeof( pfd ) ); // set the pixel format for the DC
pfd.nSize = sizeof( pfd );
pfd.nVersion = 1;
pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
pfd.iPixelType = PFD_TYPE_RGBA;
pfd.cColorBits = 24;
pfd.cDepthBits = 24;
pfd.iLayerType = PFD_MAIN_PLANE;
SetPixelFormat(hdc,ChoosePixelFormat(hdc, &pfd),&pfd);
hrc = wglCreateContext(hdc); // create current rendering context
if(hrc == NULL)
{
ShowMessage("Could not initialize OpenGL Rendering context !!!");
Application->Terminate();
}
if(wglMakeCurrent(hdc, hrc) == false)
{
ShowMessage("Could not make current OpenGL Rendering context !!!");
wglDeleteContext(hrc); // destroy rendering context
Application->Terminate();
}
// resize GL framebuffers (this is important)
glViewport(0,0,xs,ys);
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormDestroy(TObject *Sender)
{
// GL exit most likely you can ignore this
wglMakeCurrent(NULL, NULL); // release current rendering context
wglDeleteContext(hrc); // destroy rendering context
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormPaint(TObject *Sender)
{
gl_draw();
}
//---------------------------------------------------------------------------
void __fastcall TForm1::FormKeyDown(TObject *Sender, WORD &Key,TShiftState Shift)
{
if (Key==27) Close(); // Escape exits app
}
//---------------------------------------------------------------------------
Ignore the VCL stuff; the only important thing here is the frame_resize function and its use. The gl_draw just renders rectangles instead of your frames, so to adapt it just bind the texture and use GL_QUADS instead of GL_LINE_LOOP, or port it to the new GL stuff with VBO/VAO ...
I coded it so it supports any number of cameras above 0 ... just be sure the frame_pos array is big enough (2 entries per camera).
As you can see, no shaders are needed. Of course, in the new GL style you need shaders, but in them you just copy the texel from the texture to the fragment ...
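In C# / OpenTK terms, the same CPU-side layout could look roughly like this (a minimal sketch, not tested; LayoutGlControl and cameraRes are hypothetical names, and the control is centered vertically like the window in the example above):
// requires: using System.Drawing; using System.Windows.Forms; using OpenTK.Graphics.OpenGL;
private void LayoutGlControl(Size[] cameraRes)   // native resolution of each camera
{
    int screenW = Screen.PrimaryScreen.Bounds.Width;
    int screenH = Screen.PrimaryScreen.Bounds.Height;
    // #1 + #2: bring every frame to a common height and sum the resulting widths
    const float commonH = 1000.0f;               // any non-zero reference height
    float summedW = 0.0f;
    foreach (Size r in cameraRes)
        summedW += commonH * r.Width / r.Height;
    // #3: rescale so the summed width fills the screen width
    float scale = screenW / summedW;
    // #4: the control height follows from the same scale, so the aspect ratios stay intact
    int controlH = (int)(commonH * scale);
    glControl1.Width = screenW;
    glControl1.Height = controlH;
    glControl1.Location = new Point(0, (screenH - controlH) / 2);   // centered vertically
    GL.Viewport(0, 0, glControl1.Width, glControl1.Height);         // keep the GL framebuffer in sync
}
With the control sized this way there is no leftover area for the fragment shader to discard, so the aspect-ratio test (and the blue ClearColor region it exposes) can go away.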

Related

Panning in SharpGL distorts image

I have this code in SharpGL where zoom and pan are implemented. This is the draw method:
private void OpenGLDraw(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
// Enable finish button
if (openFileOk && dispCBOk) { finish_btn.IsEnabled = true; }
else { finish_btn.IsEnabled = false; }
// Get the OpenGL object
OpenGL gl = GLControl.OpenGL;
// Set The Material Color
float[] glfMaterialColor = new float[] { 0.4f, 0.2f, 0.8f, 1.0f };
float w = (float)GLControl.ActualWidth;
float h = (float)GLControl.ActualHeight;
// Perspective projection
gl.Viewport(0, 0, (int)w, (int)h);
gl.Ortho(0, w, 0, h, -100, 100);
// Clear The Screen And The Depth Buffer
gl.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
gl.Material(OpenGL.GL_FRONT_AND_BACK, OpenGL.GL_AMBIENT_AND_DIFFUSE, glfMaterialColor);
gl.MatrixMode(MatrixMode.Modelview);
gl.LoadIdentity();
gl.Scale(scale,scale,1);
// Camera Position
gl.LookAt(_position[0], _position[1], _position[2],
_viewPoint[0], _viewPoint[1], _viewPoint[2],
_upVector[0], _upVector[1], _upVector[2]);
// Draw the helix
float[][] points = vertexes.ToArray();
Helix(points.Length, points);
}
What I have here is a perspective projection using Viewport (the ortho projection and scale are used for zooming).
This is the code for panning whatever is drawn on the screen:
// pan
// y axis
// Up
if (keyCode == 1)
{
_viewPoint[1] += _moveDistance;
_position[1] += _moveDistance;
}
// Down
else if (keyCode == 2)
{
_viewPoint[1] += -_moveDistance;
_position[1] += -_moveDistance;
}
// x axis
// Left
else if (keyCode == 3)
{
_viewPoint[2] += _moveDistance;
_position[2] += _moveDistance;
}
// Right
else if (keyCode == 4)
{
_viewPoint[2] += -_moveDistance;
_position[2] += -_moveDistance;
}
The thing is that when I move too much to any one of the sides (up, down, left, right), the drawing gets really distorted. I didn't notice it at first because I was working with the image very zoomed in and couldn't see the difference, but now I see that the distortion always happens, it's just more intense the more you move away from the image.
All I want to know is why this happens, and if possible, a solution for it, thank you.
Here is an example of what happens (screenshots: normal view vs. distorted view).

Crop the top and bottom part of texture using opentk

I have a slider whose value ranges from zero to one. Using the value of this slider, I want to crop the image from the bottom up to half of the image, and from the top down to half of the image. I have done the first one (the bottom crop) by resizing the height of the GLControl. I am not sure this is the proper way to achieve it, but it works fine. I have no idea how to do the second option (cropping from the top down to half of the image). Please help me get it.
Attached are the outputs I got when performing the bottom crop with values 0, 0.4 and 1.0 respectively.
int FramereHeight = (glControl2.Height / 2) / 10; // crop the middle camera upto half of it's height
if (NumericUpdownMiddleBottomCropVal != 0.0)//value ranges from zero to one
{
glControl2.Height = glControl2.Height - Convert.ToInt32(NumericUpdownMiddleBottomCropVal * 10 * FramereHeight);
}
public void CreateShaders()
{
/***********Vert Shader********************/
vertShader = GL.CreateShader(ShaderType.VertexShader);
GL.ShaderSource(vertShader, #"attribute vec3 a_position;
varying vec2 vTexCoord;
void main() {
vTexCoord = (a_position.xy+1)/2 ;
gl_Position =vec4(a_position, 1);
}");
GL.CompileShader(vertShader);
/***********Frag Shader ****************/
fragShader = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(fragShader, #"precision highp float;
uniform sampler2D sTexture_2;varying vec2 vTexCoord;
uniform float sSelectedCropVal;
uniform float sSelectedTopCropVal;uniform float sSelectedBottomCropVal;
void main ()
{
if (abs(vTexCoord.y-0.5) * 2.0 > 1.0 - 0.5*sSelectedCropVal)
discard;
vec4 color = texture2D (sTexture_2, vec2(vTexCoord.x, vTexCoord.y));
gl_FragColor =color;
}"); GL.CompileShader(fragShader);
}
I assume that sSelectedCropVal is in range [0.0, 1.0].
You can discard the fragments depending on the v coordinate:
if ((0.5 - vTexCoord.y) * 2.0 > 1.0-sSelectedCropVal)
discard;
if ((vTexCoord.y - 0.5) * 2.0 > 1.0-sSelectedBottomCropVal)
discard;
Complete shader:
precision highp float;
uniform sampler2D sTexture_2;
varying vec2 vTexCoord;
uniform float sSelectedCropVal;
uniform float sSelectedTopCropVal;
uniform float sSelectedBottomCropVal;
void main ()
{
if ((0.5 - vTexCoord.y) * 2.0 > 1.0-sSelectedCropVal)
discard;
if ((vTexCoord.y - 0.5) * 2.0 > 1.0-sSelectedBottomCropVal)
discard;
vec4 color = texture2D(sTexture_2, vTexCoord.xy);
gl_FragColor = color;
}
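On the C# / OpenTK side, the two crop values can then be passed to this shader each frame, for example like this (a rough sketch; program is assumed to be the linked shader program handle, and cropVal / bottomCropVal stand for the slider values, both in [0.0, 1.0]):
GL.UseProgram(program);
// locations of the two crop uniforms used by the shader above
int locCrop = GL.GetUniformLocation(program, "sSelectedCropVal");
int locBottomCrop = GL.GetUniformLocation(program, "sSelectedBottomCropVal");
GL.Uniform1(locCrop, (float)cropVal);               // crops one edge of the image
GL.Uniform1(locBottomCrop, (float)bottomCropVal);    // crops the other edge
Driving both crops from uniforms means the GLControl itself no longer has to be resized for the bottom crop.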

Display three textures equally on openGL control - OpenTK C#

I have three textures which should be displayed on an OpenGL control in such a way that all three share it equally. That means texture1 should be in 0 to 0.33 of the GLControl, texture2 should be in 0.33 to 0.66, and texture3 in the remaining place. I have done it like below, but the right portion of the middle image gets a blurred area. Please help me correct it.
private void CreateShaders()
{
/***********Vert Shader********************/
vertShader = GL.CreateShader(ShaderType.VertexShader);
GL.ShaderSource(vertShader, #"attribute vec3 a_position;
varying vec2 vTexCoordIn;
void main() {
vTexCoordIn=( a_position.xy+1)/2 ;
gl_Position = vec4(a_position,1);
}");
GL.CompileShader(vertShader);
/***********Frag Shader ****************/
fragShader = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(fragShader, #"
uniform sampler2D sTexture1;
uniform sampler2D sTexture2;
uniform sampler2D sTexture3;
varying vec2 vTexCoordIn;
void main ()
{
vec2 vTexCoord=vec2(vTexCoordIn.x,vTexCoordIn.y);
if ( vTexCoord.x<0.3 )
gl_FragColor = texture2D (sTexture1, vec2(vTexCoord.x*2.0, vTexCoord.y));
else if ( vTexCoord.x>=0.3 && vTexCoord.x<=0.6 )
gl_FragColor = texture2D (sTexture2, vec2(vTexCoord.x*2.0, vTexCoord.y));
else
gl_FragColor = texture2D (sTexture3, vec2(vTexCoord.x*2.0, vTexCoord.y));
}");
GL.CompileShader(fragShader);
}
"texture1 should be in 0 to 0.33 of the GLControl, texture2 should be in 0.33 to 0.66, and texture3 in the remaining place."
If the texture coordinates are in range [0, 0.33] then sTexture1 has to be drawn and the texture coordinates have to be mapped from [0, 0.33] to [0, 1]:
if ( vTexCoord.x < 1.0/3.0 )
gl_FragColor = texture2D(sTexture1, vec2(vTexCoord.x * 3.0, vTexCoord.y));
If the texture coordinates are in range [0.33, 0.66] then sTexture2 has to be drawn and the texture coordinates have to be mapped from [0.33, 0.66] to [0, 1]:
else if ( vTexCoord.x >= 1.0/3.0 && vTexCoord.x < 2.0/3.0 )
gl_FragColor = texture2D(sTexture2, vec2(vTexCoord.x * 3.0 - 1.0, vTexCoord.y));
If the texture coordinates are in range [0.66, 1] then sTexture3 has to be drawn and the texture coordinates have to be mapped from [0.66, 1] to [0, 1]:
else if ( vTexCoord.x >= 2.0/3.0 )
gl_FragColor = texture2D(sTexture3, vec2(vTexCoord.x * 3.0 - 2.0, vTexCoord.y));
gl_FragColor = texture2D (sTexture3, vec2(vTexCoord.x*2.0, vTexCoord.y));
^^^^
Multiplying x-coordinate by 2.0 leads to values exceeding 1.0 if x >= 0.5. If your sampler is set up to CLAMP_TO_EDGE (which seems to be the case), this results in repeatedly sampling the same texel on the edge of the texture (which will appear as smearing / blurring as you mentioned).
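Putting the corrected branches from the first answer together (the *3.0 mapping also removes the smearing described above), the complete fragment shader could be written like this, in the same C# verbatim-string style as the question:
GL.ShaderSource(fragShader, @"
uniform sampler2D sTexture1;
uniform sampler2D sTexture2;
uniform sampler2D sTexture3;
varying vec2 vTexCoordIn;
void main ()
{
    vec2 vTexCoord = vTexCoordIn;
    if ( vTexCoord.x < 1.0/3.0 )
        gl_FragColor = texture2D(sTexture1, vec2(vTexCoord.x * 3.0, vTexCoord.y));
    else if ( vTexCoord.x >= 1.0/3.0 && vTexCoord.x < 2.0/3.0 )
        gl_FragColor = texture2D(sTexture2, vec2(vTexCoord.x * 3.0 - 1.0, vTexCoord.y));
    else if ( vTexCoord.x >= 2.0/3.0 )
        gl_FragColor = texture2D(sTexture3, vec2(vTexCoord.x * 3.0 - 2.0, vTexCoord.y));
}");
GL.CompileShader(fragShader);
This maps each third of the control onto the [0, 1] range of its own texture, so no coordinate ever exceeds 1.0 and the edge blurring disappears.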

Shader that wraps around as it offsets a texture

I'd like to create a tiled shader that wraps back onto itself as it offsets a transparent texture over time. The result will make a "moving walkway/travellator" across a mesh using a texture representing a single step.
This is what I have so far, but it doesn't wrap around (without relying on external settings such as setting the texture's wrap mode to repeat).
Shader "Custom/ScrollingTextureUnlitShader"
{
Properties
{
_FadeValue ("Fade Value", Range(0, 1)) = 1
_ColorTint ("Color Tint", Color) = (1,1,1,1)
_MainTex ("Base (RGB)", 2D) = "white" {}
_ScrollXSpeed ("X Scroll Speed", Range(-10, 10)) = -5
_ScrollYSpeed ("Y Scroll Speed", Range(-10, 10)) = 0
}
SubShader
{
Tags { "RenderType"="Transparent" "Queue"="Transparent" }
LOD 200
CGPROGRAM
#pragma surface surf Unlit alpha
float _FadeValue;
float4 _ColorTint;
sampler2D _MainTex;
fixed _ScrollXSpeed;
fixed _ScrollYSpeed;
struct Input
{
float2 uv_MainTex;
};
void surf (Input IN, inout SurfaceOutput o)
{
fixed2 scrolledUV = IN.uv_MainTex;
fixed xScrollValue = _ScrollXSpeed * _Time;
fixed yScrollValue = _ScrollYSpeed * _Time;
scrolledUV += fixed2 (xScrollValue, yScrollValue);
half4 c = tex2D (_MainTex, scrolledUV);
o.Albedo = c.rgb * _ColorTint;
o.Alpha = c.a * _FadeValue;
}
inline fixed4 LightingUnlit (SurfaceOutput s, fixed3 lightDir, fixed3 viewDir, fixed atten)
{
fixed4 c;
c.rgb = s.Albedo;
c.a = s.Alpha;
return c;
}
ENDCG
}
FallBack "Transparent/Diffuse"
}

Modelview Difference between GLSL and HLSL?

I'm wondering if there is a difference between GLSL and HLSL mathematics.
I'm using a self-made engine which works fine with OpenTK. My SharpDX implementation gets a bit further every day.
I'm currently working on the model-view-projection matrix.
To see if it works I use a simple project which works fine with OpenTK.
So I changed the shader code from GLSL to HLSL, because the rest of the program uses the engine functions. The program did not work: I couldn't see the geometry. So I changed the model-view matrix and the projection matrix to the identity matrix, and afterwards it worked and I saw the geometry. Then I changed the GLSL a bit because I wanted the GLSL code to be similar to the HLSL, and I also changed the matrices there to the identity too. Afterwards I did not see anything, it didn't work ... So I'm stuck ... Does anyone have an idea?
Anyway, long story short:
My HLSL Shader Code
public string Vs = #"cbuffer Variables : register(b0){
float4 Testfarbe;
float4x4 FUSEE_MVP;
}
struct VS_IN
{
float4 pos : POSITION;
float4 tex : TEXCOORD;
float4 normal : NORMAL;
};
struct PS_IN
{
float4 pos : SV_POSITION;
float4 col : COLOR;
float4 tex : TEXCOORD;
float4 normal : NORMAL;
};
PS_IN VS( VS_IN input )
{
PS_IN output = (PS_IN)0;
input.pos.w = 1.0f;
output.pos = mul(input.pos,FUSEE_MVP);
output.col = Testfarbe;
/*output.col = FUSEE_MV._m00_m01_m02_m03;*/
/* output.normal = input.normal;
output.tex = input.tex;*/
/* if (FUSEE_MV._m00 == 4.0f)
output.col = float4(1,0,0,1);
else
output.col = float4(0,0,1,1);*/
return output;
}
";
string Ps = #"
SamplerState pictureSampler;
Texture2D imageFG;
struct PS_IN
{
float4 pos : SV_POSITION;
float4 col : COLOR;
float4 tex : TEXCOORD;
float4 normal : NORMAL;
};
float4 PS( PS_IN input ) : SV_Target
{
return input.col;
/*return imageFG.Sample(pictureSampler,input.tex);*/
}";
So I changed my old working OpenTK project to see where the difference is between OpenTK and SharpDX relating to the math calculations.
The GLSL code:
public string Vs = #"
/* Copies incoming vertex color without change.
* Applies the transformation matrix to vertex position.
*/
attribute vec4 fuColor;
attribute vec3 fuVertex;
attribute vec3 fuNormal;
attribute vec2 fuUV;
varying vec4 vColor;
varying vec3 vNormal;
varying vec2 vUV;
uniform mat4 FUSEE_MVP;
uniform mat4 FUSEE_ITMV;
void main()
{
gl_Position = FUSEE_MVP * vec4(fuVertex, 1.0);
/*vNormal = mat3(FUSEE_ITMV[0].xyz, FUSEE_ITMV[1].xyz, FUSEE_ITMV[2].xyz) * fuNormal;*/
vUV = fuUV;
}";
public string Ps = #"
/* Copies incoming fragment color without change. */
#ifdef GL_ES
precision highp float;
#endif
uniform vec4 vColor;
varying vec3 vNormal;
void main()
{
gl_FragColor = vColor * dot(vNormal, vec3(0, 0, 1));
}";
In the main code itself I only read an OBJ file and set the identity matrix:
public override void Init()
{
Mesh = MeshReader.LoadMesh(#"Assets/Teapot.obj.model");
//ShaderProgram sp = RC.CreateShader(Vs, Ps);
sp = RC.CreateShader(Vs, Ps);
_vTextureParam = sp.GetShaderParam("Testfarbe");//vColor
}
public override void RenderAFrame()
{
...
var mtxRot = float4x4.CreateRotationY(_angleHorz) * float4x4.CreateRotationX(_angleVert);
var mtxCam = float4x4.LookAt(0, 200, 500, 0, 0, 0, 0, 1, 0);
// first mesh
RC.ModelView = float4x4.CreateTranslation(0, -50, 0) * mtxRot * float4x4.CreateTranslation(-150, 0, 0) * mtxCam;
RC.SetShader(sp);
//mapping
RC.SetShaderParam(_vTextureParam, new float4(0.0f, 1.0f, 0.0f, 1.0f));
RC.Render(Mesh);
Present();
}
public override void Resize()
{
RC.Viewport(0, 0, Width, Height);
float aspectRatio = Width / (float)Height;
RC.Projection = float4x4.CreatePerspectiveFieldOfView(MathHelper.PiOver4, aspectRatio, 1, 5000);
}
(Screenshot: the two programs side by side.)
What I should also add is that as soon as the values of my ModelView identity are bigger than 1.5, I don't see anything in my window. Does anyone know what might be causing this?
I edited the post and the image so you can see a bigger difference.
Earlier in this post I had the identity matrix. If I use the identity matrix with this OBJ file
v 0.0 0.5 0.5
v 0.5 0.0 0.5
v -0.5 0.0 0.5
vt 1 0 0
vt 0 1 0
vt 0 0 0
f 1/2 2/3 3/1
I saw the triangle in my SharpDX project but not in my OpenTK one. But I think the teapot is better for showing the difference between the two projects, where only the shader code is different! I mean, I could have done something wrong in the SharpDX implementation for this engine, but let's assume everything there is right. At least I hope so, unless you guys tell me the shader code is just wrong ;)
I hope I described my problem clearly enough for you to understand it.
OK, so you have to transpose the matrix ...
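For illustration, a minimal sketch of the fix on the C# side (assuming SharpDX's row-major Matrix type; mvp stands for the model-view-projection matrix computed by the engine, and uploading it into the cbuffer is left as in the existing code):
// HLSL matrices in a cbuffer default to column-major layout, while the
// SharpDX.Matrix built on the CPU is row-major, so transpose before uploading.
var mvpForHlsl = SharpDX.Matrix.Transpose(mvp);
// ... then write mvpForHlsl into the constant-buffer slot that backs FUSEE_MVP
Alternatively, the untransposed matrix can be kept if the HLSL side swaps the mul() arguments (mul(FUSEE_MVP, input.pos)) or declares the cbuffer field as row_major.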
