I'm trying to implement resizing of the render target when the window/control is resized.
However, it is not working as expected (maybe because I'm not doing it correctly): the rendered texture does not fill my entire render target view.
Whenever the window is resized, I reset my render target view and any other render targets (textures) [please see the code below]:
this.ImgSource.SetRenderTargetDX11(null);
Disposer.SafeDispose(ref this.m_RenderTargetView);
Disposer.SafeDispose(ref this.m_d11Factory);
Disposer.SafeDispose(ref this.RenderTarget);
int width = (int)sizeInfo.Width;
int height = (int)sizeInfo.Height;
Texture2DDescription colordesc = new Texture2DDescription
{
BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
Format = PIXEL_FORMAT,
Width = width,
Height = height,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Default,
OptionFlags = ResourceOptionFlags.Shared,
CpuAccessFlags = CpuAccessFlags.None,
ArraySize = 1
};
this.RenderTarget = new Texture2D(this.Device, colordesc);
m_RenderTargetView = new RenderTargetView(this.Device, this.RenderTarget);
m_depthStencil = CreateTexture2D(this.Device, width, height, BindFlags.DepthStencil, Format.D24_UNorm_S8_UInt);
m_depthStencilView = new DepthStencilView(this.Device, m_depthStencil);
Device.ImmediateContext.Rasterizer.SetViewport(0, 0, width, height, 0.0f, 1.0f);
Device.ImmediateContext.OutputMerger.SetTargets(m_depthStencilView, m_RenderTargetView);
SetShaderAndVertices(sizeInfo);
SetShaderAndVertices method (sizeInfo is the size of the render target):
protected void SetShaderAndVertices(Size rendersize)
{
var device = this.Device;
var context = device.ImmediateContext;
ShaderBytecode shaderCode = GetShaderByteCode(eEffectType.Texture);
layout = new InputLayout(device, shaderCode, new[] {
new InputElement("SV_Position", 0, Format.R32G32B32A32_Float, 0, 0),
new InputElement("TEXCOORD", 0, Format.R32G32_Float, 32, 0),
});
// Write vertex data to a datastream
var stream = new DataStream(Utilities.SizeOf<VertexPositionTexture>() * 4, true, true);
stream.WriteRange(new[]
{
new VertexPositionTexture(
new Vector4(-1, 1, 0.0f, 1.0f), // position top-left
new Vector2(0f,0f)
),
new VertexPositionTexture(
new Vector4(1, 1, 0.0f, 1.0f), // position top-right
new Vector2(1,0)
),
new VertexPositionTexture(
new Vector4(-1, -1, 0.0f, 1.0f), // position bottom-left
new Vector2(0,1)
),
new VertexPositionTexture(
new Vector4(1, -1, 0.0f, 1.0f), // position bottom-right
new Vector2(1,1)
),
});
stream.Position = 0;
// Instantiate the vertex buffer from the vertex data
vertices = new SharpDX.Direct3D11.Buffer(device, stream, new BufferDescription()
{
BindFlags = BindFlags.VertexBuffer,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None,
SizeInBytes = Utilities.SizeOf<VertexPositionTexture>() * 4,
Usage = ResourceUsage.Default,
StructureByteStride = 0
});
stream.Dispose();
// Prepare All the stages
// for primitive topology https://msdn.microsoft.com/en-us/library/bb196414.aspx#ID4E2BAC
context.InputAssembler.InputLayout = (layout);
context.InputAssembler.PrimitiveTopology = (PrimitiveTopology.TriangleStrip);
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertices, Utilities.SizeOf<VertexPositionTexture>(), 0));
context.OutputMerger.SetTargets(m_RenderTargetView);
}
Shader File:
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
cbuffer PerObject: register(b0)
{
float4x4 WorldViewProj;
};
// ------------------------------------------------------
// A shader that accepts Position and Texture
// ------------------------------------------------------
struct VertexShaderInput
{
float4 Position : SV_Position;
float2 TextureUV : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : SV_Position;
float2 TextureUV : TEXCOORD0;
};
VertexShaderOutput VSMain(VertexShaderInput input)
{
VertexShaderOutput output = (VertexShaderOutput)0;
output.Position = input.Position;
output.TextureUV = input.TextureUV;
return output;
}
float4 PSMain(VertexShaderOutput input) : SV_Target
{
return ShaderTexture.Sample(Sampler, input.TextureUV);
}
// ------------------------------------------------------
// A shader that accepts Position and Color
// ------------------------------------------------------
struct ColorVS_IN
{
float4 pos : SV_Position;
float4 col : COLOR;
};
struct ColorPS_IN
{
float4 pos : SV_Position;
float4 col : COLOR;
};
ColorPS_IN ColorVS(ColorVS_IN input)
{
ColorPS_IN output = (ColorPS_IN)0;
output.pos = input.pos;
output.col = input.col;
return output;
}
float4 ColorPS(ColorPS_IN input) : SV_Target
{
return input.col;
}
// ------------------------------------------------------
// Techniques
// ------------------------------------------------------
technique11 Color
{
pass P0
{
SetGeometryShader(0);
SetVertexShader(CompileShader(vs_5_0, ColorVS()));
SetPixelShader(CompileShader(ps_5_0, ColorPS()));
}
}
technique11 TextureLayer
{
pass P0
{
SetGeometryShader(0);
SetVertexShader(CompileShader(vs_5_0, VSMain()));
SetPixelShader(CompileShader(ps_5_0, PSMain()));
}
}
I would like to be able to either stretch the image or keep its aspect ratio, depending on my requirement.
Also, my texture data is updated from another thread by mapping the bitmap data to my render target.
Note: the texture fills the entire render target view if the mapped image is the same size as my render target view.
Please see the screenshots below.
1st screenshot: display image size (835, 626) on a render target of size (720, 576).
2nd screenshot: display image size (899, 674) on a render target of size (899, 676).
If you need any more information, let me know and I will happily provide it.
Thanks.
P.S.: I have also posted this question in another forum but had no luck, hence posting here, hoping someone can point me in the right direction.
Using C# and SharpDX with Direct3D 11 and D3DImage; not using swap chains.
Also, see below the code used to map the texture data:
Device.ImmediateContext.ClearRenderTargetView(this.m_RenderTargetView, Color4.Black);
Texture2DDescription colordesc = new Texture2DDescription
{
BindFlags = BindFlags.ShaderResource,
Format = PIXEL_FORMAT,
Width = iWidthOfImage,
Height = iHeightOfImage,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Dynamic,
OptionFlags = ResourceOptionFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
ArraySize = 1
};
Texture2D newFrameTexture = new Texture2D(this.Device, colordesc);
DataStream dtStream = null;
DataBox dBox = Device.ImmediateContext.MapSubresource(newFrameTexture, 0, MapMode.WriteDiscard, 0, out dtStream);
if (dtStream != null)
{
int iRowPitch = dBox.RowPitch;
for (int iHeightIndex = 0; iHeightIndex < iHeightOfImage; iHeightIndex++)
{
//Copy the image bytes to the texture row by row:
// we write width * bytes-per-pixel bytes per row (4 bytes per pixel
// in our BGRA32 case), respecting the mapped texture's row pitch
dtStream.Position = iHeightIndex * iRowPitch;
Marshal.Copy(decodedData, iHeightIndex * iWidthOfImage * 4, new IntPtr(dtStream.DataPointer.ToInt64() + iHeightIndex * iRowPitch), iWidthOfImage * 4);
}
}
Device.ImmediateContext.UnmapSubresource(newFrameTexture, 0);
Device.ImmediateContext.CopySubresourceRegion(newFrameTexture, 0, null, this.RenderTarget, 0);
newFrameTexture.Dispose(); // dispose the temporary texture, otherwise one texture leaks per frame
After resizing, your new window's aspect ratio doesn't match your texture, so it can't cover the entire client area of the window. You'll have to enlarge the textured geometry so that it covers the entire client area when projected onto the camera plane. This will necessarily mean that a portion of the texture is clipped.
Basically, this is the same problem when watching 4:3 aspect ratio TV programming on a 16:9 monitor. You can either zoom it to fill the screen, whereby the 4:3 content is cropped or stretched, or you can letter box the 4:3 content with black on either side of the content.
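For the letterbox option, one way is to shrink the full-screen quad in normalized device coordinates so the image keeps its aspect ratio inside the render target. Below is a minimal sketch of the idea (my own, not the poster's code; imageWidth/imageHeight and targetWidth/targetHeight are hypothetical variables, and the results would replace the hard-coded -1/+1 positions in SetShaderAndVertices):
// Scale factors for a letterboxed quad in NDC (-1..1)
float imageAspect = imageWidth / (float)imageHeight;
float targetAspect = targetWidth / (float)targetHeight;
float scaleX = 1f, scaleY = 1f;
if (imageAspect > targetAspect)
    scaleY = targetAspect / imageAspect; // image is wider: black bars top and bottom
else
    scaleX = imageAspect / targetAspect; // image is taller: black bars left and right
// Build the quad with these extents instead of -1/+1, e.g. the top-left corner becomes
// new Vector4(-scaleX, scaleY, 0.0f, 1.0f); with scaleX = scaleY = 1 you get the stretch behaviour.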
I am capturing InkStrokes and need to create a scaled bitmap image of the strokes in the background. The captured images need to be of uniform size regardless of how big the bounding box of the ink is.
For example, if the original ink stroke is drawn with its bounding box top/left at 100,100 and size 200,200 on the ink canvas, I want the ink to start at 0,0 of the new rendered bitmap, which is 50,50 in size (ignore the impact of stroke width for now).
I have figured out how to scale the ink strokes (thanks StackOverflow) but not how to move them. Right now, it seems I have to create a bitmap the size of the InkCanvas, render the scaled ink, then crop the bigger image to the correct size.
I've tried using InkStroke.PointTransform via
var scaleMatrix = Matrix3x2.CreateScale(scale);
scaleMatrix.Translation = -offset; // top/left of ink stroke bounding box
stroke.PointTransform = scaleMatrix;
But the coordinates do not come out correct.
Any help much appreciated.
You can combine transformations by multiplying matrices. This works for me:
var strokes = inkCanvas.InkPresenter.StrokeContainer.GetStrokes();
var boundingBox = inkCanvas.InkPresenter.StrokeContainer.BoundingRect;
var matrix1 = Matrix3x2.CreateTranslation((float)-boundingBox.X, (float)-boundingBox.Y);
var matrix2 = Matrix3x2.CreateScale(0.5f);
var builder = new InkStrokeBuilder();
var newStrokeList = new List<InkStroke>();
foreach (var stroke in strokes)
{
newStrokeList.Add(builder.CreateStrokeFromInkPoints
(stroke.GetInkPoints(), matrix1 * matrix2));
}
//Add the translated and scaled strokes to the inkcanvas
inkCanvas.InkPresenter.StrokeContainer.AddStrokes(newStrokeList);
Maybe I was still doing something wrong, but it appears you cannot use InkStrokeBuilder.CreateStrokeFromInkPoints with more than one kind of transform. I tried all kinds of combinations/approaches, and just could not get it to work.
Here is my solution...
private static IList<InkStroke> GetScaledAndTransformedStrokes(IList<InkStroke> strokeList, float scale)
{
var builder = new InkStrokeBuilder();
var newStrokeList = new List<InkStroke>();
var boundingBox = strokeList.GetBoundingBox();
foreach (var singleStroke in strokeList)
{
var translateMatrix = new Matrix(1, 0, 0, 1, -boundingBox.X, -boundingBox.Y);
var newInkPoints = new List<InkPoint>();
var originalInkPoints = singleStroke.GetInkPoints();
foreach (var point in originalInkPoints)
{
var newPosition = translateMatrix.Transform(point.Position);
var newInkPoint = new InkPoint(newPosition, point.Pressure, point.TiltX, point.TiltY, point.Timestamp);
newInkPoints.Add(newInkPoint);
}
var newStroke = builder.CreateStrokeFromInkPoints(newInkPoints, new Matrix3x2(scale, 0, 0, scale, 0, 0));
newStrokeList.Add(newStroke);
}
return newStrokeList;
}
I ended up having to apply my own translate transform and then use builder.CreateStrokeFromInkPoints with a scale matrix applied to get the results I wanted. GetBoundingBox is my own extension:
public static class RectExtensions
{
public static Rect CombineWith(this Rect r, Rect rect)
{
var top = (r.Top < rect.Top) ? r.Top : rect.Top;
var left = (r.Left < rect.Left) ? r.Left : rect.Left;
var bottom = (r.Bottom < rect.Bottom) ? rect.Bottom : r.Bottom;
var right = (r.Right < rect.Right) ? rect.Right : r.Right;
var newRect = new Rect(new Point(left, top), new Point(right, bottom));
return newRect;
}
}
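For completeness: the post shows only the CombineWith helper, not GetBoundingBox itself. A plausible sketch of it, built on CombineWith (my assumption, not the author's code):
public static Rect GetBoundingBox(this IList<InkStroke> strokes)
{
    // Fold every stroke's bounding rect into one enclosing rect
    var box = strokes[0].BoundingRect;
    foreach (var stroke in strokes)
    {
        box = box.CombineWith(stroke.BoundingRect);
    }
    return box;
}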
Short Version of Problem
I am trying to access the contents of a RenderTexture in Unity, which I have been drawing to with my own Material using Graphics.Blit.
Graphics.Blit (null, renderTexture, material);
My material successfully converts a YUV image to RGB, which I have tested by assigning the result to the texture of a UI element. The correct RGB image is visible on the screen.
However, I also need the raw data for a QR code scanner. I am accessing it the way I would access it from a camera, as explained here. In a comment down there, it was mentioned that the extraction is also possible from a RenderTexture that was filled with Graphics.Blit. But when I try to do that, my texture only contains the value 205 everywhere. This is the code I am using in the Update function, directly after the Graphics.Blit call:
RenderTexture.active = renderTexture;
texture.ReadPixels (new Rect (0, 0, width, height), 0, 0);
texture.Apply ();
RenderTexture.active = null;
When I assign this texture to the same UI element, it is gray and slightly transparent. When viewing the image values, they are all 205.
Why does this happen? Could there be a problem with the formats of the RenderTexture and the Texture2D I am trying to fill?
Complete Code
In the following I add the whole code I am using. The variable names differ slightly from the ones used above, but they do essentially the same thing:
/**
* This class continously converts the y and uv textures in
* YUV color space to a RGB texture, which can be used somewhere else
*/
public class YUV2RGBConverter : MonoBehaviour {
public Material yuv2rgbMat;
// Input textures, set these when they are available
[HideInInspector]
public Texture2D yTex;
[HideInInspector]
public Texture2D uvTex;
// Output, the converted textures
[HideInInspector]
public RenderTexture rgbRenderTex;
[HideInInspector]
public Texture2D rgbTex;
[HideInInspector]
public Color32[] rawRgbData;
/// Describes how often per second the image should be transferred to the CPU
public float GPUTransferRate = 1.0f;
private float timeSinceLastGPUTransfer = 0.0f;
private int width;
private int height;
/**
* Initializes the used textures
*/
void Start () {
updateSize (width, height);
}
/**
* Creates the textures needed by this class, based on the given sizes
*/
public void updateSize(int width, int height)
{
// Generate the input textures
yTex = new Texture2D(width / 4, height, TextureFormat.RGBA32, false);
uvTex = new Texture2D ((width / 2) * 2 / 4, height / 2, TextureFormat.RGBA32, false);
// Generate the output texture
rgbRenderTex = new RenderTexture(width, height, 0);
rgbRenderTex.antiAliasing = 0;
rgbTex = new Texture2D (width, height, TextureFormat.RGBA32, false);
// Set to shader
yuv2rgbMat.SetFloat("_TexWidth", width);
yuv2rgbMat.SetFloat("_TexHeight", height);
}
/**
* Sets the y and uv textures to some dummy data
*/
public void fillYUWithDummyData()
{
// Set the y tex everywhere to the fractional part of the current time
float colorValue = (float)Time.time - (float)((int)Time.time);
for (int y = 0; y < yTex.height; y++) {
for (int x = 0; x < yTex.width; x++) {
Color yColor = new Color (colorValue, colorValue, colorValue, colorValue);
yTex.SetPixel (x, y, yColor);
}
}
yTex.Apply ();
// Set the uv tex colors
for (int y = 0; y < uvTex.height; y++) {
for (int x = 0; x < uvTex.width; x++) {
int firstXCoord = 2 * x;
int secondXCoord = 2 * x + 1;
int yCoord = y;
float firstXRatio = (float)firstXCoord / (2.0f * (float)uvTex.width);
float secondXRatio = (float)secondXCoord / (2.0f * (float)uvTex.width);
float yRatio = (float)y / (float)uvTex.height;
Color uvColor = new Color (firstXRatio, yRatio, secondXRatio, yRatio);
uvTex.SetPixel (x, y, uvColor);
}
}
uvTex.Apply ();
}
/**
* Continuously convert y and uv texture to rgb texture with custom yuv2rgb shader
*/
void Update () {
// Draw to it with the yuv2rgb shader
yuv2rgbMat.SetTexture ("_YTex", yTex);
yuv2rgbMat.SetTexture ("_UTex", uvTex);
Graphics.Blit (null, rgbRenderTex, yuv2rgbMat);
// Only scan once per second
if (timeSinceLastGPUTransfer > 1 / GPUTransferRate) {
timeSinceLastGPUTransfer = 0;
// Fetch its pixels and set it to rgb texture
RenderTexture.active = rgbRenderTex;
rgbTex.ReadPixels (new Rect (0, 0, width, height), 0, 0);
rgbTex.Apply ();
RenderTexture.active = null;
rawRgbData = rgbTex.GetPixels32 ();
} else {
timeSinceLastGPUTransfer += Time.deltaTime;
}
}
}
OK, sorry that I have to answer my own question. The solution is very simple:
The width and height variables that I was using in this line:
rgbTex.ReadPixels (new Rect (0, 0, width, height), 0, 0);
were not initialized, so they were 0.
I just had to add those lines to the updateSize function:
this.width = width;
this.height = height;
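So the corrected updateSize simply stores the passed values first (a sketch of the fix described above):
public void updateSize(int width, int height)
{
    // Store the sizes so ReadPixels in Update() uses the real dimensions
    this.width = width;
    this.height = height;
    // ... texture creation as before ...
}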
I am trying to create a function that takes a grayscale image and a color, and colors the grayscale image with that color's shade while keeping the shading levels of the grayscale image. The function also should not color the transparent parts of the image. I have multiple layers (multiple PNGs) that I will combine later, and I only need to color certain layers. I have looked around and found similar things, but not quite what I need.
I know how to do it in HTML5 on the front end using Canvas, but I need a way to achieve the same thing on the back end, I am guessing either with a manual method using locked bitmap memory calls or with the ColorMatrix class. Can anyone help me? Graphics aren't my strongest area, but I am slowly learning.
See the function below for what I need in C#, as I did it in JavaScript. The hidden canvas part isn't as important, because I am doing this server side to save to a PNG file...
function drawImage(imageObj, color) {
var hidden_canvas = document.createElement("canvas");
hidden_canvas.width = imageObj.width;
hidden_canvas.height = imageObj.height;
var hidden_context = hidden_canvas.getContext("2d");
// draw the image on the hidden canvas
hidden_context.drawImage(imageObj, 0, 0);
if (color !== undefined) {
var imageData = hidden_context.getImageData(0, 0, imageObj.width, imageObj.height);
var data = imageData.data;
for (var i = 0; i < data.length; i += 4) {
var brightness = 0.34 * data[i] + 0.5 * data[i + 1] + 0.16 * data[i + 2];
//red
data[i] = brightness + color.R;
//green
data[i + 1] = brightness + color.G;
//blue
data[i + 2] = brightness + color.B;
}
//overwrite original image
hidden_context.putImageData(imageData, 0, 0);
}
var canvas = document.getElementById('card');
var context = canvas.getContext('2d');
context.drawImage(hidden_canvas, 0, 0);
};
This should do the job:
public static Bitmap MakeChromaChange(Bitmap bmp0, Color tCol, float gamma)
{
Bitmap bmp1 = new Bitmap(bmp0.Width, bmp0.Height);
using (Graphics g = Graphics.FromImage(bmp1))
{
float f = (tCol.R + tCol.G + tCol.B) / 765f;
float tr = tCol.R / 255f - f;
float tg = tCol.G / 255f - f;
float tb = tCol.B / 255f - f;
ColorMatrix colorMatrix = new ColorMatrix(new float[][]
{ new float[] {1f + tr, 0, 0, 0, 0},
new float[] {0, 1f + tg, 0, 0, 0},
new float[] {0, 0, 1f + tb, 0, 0},
new float[] {0, 0, 0, 1, 0},
new float[] {0, 0, 0, 0, 1} });
ImageAttributes attributes = new ImageAttributes();
attributes.SetGamma(gamma);
attributes.SetColorMatrix(colorMatrix);
g.DrawImage(bmp0, new Rectangle(0, 0, bmp0.Width, bmp0.Height),
0, 0, bmp0.Width, bmp0.Height, GraphicsUnit.Pixel, attributes);
}
return bmp1;
}
Note that I kept a gamma parameter; if you don't need it, keep the value at 1f.
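For example, you could call it like this (a usage sketch; the file names are made up):
using (var src = new Bitmap("layer.png"))
using (var tinted = MakeChromaChange(src, Color.Red, 1f))
{
    tinted.Save("layer_tinted.png", System.Drawing.Imaging.ImageFormat.Png);
}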
Here it is at work, adding first red, then more red and some blue:
Transparent pixels are not affected.
For more on ColorMatrix, here is a really nice intro!
As a fun project I applied the known colors to a known face:
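Since the question also mentions a manual method via locked bitmap memory: here is a rough per-pixel sketch of the JavaScript loop using LockBits (my own sketch, not part of the answer above; assumes 32bpp ARGB data and needs System.Drawing.Imaging and System.Runtime.InteropServices):
public static Bitmap ColorizeManually(Bitmap src, Color color)
{
    var bmp = new Bitmap(src);
    var rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
    var data = bmp.LockBits(rect, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
    int byteCount = Math.Abs(data.Stride) * bmp.Height;
    byte[] buffer = new byte[byteCount];
    Marshal.Copy(data.Scan0, buffer, 0, byteCount);
    for (int i = 0; i < byteCount; i += 4)
    {
        if (buffer[i + 3] == 0) continue; // leave fully transparent pixels untouched
        // pixel layout is BGRA; same brightness weights as the JavaScript version
        float brightness = 0.34f * buffer[i + 2] + 0.5f * buffer[i + 1] + 0.16f * buffer[i];
        buffer[i + 2] = (byte)Math.Min(255f, brightness + color.R); // red
        buffer[i + 1] = (byte)Math.Min(255f, brightness + color.G); // green
        buffer[i] = (byte)Math.Min(255f, brightness + color.B);     // blue
    }
    Marshal.Copy(buffer, data.Scan0, 0, byteCount);
    bmp.UnlockBits(data);
    return bmp;
}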
I'm using SlimDX to build a small visualizer; however, I recently stumbled into a problem. When I transform the triangle with the MVP matrix, it disappears.
The constant buffers are loaded properly, because I can see the right color loaded through them.
The triangle used as a test is visible if I don't transform it in the vertex shader.
So I suppose the problem is in either the view matrix or the projection matrix.
Moreover, I don't know if I should transpose them.
vertices = new DataStream(12 * 3, true, true);
vertices.Write(new Vector3(0.0f, 0.5f, 0.5f));
vertices.Write(new Vector3(0.5f, -0.5f, 0.5f));
vertices.Write(new Vector3(-0.5f, -0.5f, 0.5f));
vertices.Position = 0;
vertexBuffer = new Buffer(device, vertices, 12 * 3, ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
// configure the Input Assembler portion of the pipeline with the vertex data
dc3D.InputAssembler.InputLayout = baseShaders.GetInputLayout();
dc3D.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
dc3D.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertexBuffer, 12, 0));
// set the shaders
dc3D.VertexShader.Set(baseShaders.GetVertexShader());
dc3D.PixelShader.Set(baseShaders.GetPixelShader());
cbufferData = new Buffer(device, new BufferDescription
{
Usage = ResourceUsage.Default,
SizeInBytes = System.Runtime.InteropServices.Marshal.SizeOf(typeof(BaseShaders.ConstantBuffer)),
BindFlags = BindFlags.ConstantBuffer
});
dc3D.VertexShader.SetConstantBuffer(cbufferData, 0);
Vector3 eye = new Vector3(4, 4, 4);
Vector3 target = new Vector3(0, 0, 0);
Vector3 up = new Vector3(0, 1, 0);
Matrix.LookAtLH(ref eye, ref target, ref up, out cbuffer.view) ;
//for now width and height are hardcoded.
Matrix.PerspectiveFovLH((float)Math.PI / 4, 617/643.0f, 1.0f, 100.0f, out cbuffer.projection);
cbuffer.color = new Vector4(0.1f, 1.0f, 1.0f, 1.0f);
//Matrix.Transpose(cbuffer.view);
//Matrix.Transpose(cbuffer.projection);
// update constant buffers.
var data = new DataStream(System.Runtime.InteropServices.Marshal.SizeOf(typeof(BaseShaders.ConstantBuffer)), true, true);
data.Write(cbuffer);
data.Position = 0;
dc3D.UpdateSubresource(new DataBox(0, 0, data), cbufferData, 0);
It's been some hours now, and I haven't found any solution.
Oh, here is the vertex shader code:
cbuffer ConstantBuffer : register(b0)
{
matrix view;
matrix projection;
float4 color;
}
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut main(float4 position : POSITION)
{
VOut output;
output.position = mul(mul(position, view), projection);
output.color = color;
return output;
}
By making some small changes, and in fact transposing the matrix, I managed to get the code to work.
Here is the vertex shader:
cbuffer ConstantBuffer : register(b0)
{
float4x4 mvp;
float4 color;
}
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut main(float4 position : POSITION)
{
VOut output;
output.position = mul(position, mvp);
output.color = color;
return output;
}
Here is the changed code:
Vector3 eye = new Vector3(1, 1, 1);
Vector3 target = new Vector3(0, 0, 0);
Vector3 up = new Vector3(0, 1, 0);
Matrix view = new Matrix();
Matrix projection = new Matrix();
view = Matrix.LookAtLH(eye, target, up) ;
projection = Matrix.PerspectiveFovLH((float)Math.PI / 4, 617/643.0f, 0.1f, 100.0f);
cbuffer.color = new Vector4(0.1f, 1.0f, 1.0f, 1.0f);
cbuffer.mvp = Matrix.Multiply(view, projection);
Matrix.Transpose(ref cbuffer.mvp, out cbuffer.mvp);
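For reference, the post never shows BaseShaders.ConstantBuffer; to match the final shader it would presumably look like this (a sketch under that assumption). The transpose is needed because SlimDX builds row-major matrices while HLSL packs a float4x4 column-major by default:
[StructLayout(LayoutKind.Sequential)]
struct ConstantBuffer
{
    public Matrix mvp;    // 64 bytes, stored pre-transposed for HLSL's default column-major packing
    public Vector4 color; // 16 bytes; the total size (80) is a multiple of 16, as D3D11 requires
}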
I've ported this code 1:1 from C++/OpenGL to C# SharpGL:
float[] cameraAngle = { 0, 0, 0 };
float[] cameraPosition = { 0, 0, 10 };
float[] modelPosition = { 0, 0, 0 };
float[] modelAngle = { 0, 0, 0 };
float[] matrixView = new float[16];
float[] matrixModel = new float[16];
float[] matrixModelView = new float[16];
// clear buffer
gl.ClearColor(0.1f, 0.1f, 0.1f, 1);
gl.Clear(OpenGL.COLOR_BUFFER_BIT | OpenGL.DEPTH_BUFFER_BIT | OpenGL.STENCIL_BUFFER_BIT);
// initialze ModelView matrix
gl.PushMatrix();
gl.LoadIdentity();
// ModelView matrix is product of viewing matrix and modeling matrix
// ModelView_M = View_M * Model_M
// First, transform the camera (viewing matrix) from world space to eye space
// Notice all values are negated, because we move the whole scene with the
// inverse of camera transform
gl.Rotate(-cameraAngle[0], 1, 0, 0); // pitch
gl.Rotate(-cameraAngle[1], 0, 1, 0); // heading
gl.Rotate(-cameraAngle[2], 0, 0, 1); // roll
gl.Translate(-cameraPosition[0], -cameraPosition[1], -cameraPosition[2]);
// we have set viewing matrix upto this point. (Matrix from world space to eye space)
// save the view matrix only
gl.GetFloat(OpenGL.MODELVIEW_MATRIX, matrixView); // save viewing matrix
//=========================================================================
// always Draw the grid at the origin (before any modeling transform)
//DrawGrid(10, 1);
// In order to get the modeling matrix only, reset OpenGL.MODELVIEW matrix
gl.LoadIdentity();
// transform the object
// From now, all transform will be for modeling matrix only. (transform from object space to world space)
gl.Translate(modelPosition[0], modelPosition[1], modelPosition[2]);
gl.Rotate(modelAngle[0], 1, 0, 0);
gl.Rotate(modelAngle[1], 0, 1, 0);
gl.Rotate(modelAngle[2], 0, 0, 1);
// save modeling matrix
gl.GetFloat(OpenGL.MODELVIEW_MATRIX, matrixModel);
//=========================================================================
// re-strore OpenGL.MODELVIEW matrix by multiplying matrixView and matrixModel before drawing the object
// ModelView_M = View_M * Model_M
gl.LoadMatrixf(matrixView); // Mmv = Mv
gl.MultMatrixf(matrixModel); // Mmv *= Mm
// save ModelView matrix
gl.GetFloat(OpenGL.MODELVIEW_MATRIX, matrixModelView);
//=========================================================================
// Draw a teapot after ModelView transform
// v' = Mmv * v
//DrawAxis(4);
//DrawTeapot();
gl.PopMatrix();
It doesn't look like the ModelView matrix gets multiplied; the result is the identity matrix! What could be wrong?
Thanks
wrong glMatrixMode?
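In other words: if the current matrix mode is still the projection mode when this code runs, every Rotate/Translate/LoadMatrixf call above modifies the projection stack, and GetFloat(MODELVIEW_MATRIX) keeps returning identity. A minimal fix sketch, in the same constant style as the code above (the names may be GL_-prefixed depending on the SharpGL version):
// Select the ModelView stack before any of the transform calls
gl.MatrixMode(OpenGL.MODELVIEW); // may be OpenGL.GL_MODELVIEW in your SharpGL version
gl.PushMatrix();
gl.LoadIdentity();
// ... camera transform, GetFloat(MODELVIEW_MATRIX, matrixView), model transform, etc. ...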