I'm using code which tries to work like Glu.Project() since OpenTK doesn't support Glu.
Vector4 pos = new Vector4(s.Position.X, 0.0f, s.Position.Y, 1.0f);
Matrix4 mov = new Matrix4();
Matrix4 prj = new Matrix4();
Matrix4 mpj = new Matrix4();
float[] vp = new float[4];
GL.GetFloat(GetPName.ModelviewMatrix, out mov);
GL.GetFloat(GetPName.ProjectionMatrix, out prj);
GL.GetFloat(GetPName.Viewport, vp);
Matrix4.Mult(ref prj, ref mov, out mpj);
Vector4.Transform(ref pos, ref mpj, out pos);
// Final mathematics as described in OpenGL 2.1 Glu specs
s.set2DPos(new Vector2f( (vp[0] + (vp[2] * (pos.X + 1) / 2.0f)),
(vp[1] + (vp[3] * (pos.Y + 1) / 2.0f)) ));
// Final mathematics as described in OpenGL 3 Vector specs
s.set2DPos(new Vector2f( (vp[2] / 2 * pos.X + vp[0]),
(vp[3] / 2 * pos.Y + vp[1]) ));
// Neither of them works, but the OpenGL 3 vector-specs version comes closer.
s is a class which primarily exists as a model in 3D space at s.Position.
But the values I'm getting from this are astronomically far beyond the window boundaries.
The ModelView matrix from a breakpoint:
{(1, 0, 0, 0)
(0, 0.7071068, 0.7071068, 0)
(0, -0.7071068, 0.7071068, 0)
(0, -141.4214, -141.4214, 1)}
The Projection matrix from a breakpoint:
{(1.931371, 0, 0, 0)
(0, 2.414213, 0, 0)
(0, 0, -1.0002, -1)
(0, 0, -2.0002, 0)}
Am I doing something wrong or did I get something wrong? Am I missing something?
Vector2 GetScreenCoordinates(Vector3 ObjectCoordinate)
{
// ref: http://www.songho.ca/opengl/gl_transform.html
Vector4 obj = new Vector4(ObjectCoordinate.X, ObjectCoordinate.Y, ObjectCoordinate.Z, 1.0f);
Matrix4 projection = new Matrix4();
Matrix4 modelView = new Matrix4();
Vector4 viewPort = new Vector4();
GL.GetFloat(GetPName.ModelviewMatrix, out modelView);
GL.GetFloat(GetPName.ProjectionMatrix, out projection);
GL.GetFloat(GetPName.Viewport, out viewPort);
Vector4
eye = Vector4.Transform(obj, modelView),
clip = Vector4.Transform(eye, projection);
Vector3
ndc = new Vector3(clip.X / clip.W, clip.Y / clip.W, clip.Z / clip.W);
Vector2
w = new Vector2(viewPort.Z / 2 * ndc.X + viewPort.X + viewPort.Z / 2,
viewPort.W / 2 * ndc.Y + viewPort.Y + viewPort.W / 2);
return w;
}
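For reference, here is a minimal sketch of how this helper might be called (assuming a GameWindow-derived class where the fixed-function matrices are still current, and the same X/Z layout for s.Position as in the question):
// Hedged usage sketch: project the model position and check it against the window size.
Vector2 screenPos = GetScreenCoordinates(new Vector3(s.Position.X, 0.0f, s.Position.Y));
// For a point inside the view frustum, screenPos should fall within
// [0, viewport width] x [0, viewport height]; values far outside that range
// usually mean the perspective divide (clip.W) or the matrices are off.
s.set2DPos(new Vector2f(screenPos.X, screenPos.Y));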
Have you sanity checked the s.Position value before using it? What about the projection and transformation matrices you apply to the vector, are they sane looking?
I'm not familiar with OpenTK, but the mathematics prior to set2DPos() look sensible enough.
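A quick way to eyeball both matrices is to read them back and print them (a hedged sketch, using the same GL.GetFloat overloads as the snippets above):
Matrix4 mv, pr;
GL.GetFloat(GetPName.ModelviewMatrix, out mv);
GL.GetFloat(GetPName.ProjectionMatrix, out pr);
Console.WriteLine(mv);   // OpenTK's Matrix4 prints its four rows
Console.WriteLine(pr);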
Here is how it works:
GL.GetFloat(GetPName.ModelviewMatrix, out model);
GL.GetFloat(GetPName.ProjectionMatrix, out proj);
GL.GetFloat(GetPName.Viewport, view);
Matrix4.Transpose(ref model, out model);
Matrix4.Transpose(ref proj, out proj);
Vector4 posa = new Vector4(0.0f, s.Position.Y, 1.0f, s.Position.X);
Vector4 posb = new Vector4(s.Position.Y, 1.0f, s.Position.X, 0.0f);
Vector4 posc = new Vector4(1.0f, s.Position.X, 0.0f, s.Position.Y);
Vector4 one = new Vector4(1.0f, 1.0f, 1.0f, 1.0f);
Matrix4 posv = new Matrix4(pos, posa, posb, posc);
Matrix4 ProjPos = Matrix4.Mult(Matrix4.Mult(proj, model), posv);
Matrix4.Transpose(ref ProjPos, out ProjPos);
Vector2f posout = new Vector2f(
(0 + (this.glc.Width * (ProjPos.Column0.X / ProjPos.Column0.W + 1.0f)) - (this.glc.Width / 2.0f)),
(0 + (this.glc.Height * (ProjPos.Column0.Y / ProjPos.Column0.W + 1.0f)) - (this.glc.Height / 2.0f))
);
In case anyone needs it :)
I am drawing lines on a canvas using the 'UIVertex' struct and I would like to be able to detect click events on the lines I have drawn.
Here is how I draw lines (largely inspired by this tutorial => https://www.youtube.com/watch?v=--LB7URk60A):
void DrawVerticesForPoint(Vector2 point, float angle, VertexHelper vh)
{
UIVertex vertex = UIVertex.simpleVert;
//vertex.color = Color.red;
vertex.position = Quaternion.Euler(0, 0, angle) * new Vector3(-thickness / 2, 0);
vertex.position += new Vector3(unitWidth * point.x, unitHeight * point.y);
vh.AddVert(vertex);
vertex.position = Quaternion.Euler(0, 0, angle) * new Vector3(thickness / 2, 0);
vertex.position += new Vector3(unitWidth * point.x, unitHeight * point.y);
vh.AddVert(vertex);
}
Any idea?
Here is the solution I have found thanks to this post:
public bool PointIsOnLine(Vector3 point, UILineRenderer line)
{
Vector3 point1 = line.points[0];
Vector3 point2 = line.points[1];
var dirNorm = (point2 - point1).normalized;
var t = Vector2.Dot(point - point1, dirNorm);
var tClamped = Mathf.Clamp(t, 0, (point2 - point1).magnitude);
var closestPoint = point1 + dirNorm * tClamped;
var dist = Vector2.Distance(point, closestPoint);
if(dist < line.thickness / 2)
{
return true;
}
return false;
}
The UILineRenderer class is the class I have which represents my lines.
line.points[0] and line.points[1] contain the coordinates of the two points which determine the line length and position. line.thickness is the... thickness of the line :O
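For completeness, a hedged sketch of how the check might be wired to a click, assuming the lines live on a Screen Space - Overlay canvas so that line.points and Input.mousePosition are in the same space (the lines array and the containing MonoBehaviour are hypothetical):
using UnityEngine;

public class LineClickDetector : MonoBehaviour
{
    public UILineRenderer[] lines;   // hypothetical: the lines to test

    void Update()
    {
        if (!Input.GetMouseButtonDown(0))
            return;
        Vector3 click = Input.mousePosition;   // assumed to be in the same space as line.points
        foreach (var line in lines)
        {
            if (PointIsOnLine(click, line))    // the method shown above
                Debug.Log("Clicked line: " + line.name);
        }
    }

    // PointIsOnLine(...) from the answer above is assumed to be a member of this class.
}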
I am working on a renderer, and I am having some troubles with the perspective projection matrix.
Following is my perspective projection matrix.
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4(1/(_tanHalffov * _ar), 0 , 0 , 0),
new Vector4(0 , 1 / _tanHalffov , 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange , 2*_zNear*_zFar / _zRange),
new Vector4(0 , 0 , 1 , 0));
}
I then multiplied it with the camera's transform matrix and the model's transform matrix.
It works, but the z direction seems stretched a bit, and if I make zFar larger the stretching is even more obvious. So I figured it may have something to do with zRange, but I already divide by zRange in the matrix, so isn't it supposed to be rescaled?
Following is the result of my program.
the 1:1:1 cube looks weird after the projection
even weirder in the corner
---Update---
This is the vertex shader
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 normal;
uniform vec3 cam_pos;
uniform mat4 transform;
uniform mat4 nptransform;
vec4 temp_Pos;
out vec3 normal0;
out vec2 texCoord0;
out vec3 cam_angle;
out vec3 position0;
void main()
{
temp_Pos = nptransform * vec4(position, 1.0);
position0 = vec3(temp_Pos.x,temp_Pos.y,temp_Pos.z);
cam_angle = normalize(cam_pos - position0);
normal0 = normal;
texCoord0 = texCoord;
gl_Position = transform * vec4(position, 1.0);//the bug is about this line
}
The following is the complete code for my matrices
public Matrix4 GetTransform(Vector3 _OffSet)
{
return Matrix4.Translation(Position - _OffSet) * Matrix4.RotateX(Rotation.x) * Matrix4.RotateY(Rotation.y) * Matrix4.RotateZ(Rotation.z) * Matrix4.Scale(Scale.x, Scale.y, Scale.z);
}
public Matrix4 GetProjectdTransform(Vector3 _OffSet)//This is the one I sent to the shader.
{
Transform CameraTransform = Core.The_Camera.Attaching_GameObject.transform;
return Matrix4.Projection(Core.MainCamera.zNear, Core.MainCamera.zFar, Core.MainCamera.Width, Core.MainCamera.Height, Core.MainCamera.fov) * Matrix4.RotateX(CameraTransform.Rotation.x) * Matrix4.RotateY(CameraTransform.Rotation.y) * Matrix4.RotateZ(CameraTransform.Rotation.z) * Matrix4.CameraTranslation(CameraTransform.Position) * GetTransform(_OffSet);
}
And here are the details of the matrix functions; there shouldn't be any problem with them, I have tested them a lot of times.
public static Matrix4 CameraTranslation(Vector3 _CameraPosition)
{
return new Matrix4(new Vector4(1, 0, 0, -_CameraPosition.x),
new Vector4(0, 1, 0, -_CameraPosition.y),
new Vector4(0, 0, 1, -_CameraPosition.z),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Translation(Vector3 _Position)
{
return new Matrix4(new Vector4(1, 0, 0, _Position.x),
new Vector4(0, 1, 0, _Position.y),
new Vector4(0, 0, 1, _Position.z),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Scale(float _x, float _y, float _z)
{
return new Matrix4(new Vector4(_x, 0, 0, 0),
new Vector4(0, _y, 0, 0),
new Vector4(0, 0, _z, 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateX(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4(1, 0, 0, 0),
new Vector4(0, (float)Math.Cos(_Radians), (float)Math.Sin(_Radians), 0),
new Vector4(0, -(float)Math.Sin(_Radians), (float)Math.Cos(_Radians), 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateY(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4((float)Math.Cos(_Radians), 0, -(float)Math.Sin(_Radians), 0),
new Vector4(0, 1, 0, 0),
new Vector4((float)Math.Sin(_Radians), 0, (float)Math.Cos(_Radians), 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 RotateZ(float _Angle)
{
double _Radians = Math_of_Rotation.Radians_of(_Angle);
return new Matrix4(new Vector4((float)Math.Cos(_Radians), -(float)Math.Sin(_Radians), 0, 0),
new Vector4((float)Math.Sin(_Radians), (float)Math.Cos(_Radians), 0, 0),
new Vector4(0, 0, 1, 0),
new Vector4(0, 0, 0, 1));
}
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4((_tanHalffov ) , 0 , 0 , 0),
new Vector4(0 , _tanHalffov , 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange , 2*_zNear*_zFar / _zRange),
new Vector4(0 , 0 , 1 , 0));
}
You need to transpose the matrix and invert some components if you want to do the same as Matrix4.CreatePerspectiveFieldOfView.
public static Matrix4 Projection(float _zNear, float _zFar, float _Width, float _Height, float _fov)
{
float _ar = _Width / _Height;
float _tanHalffov = (float)Math.Tan(Math_of_Rotation.Radians_of(_fov / 2));
float _zRange = _zFar - _zNear;
return new Matrix4(new Vector4(1/(_tanHalffov * _ar), 0 , 0 , 0),
new Vector4(0 , 1 / _tanHalffov, 0 , 0),
new Vector4(0 , 0 , -(_zFar + _zNear) / _zRange, -1),
new Vector4(0 , 0 , -2*_zNear*_zFar / _zRange , 0));
}
OpenGL matrices are stored in column-major order. The 1st column is the x-axis, followed by the y- and z-axes. The fourth column is the translation.
This means that each line (Vector4) represents one column of the matrix.
The usual OpenGL coordinate system is a right handed system. In view space the z-axis points against the line of sight. The Normalized Device Space is a left handed system. Therefore the z-axis is inverted by the projection matrix. See Left- vs. Right-handed coordinate systems.
You can find a working C#/OpenTK example at 3D Geometry
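As a quick cross-check (a hedged sketch; the sample parameter values are made up, and OpenTK's types are fully qualified because the engine defines its own Matrix4), the corrected matrix above should agree component for component with OpenTK's built-in perspective matrix:
var custom = Matrix4.Projection(0.1f, 100.0f, 800.0f, 600.0f, 60.0f);   // corrected version above, fov in degrees
var reference = OpenTK.Matrix4.CreatePerspectiveFieldOfView(
    OpenTK.MathHelper.DegreesToRadians(60.0f), 800.0f / 600.0f, 0.1f, 100.0f);
// Compare the components of 'custom' with the rows of 'reference'; they should match.
Console.WriteLine(reference);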
@Rabbid76 Thanks very much for your help. I solved the question. The answer is... I was never wrong. I ran through tons of tests and found that the results were indeed correct, and the outcome is supposed to look that weird. Because, if you think about it, the projection matrix makes x and y smaller as the fov gets larger (a wider view means smaller objects), but z is the input of a linear function
(_zFar + _zNear) / (_zFar - _zNear) * z + -2 * _zNear * _zFar / (_zFar - _zNear)
so when changing the fov, a cube's length along z never changes while its x and y get smaller; that's why it looks weird.
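A minimal scalar check of that claim (hedged, using the depth row of the corrected right-handed matrix from the answer above, where clip.w = -z_view):
float n = 0.1f, f = 100.0f, zRange = f - n;
// near plane (z_view = -n) and far plane (z_view = -f) in NDC:
float ndcNear = (-n * -(f + n) / zRange - 2.0f * n * f / zRange) / n;   // = -1
float ndcFar  = (-f * -(f + n) / zRange - 2.0f * n * f / zRange) / f;   // = +1
// These values do not involve the fov at all, while the x/y scale factors
// 1/(tan(fov/2) * aspect) and 1/tan(fov/2) shrink as the fov grows.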
As the hints from @Rabbid76 so kindly reminded me, my game engine is also a left-handed system, so the matrix is different.
Proof: a cube in Unity also looks weird when the fov is 90
And so does mine
I am very new to OpenGL and am using the latest version of OpenTK with C#.
My camera class currently does the following,
public Matrix4 GetProjectionMatrix()
{
return Matrix4.CreatePerspectiveFieldOfView(_fov, AspectRatio, 0.01f, 100f);
}
public Matrix4 GetViewMatrix()
{
Vector3 lookAt = new Vector3(myObject.Pos.X, myObject.Pos.Y, myObject.Pos.Z);
return Matrix4.LookAt(Position, lookAt, _up);
}
I have a slightly weird use case: my game window will be long, something like a 4:12 ratio, and it will present a long object. From my reading online, the best way to present this the way I want is to do a lens shift (oblique frustum).
I've seen articles online on how to do this, namely:
http://www.terathon.com/code/oblique.html
https://docs.unity3d.com/Manual/ObliqueFrustum.html
But I am having trouble translating this to OpenTK.
I was wondering if anyone on here has done something similar in OpenTK.
EDIT:
This kind of worked, but not quite what I was looking for :(
private float sgn(float a)
{
if (a > 0.0F) return (1.0F);
if (a < 0.0F) return (-1.0F);
return (0.0F);
}
public Matrix4 CreatePerspectiveFieldOfView(Matrix4 projectionMatrix)
{
Vector4 clipPlane = new Vector4(0.0f, 0.7f, 1.0f , 1.0f);
Vector4 q = new Vector4
{
X = (sgn(clipPlane.X) + projectionMatrix.M13) / projectionMatrix.M11,
Y = (sgn(clipPlane.Y) + projectionMatrix.M23) / projectionMatrix.M22,
Z = -1.0f,
W = (1.0F + projectionMatrix.M33) / projectionMatrix.M34
};
Vector4 c = clipPlane * (2.0F / Vector4.Dot(clipPlane, q));
projectionMatrix.M31 = c.X;
projectionMatrix.M32 = c.Y;
projectionMatrix.M33 = c.Z + 1.0f;
projectionMatrix.M34 = c.W;
return projectionMatrix;
}
EDIT 2:
Basically, what I am looking to do is bring the look-at point closer to the edge of the frustum, like so:
There are some obvious issues. OpenGL matrices are in column-major order. Hence, i is the column and j is the row for the Mij properties of Matrix4:
In the following, columns run from top to bottom and rows from left to right, because that is the representation of the fields of the matrix in memory and how a matrix "looks" in the debugger:
          row1  row2  row3  row4        indices
column1  (M11,  M12,  M13,  M14)    ( 0,  1,  2,  3)
column2  (M21,  M22,  M23,  M24)    ( 4,  5,  6,  7)
column3  (M31,  M32,  M33,  M34)    ( 8,  9, 10, 11)
column4  (M41,  M42,  M43,  M44)    (12, 13, 14, 15)
Thus
q.x = (sgn(clipPlane.x) + matrix[8]) / matrix[0];
q.y = (sgn(clipPlane.y) + matrix[9]) / matrix[5];
q.z = -1.0F;
q.w = (1.0F + matrix[10]) / matrix[14];
has to be translated to:
Vector4 q = new Vector4
{
X = (sgn(clipPlane.X) + projectionMatrix.M31) / projectionMatrix.M11,
Y = (sgn(clipPlane.Y) + projectionMatrix.M32) / projectionMatrix.M22,
Z = -1.0f,
W = (1.0f + projectionMatrix.M33) / projectionMatrix.M43
};
and
matrix[2] = c.x;
matrix[6] = c.y;
matrix[10] = c.z + 1.0F;
matrix[14] = c.w;
has to be
projectionMatrix.M13 = c.X;
projectionMatrix.M23 = c.Y;
projectionMatrix.M33 = c.Z + 1.0f;
projectionMatrix.M43 = c.W;
If you want an asymmetric perspective projection, then consider creating the projection matrix with Matrix4.CreatePerspectiveOffCenter.
public Matrix4 GetProjectionMatrix()
{
var offset_x = -0.0f;
var offset_y = -0.005f; // just for instance
var tan_fov_2 = (float)Math.Tan(_fov / 2.0);
var near = 0.01f;
var far = 100.0f;
var left = -near * AspectRatio * tan_fov_2 + offset_x;
var right = near * AspectRatio * tan_fov_2 + offset_x;
var bottom = -near * tan_fov_2 + offset_y;
var top = near * tan_fov_2 + offset_y;
return Matrix4.CreatePerspectiveOffCenter(left, right, bottom, top, near, far);
}
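One hedged sanity check for this: with offset_x = offset_y = 0 the frustum is symmetric again, so the result should match the original symmetric projection:
var offCenter = GetProjectionMatrix();   // with both offsets set to 0
var symmetric = Matrix4.CreatePerspectiveFieldOfView(_fov, AspectRatio, 0.01f, 100f);
// offCenter and symmetric should be (numerically) identical; a non-zero offset_y
// then shifts the frustum, pulling the look-at point toward one edge of the window.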
I have a big problem: I have an OpenTK window open where I draw textures, images, etc. I have to make a little video game this way for a test, and I'd like to show text on it that displays game info.
So far I have only been able to open a Windows Form with text, and that's not what I need.
Is there a way to show text in an OpenTK window?
I can't use OpenTK 3.0, so QuickFont has to be excluded.
I can use GL Class.
Thank you very much!
One possibility would be to use the FreeType library to load a TrueType font into texture objects.
SharpFont provides Cross-platform FreeType bindings for C#.
The source can be found at GitHub - Robmaister/SharpFont.
(x64 SharpFont.dll and freetype6.dll from MonoGame.Dependencies)
A full example can be found at GitHub - Rabbid76/c_sharp_opengl.
The example is based on LearnOpenGL - Text Rendering.
Load the font and glyph information for the characters and create a texture object for each character:
public struct Character
{
public int TextureID { get; set; }
public Vector2 Size { get; set; }
public Vector2 Bearing { get; set; }
public int Advance { get; set; }
}
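// The glyph cache used below and in RenderText is assumed to be a dictionary
// keyed by the character code (hypothetical field, requires System.Collections.Generic):
private readonly Dictionary<uint, Character> _characters = new Dictionary<uint, Character>();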
// initialize library
Library lib = new Library();
Face face = new Face(lib, "FreeSans.ttf");
face.SetPixelSizes(0, 32);
// set 1 byte pixel alignment
GL.PixelStore(PixelStoreParameter.UnpackAlignment, 1);
// Load first 128 characters of ASCII set
for (uint c = 0; c < 128; c++)
{
try
{
// load glyph
//face.LoadGlyph(c, LoadFlags.Render, LoadTarget.Normal);
face.LoadChar(c, LoadFlags.Render, LoadTarget.Normal);
GlyphSlot glyph = face.Glyph;
FTBitmap bitmap = glyph.Bitmap;
// create glyph texture
int texObj = GL.GenTexture();
GL.BindTexture(TextureTarget.Texture2D, texObj);
GL.TexImage2D(TextureTarget.Texture2D, 0,
PixelInternalFormat.R8, bitmap.Width, bitmap.Rows, 0,
PixelFormat.Red, PixelType.UnsignedByte, bitmap.Buffer);
// set texture parameters
GL.TextureParameter(texObj, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TextureParameter(texObj, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TextureParameter(texObj, TextureParameterName.TextureWrapS, (int)TextureWrapMode.ClampToEdge);
GL.TextureParameter(texObj, TextureParameterName.TextureWrapT, (int)TextureWrapMode.ClampToEdge);
// add character
Character ch = new Character();
ch.TextureID = texObj;
ch.Size = new Vector2(bitmap.Width, bitmap.Rows);
ch.Bearing = new Vector2(glyph.BitmapLeft, glyph.BitmapTop);
ch.Advance = (int)glyph.Advance.X.Value;
_characters.Add(c, ch);
}
catch (Exception ex)
{
Console.WriteLine(ex);
}
}
Create a Vertex Array Object which draws a quad with 2 triangles:
// bind default texture
GL.BindTexture(TextureTarget.Texture2D, 0);
// set default (4 byte) pixel alignment
GL.PixelStore(PixelStoreParameter.UnpackAlignment, 4);
float[] vquad =
{
// x y u v
0.0f, -1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f,
0.0f, -1.0f, 0.0f, 0.0f,
1.0f, 0.0f, 1.0f, 1.0f,
1.0f, -1.0f, 1.0f, 0.0f
};
// Create [Vertex Buffer Object](https://www.khronos.org/opengl/wiki/Vertex_Specification#Vertex_Buffer_Object)
_vbo = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, _vbo);
GL.BufferData(BufferTarget.ArrayBuffer, 4 * 6 * 4, vquad, BufferUsageHint.StaticDraw);
// [Vertex Array Object](https://www.khronos.org/opengl/wiki/Vertex_Specification#Vertex_Array_Object)
_vao = GL.GenVertexArray();
GL.BindVertexArray(_vao);
GL.EnableVertexAttribArray(0);
GL.VertexAttribPointer(0, 2, VertexAttribPointerType.Float, false, 4 * 4, 0);
GL.EnableVertexAttribArray(1);
GL.VertexAttribPointer(1, 2, VertexAttribPointerType.Float, false, 4 * 4, 2 * 4);
Furthermore, create a method which draws a string at a specified position with a given direction:
public void RenderText(string text, float x, float y, float scale, Vector2 dir)
{
GL.ActiveTexture(TextureUnit.Texture0);
GL.BindVertexArray(_vao);
float angle_rad = (float)Math.Atan2(dir.Y, dir.X);
Matrix4 rotateM = Matrix4.CreateRotationZ(angle_rad);
Matrix4 transOriginM = Matrix4.CreateTranslation(new Vector3(x, y, 0f));
// Iterate through all characters
float char_x = 0.0f;
foreach (var c in text)
{
if (_characters.ContainsKey(c) == false)
continue;
Character ch = _characters[c];
float w = ch.Size.X * scale;
float h = ch.Size.Y * scale;
float xrel = char_x + ch.Bearing.X * scale;
float yrel = (ch.Size.Y - ch.Bearing.Y) * scale;
// Now advance cursors for next glyph (note that advance is number of 1/64 pixels)
char_x += (ch.Advance >> 6) * scale; // Bitshift by 6 to get value in pixels (2^6 = 64 (divide amount of 1/64th pixels by 64 to get amount of pixels))
Matrix4 scaleM = Matrix4.CreateScale(new Vector3(w, h, 1.0f));
Matrix4 transRelM = Matrix4.CreateTranslation(new Vector3(xrel, yrel, 0.0f));
Matrix4 modelM = scaleM * transRelM * rotateM * transOriginM; // OpenTK `*`-operator is reversed
GL.UniformMatrix4(0, false, ref modelM);
// Render glyph texture over quad
GL.BindTexture(TextureTarget.Texture2D, ch.TextureID);
// Render quad
GL.DrawArrays(PrimitiveType.Triangles, 0, 6);
}
GL.BindVertexArray(0);
GL.BindTexture(TextureTarget.Texture2D, 0);
}
Vertex shader:
#version 460
layout (location = 0) in vec2 in_pos;
layout (location = 1) in vec2 in_uv;
out vec2 vUV;
layout (location = 0) uniform mat4 model;
layout (location = 1) uniform mat4 projection;
void main()
{
vUV = in_uv.xy;
gl_Position = projection * model * vec4(in_pos.xy, 0.0, 1.0);
}
Fragment shader:
#version 460
in vec2 vUV;
layout (binding=0) uniform sampler2D u_texture;
layout (location = 2) uniform vec3 textColor;
out vec4 fragColor;
void main()
{
vec2 uv = vUV.xy;
float text = texture(u_texture, uv).r;
fragColor = vec4(textColor.rgb*text, text);
}
See the example:
Matrix4 projectionM = Matrix4.CreateScale(new Vector3(1f/this.Width, 1f/this.Height, 1.0f));
projectionM = Matrix4.CreateOrthographicOffCenter(0.0f, this.Width, this.Height, 0.0f, -1.0f, 1.0f);
GL.ClearColor(0.2f, 0.3f, 0.3f, 1.0f);
GL.Clear(ClearBufferMask.ColorBufferBit);
GL.Enable(EnableCap.Blend);
GL.BlendFunc(BlendingFactor.SrcAlpha, BlendingFactor.OneMinusSrcAlpha);
text_prog.Use();
GL.UniformMatrix4(1, false, ref projectionM);
GL.Uniform3(2, new Vector3(0.5f, 0.8f, 0.2f));
font.RenderText("This is sample text", 25.0f, 50.0f, 1.2f, new Vector2(1f, 0f));
GL.Uniform3(2, new Vector3(0.3f, 0.7f, 0.9f));
font.RenderText("(C) LearnOpenGL.com", 50.0f, 200.0f, 0.9f, new Vector2(1.0f, -0.25f));
I need to know how to draw a transparent circle/ellipse using OpenTK in Xamarin.Forms.
I have tried creating vertex and fragment shaders with reference to the following link: How to draw circle on OpenGL ES 2.0 cross platform? But I did not get anything on screen; it is just showing blank.
Below is the sample code used to draw a circle with OpenTK in Xamarin.Forms.
GL.ClearColor(0.0f, 0.0f, 0.0f, 1.0f);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.UseProgram(programID);
// The locations where we pass in our color and vertex data
positionInput = GL.GetAttribLocation(programID, "Position");
colorInput = GL.GetAttribLocation(programID, "SourceColor");
// The locations where we pass in unchanging data
projectionInput = GL.GetUniformLocation(programID, "Projection");
modelviewInput = GL.GetUniformLocation(programID, "Modelview");
Matrix4 modelview = Matrix4.CreateRotationX(rotation) * Matrix4.CreateRotationY(rotation) * Matrix4.CreateRotationZ(rotation) * Matrix4.CreateTranslation(xTranslation, yTranslation, -7f);
GL.UniformMatrix4(modelviewInput, false, ref modelview);
float h = 4.0f * (height / width);
Matrix4 projection = Matrix4.CreatePerspectiveOffCenter(-2, 2, -h / 2f, h / 2f, 4, 10);
GL.Viewport(0, 0, (int)width, (int)height);
GL.UniformMatrix4(projectionInput, false, ref projection);
GL.BindBuffer(BufferTarget.ArrayBuffer, colorDataBuffer);
GL.EnableVertexAttribArray(colorInput);
GL.VertexAttribPointer(colorInput, 4, VertexAttribPointerType.Float, false, 0, 0);
float DEGREE_TO_RAD = (float)( 3.14 / 180);
int M_IN_DEGREE = 370;
int N_IN_DEGREE = 100;
int nCount = 1;
int index = 0;
int size = 2;
float[] stVertexArray = new float[2*360];
stVertexArray[0] = 0;
stVertexArray[1] = 0;
for( int nR =N_IN_DEGREE; nR < M_IN_DEGREE; nR++ )
{
float fX = (float) System.Math.Sin((float)nR * DEGREE_TO_RAD ) ;
float fY = (float) System.Math.Cos((float)nR * DEGREE_TO_RAD );
stVertexArray[nCount*2] = fX;
stVertexArray[nCount*2 + 1] = fY;
nCount++;
}
GL.BindBuffer(BufferTarget.ArrayBuffer, stVertexArray.Length);
GL.EnableVertexAttribArray(0);
GL.VertexAttribPointer (index,size, VertexAttribPointerType.Float, false, stVertexArray.Length, IntPtr.Zero);
GL.DrawElements(BeginMode.LineLoop, stVertexArray.Length, DrawElementsType.UnsignedByte,stVertexArray);
GL.Finish();