I'm writing a level editor in C# and OpenTK, and wanted to see whether a more complex shader would work. So I grabbed the Minecraft shader from https://www.shadertoy.com/view/MdlGz4.
But I can't get it to work, I keep getting this error:
Failed to link shader program!
Fragment info
-------------
Internal error: assembly compile error for fragment shader at offset 392911:
-- error message --
line 16405, column 1: error: too many instructions
-- internal assembly text --
!!NVfp5.0
OPTION NV_bindless_texture;
OPTION NV_shader_atomic_float;
# cgc version 3.1.0001, build date Feb 9 2013
# command line args:
#vendor NVIDIA Corporation
#version 3.1.0.1
#profile gp5fp
The Shadertoy version runs fine in my browser, so I know my hardware (a GTX 680) can handle it. I'm using #version 400 in the fragment shader, in the hope that a higher version allows for more instructions, but that did not help either.
So my question is, how do I have to load/compile such a long fragment shader?
Here's the actual shader that I'm trying to run:
Vertex:
#version 400
uniform mat4 camera;
uniform mat4 model;
in vec3 in_vertex;
in vec4 in_color;
in vec2 in_uv;
out vec2 frag_TexCoord;
out vec4 frag_Color;
void main()
{
gl_Position = camera * model * vec4(in_vertex, 1);
frag_TexCoord = in_uv;
frag_Color = in_color;
}
Fragment:
#version 400
uniform vec2 iResolution;
uniform float iGlobalTime;
in vec4 frag_Color;
in vec2 frag_TexCoord;
out vec4 final_color;
// Created by Reinder Nijhoff 2013
//
// port of javascript minecraft: http://jsfiddle.net/uzMPU/
// original code by Markus Persson: https://twitter.com/notch/status/275331530040160256
float hash( float n ) {
return fract(sin(n)*43758.5453);
}
// port of minecraft
bool getMaterialColor( int i, vec2 coord, out vec3 color ) {
// 16x16 tex
vec2 uv = floor( coord );
float n = uv.x + uv.y*347.0 + 4321.0 * float(i);
float h = hash(n);
float br = 1. - h * (96./255.);
color = vec3( 150./255., 108./255., 74./255.); // 0x966C4A;
if (i == 4) {
color = vec3( 127./255., 127./255., 127./255.); // 0x7F7F7F;
}
float xm1 = mod((uv.x * uv.x * 3. + uv.x * 81.) / 4., 4.);
if (i == 1) {
if( uv.y < (xm1 + 18.)) {
color = vec3( 106./255., 170./255., 64./255.); // 0x6AAA40;
} else if (uv.y < (xm1 + 19.)) {
br = br * (2. / 3.);
}
}
if (i == 7) {
color = vec3( 103./255., 82./255., 49./255.); // 0x675231;
if (uv.x > 0. && uv.x < 15.
&& ((uv.y > 0. && uv.y < 15.) || (uv.y > 32. && uv.y < 47.))) {
color = vec3( 188./255., 152./255., 98./255.); // 0xBC9862;
float xd = (uv.x - 7.);
float yd = (mod(uv.y, 16.) - 7.);
if (xd < 0.)
xd = 1. - xd;
if (yd < 0.)
yd = 1. - yd;
if (yd > xd)
xd = yd;
br = 1. - (h * (32./255.) + mod(xd, 4.) * (32./255.));
} else if ( h < 0.5 ) {
br = br * (1.5 - mod(uv.x, 2.));
}
}
if (i == 5) {
color = vec3( 181./255., 58./255., 21./255.); // 0xB53A15;
if ( mod(uv.x + (floor(uv.y / 4.) * 5.), 8.) == 0. || mod( uv.y, 4.) == 0.) {
color = vec3( 188./255., 175./255., 165./255.); // 0xBCAFA5;
}
}
if (i == 9) {
color = vec3( 64./255., 64./255., 255./255.); // 0x4040ff;
}
float brr = br;
if (uv.y >= 32.)
brr /= 2.;
if (i == 8) {
color = vec3( 80./255., 217./255., 55./255.); // 0x50D937;
if ( h < 0.5) {
return false;
}
}
color *= brr;
return true;
}
int getMap( vec3 pos ) {
vec3 posf = floor( (pos - vec3(32.)) );
float n = posf.x + posf.y*517.0 + 1313.0*posf.z;
float h = hash(n);
if( h > sqrt( sqrt( dot( posf.yz, posf.yz )*0.16 ) ) - 0.8 ) {
return 0;
}
return int( hash( n * 465.233 ) * 16. );
}
vec3 renderMinecraft( vec2 uv ) {
float xRot = sin( iGlobalTime*0.5 ) * 0.4 + (3.1415 / 2.);
float yRot = cos( iGlobalTime*0.5 ) * 0.4;
float yCos = cos(yRot);
float ySin = sin(yRot);
float xCos = cos(xRot);
float xSin = sin(xRot);
vec3 opos = vec3( 32.5 + iGlobalTime * 6.4, 32.5, 32.5 );
float gggxd = (uv.x - 0.5) * (iResolution.x / iResolution.y );
float ggyd = (1.-uv.y - 0.5);
float ggzd = 1.;
float gggzd = ggzd * yCos + ggyd * ySin;
vec3 _posd = vec3( gggxd * xCos + gggzd * xSin,
ggyd * yCos - ggzd * ySin,
gggzd * xCos - gggxd * xSin );
vec3 col = vec3( 0. );
float br = 1.;
vec3 bdist = vec3( 255. - 100., 255. - 0., 255. - 50. );
float ddist = 0.;
float closest = 32.;
for ( int d = 0; d < 3; d++) {
float dimLength = _posd[d];
float ll = abs( 1. / dimLength );
vec3 posd = _posd * ll;
float initial = fract( opos[d] );
if (dimLength > 0.) initial = 1. - initial;
float dist = ll * initial;
vec3 pos = opos + posd * initial;
if (dimLength < 0.) {
pos[d] -= 1.;
}
for (int i=0; i<30; i++) {
if( dist > closest )continue;
//int tex = getMap( mod( pos, 64. ) );
int tex = getMap( pos );
if (tex > 0) {
vec2 texcoord;
texcoord.x = mod(((pos.x + pos.z) * 16.), 16.);
texcoord.y = mod((pos.y * 16.), 16.) + 16.;
if (d == 1) {
texcoord.x = mod(pos.x * 16., 16.);
texcoord.y = mod(pos.z * 16., 16.);
if (posd.y < 0.)
texcoord.y += 32.;
}
if ( getMaterialColor( tex, texcoord, col ) ) {
ddist = 1. - (dist / 32.);
br = bdist[d];
closest = dist;
}
}
pos += posd;
dist += ll;
}
}
return col * ddist * (br/255.);
}
void main()
{
vec2 uv = frag_TexCoord.xy / iResolution.xy;
final_color = vec4( renderMinecraft( uv ) ,1.0);
}
Clarification as to where the error happens:
The first two steps, compiling the vertex and fragment shaders, work fine. It's the linking that fails: GL.GetProgram() returns 0 for LinkStatus, and GL.GetProgramInfoLog() returns the error above and lists the program as assembly code (# 19337 instructions, 8 R-regs). So it really does contain that many instructions.
_vertexShader = GL.CreateShader(ShaderType.VertexShader);
GL.ShaderSource(_vertexShader, vertexProgram);
GL.CompileShader(_vertexShader);
GL.GetShader(_vertexShader, ShaderParameter.CompileStatus, out result);
if (result == 0)
{
System.Diagnostics.Debug.WriteLine(GL.GetString(StringName.ShadingLanguageVersion));
System.Diagnostics.Debug.WriteLine(GL.GetShaderInfoLog(_vertexShader));
}
//Create Fragment Shader
_fragShader = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(_fragShader, fragmentProgram);
GL.CompileShader(_fragShader);
GL.GetShader(_fragShader, ShaderParameter.CompileStatus, out result);
if (result == 0)
{
System.Diagnostics.Debug.WriteLine(GL.GetString(StringName.ShadingLanguageVersion));
System.Diagnostics.Debug.WriteLine(GL.GetShaderInfoLog(_fragShader));
}
//Link to program
_shader = GL.CreateProgram();
GL.AttachShader(_shader, _vertexShader);
GL.AttachShader(_shader, _fragShader);
GL.LinkProgram(_shader);
GL.GetProgram(_shader, ProgramParameter.LinkStatus, out result);
if (result == 0)
{
System.Diagnostics.Debug.WriteLine("Failed to link shader program!");
System.Diagnostics.Debug.WriteLine(GL.GetProgramInfoLog(_shader));
GL.DeleteProgram(_shader);
}
Related
With the help of this link I can apply a projection to my texture.
Now I want to cut/remove an equal area from the top and bottom of my GLControl and then apply the same projection to the remaining area. I have tried the code below, but as shown in the image, the top and bottom curves are missing from the projection.
How can I bring them back in the remaining area?
precision highp float;
uniform sampler2D sTexture;
varying vec2 vTexCoord;
void main()
{
float img_h_px = 432.0; // height of the image in pixel
float area_h_px = 39.0; // area height in pixel
float w = area_h_px/img_h_px;
if (vTexCoord.y < w || vTexCoord.y > (1.0-w)){
gl_FragColor= vec4(1.0,0.0,1.0,1.0);
}
else
{
vec2 pos = vTexCoord.xy * 2.0 - 1.0;
float b = 0.5;
float v_scale = (1.0 + b) / (1.0 + b * sqrt(1.0 - pos.x*pos.x));
float u = asin( pos.x ) / 3.1415 + 0.5;
float v = (pos.y * v_scale) * 0.5 + 0.5;
if ( v < 0.0 || v > 1.0 )
discard;
vec3 texColor = texture2D( sTexture, vec2(u, v) ).rgb;
gl_FragColor = vec4( texColor.rgb, 1.0 );
}
}
The size of the bottom and top areas together, relative to the size of the control, is 2.0*area_h_px/img_h_px = 2.0*w.
The ratio (h_ratio) of the "visible" area to the control size is:
float w = area_h_px/img_h_px;
float h_ratio = 1.0 - 2.0*w;
You have to scale the y coordinate for the texture lookup by the ratio of the control size to the "visible" area, which is the reciprocal of h_ratio (1.0/h_ratio):
float v = (pos.y * v_scale / h_ratio) * 0.5 + 0.5;
Final shader:
precision highp float;
uniform sampler2D sTexture;
varying vec2 vTexCoord;
void main()
{
float img_h_px = 432.0; // height of the image in pixel
float area_h_px = 39.0; // area height in pixel
float w = area_h_px/img_h_px;
float h_ratio = 1.0 - 2.0*w;
vec2 pos = vTexCoord.xy * 2.0 - 1.0;
float b = 0.5;
float v_scale = (1.0 + b) / (1.0 + b * sqrt(1.0 - pos.x*pos.x));
float u = asin(pos.x) / 3.1415 + 0.5;
float v = (pos.y * v_scale / h_ratio) * 0.5 + 0.5;
vec3 texColor = texture2D(sTexture, vec2(u, v)).rgb;
vec4 color = vec4(texColor.rgb, 1.0);
if (vTexCoord.y < w || vTexCoord.y > (1.0-w))
color = vec4(1.0, 0.0, 1.0, 1.0);
else if (v < 0.0 || v > 1.0)
discard;
gl_FragColor = color;
}
If you want to tint the entire area in purple, then you have to set color instead of discarding the fragments:
if (v < 0.0 || v > 1.0)
color = vec4(1.0, 0.0, 1.0, 1.0);
I'm trying to implement, using SharpDX (Direct3D 11), a ray/mesh intersection method on the GPU. I've seen from an older post (Older post) that this can be done using a compute shader, but I need help creating and defining the buffers outside the .hlsl code.
My HLSL code is the following:
struct rayHit
{
float3 intersection;
};
cbuffer cbRaySettings : register(b0)
{
float3 rayFrom;
float3 rayDir;
uint TriangleCount;
};
StructuredBuffer<float3> positionBuffer : register(t0);
StructuredBuffer<uint3> indexBuffer : register(t1);
AppendStructuredBuffer<rayHit> appendRayHitBuffer : register(u0);
void TestTriangle(float3 p1, float3 p2, float3 p3, out bool hit, out float3 intersection)
{
//Perform ray/triangle intersection
//Compute vectors along two edges of the triangle.
float3 edge1, edge2;
float distance;
//Edge 1
edge1.x = p2.x - p1.x;
edge1.y = p2.y - p1.y;
edge1.z = p2.z - p1.z;
//Edge2
edge2.x = p3.x - p1.x;
edge2.y = p3.y - p1.y;
edge2.z = p3.z - p1.z;
//Cross product of ray direction and edge2 - first part of determinant.
float3 directioncrossedge2;
directioncrossedge2.x = (rayDir.y * edge2.z) - (rayDir.z * edge2.y);
directioncrossedge2.y = (rayDir.z * edge2.x) - (rayDir.x * edge2.z);
directioncrossedge2.z = (rayDir.x * edge2.y) - (rayDir.y * edge2.x);
//Compute the determinant.
float determinant;
//Dot product of edge1 and the first part of determinant.
determinant = (edge1.x * directioncrossedge2.x) + (edge1.y * directioncrossedge2.y) + (edge1.z * directioncrossedge2.z);
//If the ray is parallel to the triangle plane, there is no collision.
//This also means that we are not culling, the ray may hit both the
//back and the front of the triangle.
if (determinant == 0)
{
distance = 0.0f;
intersection = float3(0, 0, 0);
hit = false;
return;
}
float inversedeterminant = 1.0f / determinant;
//Calculate the U parameter of the intersection point.
float3 distanceVector;
distanceVector.x = rayFrom.x - p1.x;
distanceVector.y = rayFrom.y - p1.y;
distanceVector.z = rayFrom.z - p1.z;
float triangleU;
triangleU = (distanceVector.x * directioncrossedge2.x) + (distanceVector.y * directioncrossedge2.y) + (distanceVector.z * directioncrossedge2.z);
triangleU = triangleU * inversedeterminant;
//Make sure it is inside the triangle.
if (triangleU < 0.0f || triangleU > 1.0f)
{
distance = 0.0f;
intersection = float3(0, 0, 0);
hit = false;
return;
}
//Calculate the V parameter of the intersection point.
float3 distancecrossedge1;
distancecrossedge1.x = (distanceVector.y * edge1.z) - (distanceVector.z * edge1.y);
distancecrossedge1.y = (distanceVector.z * edge1.x) - (distanceVector.x * edge1.z);
distancecrossedge1.z = (distanceVector.x * edge1.y) - (distanceVector.y * edge1.x);
float triangleV;
triangleV = ((rayDir.x * distancecrossedge1.x) + (rayDir.y * distancecrossedge1.y)) + (rayDir.z * distancecrossedge1.z);
triangleV = triangleV * inversedeterminant;
//Make sure it is inside the triangle.
if (triangleV < 0.0f || triangleU + triangleV > 1.0f)
{
distance = 0.0f;
intersection = float3(0, 0, 0);
hit = false;
return;
}
//Compute the distance along the ray to the triangle.
float raydistance;
raydistance = (edge2.x * distancecrossedge1.x) + (edge2.y * distancecrossedge1.y) + (edge2.z * distancecrossedge1.z);
raydistance = raydistance * inversedeterminant;
//Is the triangle behind the ray origin?
if (raydistance < 0.0f)
{
distance = 0.0f;
intersection = float3(0, 0, 0);
hit = false;
return;
}
distance = raydistance;
intersection = rayFrom + (rayDir * distance);
hit = true;
}
[numthreads(64, 1, 1)]
void CS_RayAppend(uint3 tid : SV_DispatchThreadID)
{
if (tid.x >= TriangleCount)
return;
uint3 indices = indexBuffer[tid.x];
float3 p1 = positionBuffer[indices.x];
float3 p2 = positionBuffer[indices.y];
float3 p3 = positionBuffer[indices.z];
bool hit;
float3 p;
TestTriangle(p1, p2, p3, hit, p);
if (hit)
{
rayHit hitData;
hitData.intersection = p;
appendRayHitBuffer.Append(hitData);
}
}
The following is part of my C# implementation, but I'm not able to understand how to load the buffers for the compute shader.
int count = obj.Mesh.Triangles.Count;
int size = 12; // sizeof(float3), one intersection per hit
BufferDescription bufferDesc = new BufferDescription() {
BindFlags = BindFlags.UnorderedAccess | BindFlags.ShaderResource,
Usage = ResourceUsage.Default,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.BufferStructured,
StructureByteStride = size,
SizeInBytes = size * count
};
Buffer buffer = new Buffer(device, bufferDesc);
UnorderedAccessViewDescription uavDescription = new UnorderedAccessViewDescription() {
Buffer = new UnorderedAccessViewDescription.BufferResource() { FirstElement = 0, Flags = UnorderedAccessViewBufferFlags.Append, ElementCount = count },
Format = SharpDX.DXGI.Format.Unknown,
Dimension = UnorderedAccessViewDimension.Buffer
};
UnorderedAccessView uav = new UnorderedAccessView(device, buffer, uavDescription);
context.ComputeShader.SetUnorderedAccessView(0, uav);
var code = HLSLCompiler.CompileFromFile(@"Shaders\TestTriangle.hlsl", "CS_RayAppend", "cs_5_0");
ComputeShader _shader = new ComputeShader(device, code);
Buffer positionsBuffer = new Buffer(device, Utilities.SizeOf<Vector3>(), ResourceUsage.Default, BindFlags.None, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
context.UpdateSubresource(ref data, positionsBuffer);
context.ComputeShader.Set(_shader);
Inside my C# implementation I'm considering only one ray (with its origin and direction), and I would like to use the shader to check the intersection with all the triangles of the mesh. I'm already able to do that using the CPU, but for 20k+ triangles the computation takes too long, even though I'm already using parallel code.
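Based on the SharpDX samples, I believe the input side should look roughly like the sketch below. This is untested: vertices (a Vector3[]) and indices (a uint[] with three indices per triangle) are placeholder arrays for my mesh data, RaySettings is a struct I made up to match cbRaySettings (the float3/uint packing pads it to 32 bytes), and device, context, _shader and uav are the objects from the code above.
// Struct matching cbRaySettings (b0); needs System.Runtime.InteropServices.
[StructLayout(LayoutKind.Sequential)]
struct RaySettings
{
    public Vector3 RayFrom; private float _pad0; // a float3 is padded to a full 16-byte register
    public Vector3 RayDir; public uint TriangleCount;
}

// Structured input buffer for positionBuffer (t0), plus its shader resource view.
var positionsBuffer = SharpDX.Direct3D11.Buffer.Create(device, BindFlags.ShaderResource, vertices,
    0, ResourceUsage.Immutable, CpuAccessFlags.None,
    ResourceOptionFlags.BufferStructured, Utilities.SizeOf<Vector3>());
var positionsView = new ShaderResourceView(device, positionsBuffer, new ShaderResourceViewDescription()
{
    Format = SharpDX.DXGI.Format.Unknown,
    Dimension = ShaderResourceViewDimension.ExtendedBuffer,
    BufferEx = new ShaderResourceViewDescription.ExtendedBufferResource() { FirstElement = 0, ElementCount = vertices.Length }
});
// indexBuffer (t1) is created the same way from 'indices', with StructureByteStride = 3 * sizeof(uint).

// Constant buffer (b0), filled from the CPU with UpdateSubresource.
var settingsBuffer = new SharpDX.Direct3D11.Buffer(device, Utilities.SizeOf<RaySettings>(),
    ResourceUsage.Default, BindFlags.ConstantBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
var settings = new RaySettings { RayFrom = rayFrom, RayDir = rayDir, TriangleCount = (uint)count };
context.UpdateSubresource(ref settings, settingsBuffer);

// Bind everything and launch one thread per triangle, in groups of 64 to match [numthreads(64, 1, 1)].
context.ComputeShader.Set(_shader);
context.ComputeShader.SetConstantBuffer(0, settingsBuffer);
context.ComputeShader.SetShaderResource(0, positionsView);
context.ComputeShader.SetShaderResource(1, indicesView);
context.ComputeShader.SetUnorderedAccessView(0, uav);
context.Dispatch((count + 63) / 64, 1, 1);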
I just realized that a global variable in Unity must be initialized in a function. Being new to shaders, I can't tell if I am doing this the wrong way.
For example, when I define a const variable:
const float PI = 3.14159265359;
then try to write code that uses it in the frag function:
fixed4 frag(v2f i) : SV_Target
{
float result = PI * otherVariable;
///Use the result value...
}
It doesn't work. I get a black screen as the result, or the expected result won't show up.
The confusing part is that when I re-initialize the global variable (PI) in the frag function, I get the expected result.
For example, this is what works:
float PI = 3.14159265359;
then write code that uses it in the frag function:
fixed4 frag(v2f i) : SV_Target
{
//re-initialize
PI = 3.14159265359;
float result = PI * otherVariable;
}
I have so many constant global variables that I can't keep re-initializing them all.
One complete example of this problem is this Shadertoy code I ported.
Shader "Unlit/Atmospheric Scattering 2"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
//https://www.shadertoy.com/view/lslXDr
// math const
float PI = 3.14159265359;
float DEG_TO_RAD = (3.14159265359 / 180.0);
float MAX = 10000.0;
// scatter const
float K_R = 0.166;
float K_M = 0.0025;
float E = 14.3;
float3 C_R = float3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4
float G_M = -0.85; // Mie g
float R = 1.0;
float R_INNER = 0.7;
float SCALE_H = 4.0 / (1.0 - 0.7);
float SCALE_L = 1.0 / (1.0 - 0.7);
int NUM_OUT_SCATTER = 10;
float FNUM_OUT_SCATTER = 10.0;
int NUM_IN_SCATTER = 10;
float FNUM_IN_SCATTER = 10.0;
// angle : pitch, yaw
float3x3 rot3xy(float2 angle) {
float2 c = cos(angle);
float2 s = sin(angle);
return float3x3(
c.y, 0.0, -s.y,
s.y * s.x, c.x, c.y * s.x,
s.y * c.x, -s.x, c.y * c.x
);
}
// ray direction
float3 ray_dir(float fov, float2 size, float2 pos) {
float2 xy = pos - size * 0.5;
float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
float z = size.y * 0.5 * cot_half_fov;
return normalize(float3(xy, -z));
}
// ray intersects sphere
// e = -b +/- sqrt( b^2 - c )
float2 ray_vs_sphere(float3 p, float3 dir, float r) {
float b = dot(p, dir);
float c = dot(p, p) - r * r;
float d = b * b - c;
if (d < 0.0) {
return float2(MAX, -MAX);
}
d = sqrt(d);
return float2(-b - d, -b + d);
}
// Mie
// g : ( -0.75, -0.999 )
// 3 * ( 1 - g^2 ) 1 + c^2
// F = ----------------- * -------------------------------
// 2 * ( 2 + g^2 ) ( 1 + g^2 - 2 * g * c )^(3/2)
float phase_mie(float g, float c, float cc) {
float gg = g * g;
float a = (1.0 - gg) * (1.0 + cc);
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt(b);
b *= 2.0 + gg;
//b = mul(b,sqrt(b));
//b = mul(b,2.0 + gg);
return 1.5 * a / b;
}
// Reyleigh
// g : 0
// F = 3/4 * ( 1 + c^2 )
float phase_reyleigh(float cc) {
return 0.75 * (1.0 + cc);
}
float density(float3 p) {
return exp(-(length(p) - R_INNER) * SCALE_H);
}
float optic(float3 p, float3 q) {
float3 step = (q - p) / FNUM_OUT_SCATTER;
float3 v = p + step * 0.5;
float sum = 0.0;
for (int i = 0; i < NUM_OUT_SCATTER; i++) {
sum += density(v);
v += step;
}
sum *= length(step) * SCALE_L;
//sum = mul(sum,length(step) * SCALE_L);
return sum;
}
float3 in_scatter(float3 o, float3 dir, float2 e, float3 l) {
float len = (e.y - e.x) / FNUM_IN_SCATTER;
float3 step = dir * len;
float3 p = o + dir * e.x;
float3 v = p + dir * (len * 0.5);
float3 sum = float3(0.,0.,0.);
for (int i = 0; i < NUM_IN_SCATTER; i++) {
float2 f = ray_vs_sphere(v, l, R);
float3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u)) * (PI * 4.0);
sum += density(v) * exp(-n * (K_R * C_R + K_M));
v += step;
}
sum *= len * SCALE_L;
//sum = mul(sum,len * SCALE_L);
float c = dot(dir, -l);
float cc = c * c;
return sum * (K_R * C_R * phase_reyleigh(cc) + K_M * phase_mie(G_M, c, cc)) * E;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
fixed4 frag(v2f i) : SV_Target
{
//Re-initialize BEGIN
// math const
PI = 3.14159265359;
DEG_TO_RAD = (3.14159265359 / 180.0);
MAX = 10000.0;
// scatter const
K_R = 0.166;
K_M = 0.0025;
E = 14.3;
C_R = float3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4
G_M = -0.85; // Mie g
R = 1.0;
R_INNER = 0.7;
SCALE_H = 4.0 / (1.0 - 0.7);
SCALE_L = 1.0 / (1.0 - 0.7);
NUM_OUT_SCATTER = 10;
FNUM_OUT_SCATTER = 10.0;
NUM_IN_SCATTER = 10;
FNUM_IN_SCATTER = 10.0;
//Re-initialize END
float4 fragColor = 0;
float2 fragCoord = i.vertex.xy;
// default ray dir
float3 dir = ray_dir(45.0, _ScreenParams.xy, fragCoord.xy);
// default ray origin
float3 eye = float3(0.0, 0.0, 2.4);
// rotate camera
float3x3 rot = rot3xy(float2(0.0, _Time.y * 0.5));
/* dir = rot * dir;
eye = rot * eye;*/
dir = mul(rot,dir);
eye = mul(rot,eye);
// sun light dir
float3 l = float3(0, 0, 1);
float2 e = ray_vs_sphere(eye, dir, R);
if (e.x > e.y) {
discard;
}
float2 f = ray_vs_sphere(eye, dir, R_INNER);
e.y = min(e.y, f.x);
float3 I = in_scatter(eye, dir, e, l);
fragColor = float4(I, 1.0);
return fragColor;
}
ENDCG
}
}
}
This shader is attached to a Material that is then attached to the camera with the code below:
[ExecuteInEditMode]
public class CameraEffect : MonoBehaviour
{
public Material mat;
// Called by camera to apply image effect
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
if (mat != null)
{
Graphics.Blit(source, destination, mat);
}
else
{
Graphics.Blit(source, destination);
}
}
}
It works fine, but notice how I had to re-initialize PI, DEG_TO_RAD, MAX and the other global variables. Without doing that it won't work; the screen is simply black, and this is not the first shader that has caused this same issue.
Why is this happening?
Am I declaring the variables the wrong way?
Hi!
You can write any constants like this:
//float my_constant = 104.3;
#define my_constant 104.3f
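The reason for the black screen: in HLSL, a global variable that is not marked static is treated as a uniform, so its initializer is ignored and Unity leaves it at zero unless a script or material sets it. That is also why re-initializing inside frag works. If you want typed constants instead of #define, static const keeps the initializer, e.g.:
static const float PI = 3.14159265359;
static const float3 C_R = float3(0.3, 0.7, 1.0);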
You can also create a custom library for shaders and use variables and functions as you wish. For example, create a file named MyConstants.cginc and put it in your project. Code:
#ifndef MY_CONSTANTS_INCLUDED
#define MY_CONSTANTS_INCLUDED
#define DEG_TO_RAD 0.01745329251994f
#define MAX 10000.0f
#define PI 3.14159265359f
//scatter const
// .....
// .....
float3 ray_dir(float fov, float2 size, float2 pos) {
float2 xy = pos - size * 0.5;
float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
float z = size.y * 0.5 * cot_half_fov;
return normalize(float3(xy, -z));
}
//.... and other methods
#endif
And use the library in your shader:
//.....
#include "UnityCG.cginc"
#include "MyConstants.cginc"
//.....
I'm using a simple attenuation algorithm to darken walls based on their distance from light sources.
The end goal is to develop a light-mapping system in which the brightness for each wall is calculated in a pre-pass (including shadowing from other walls), and then that light-map image is blended with the wall texture.
[image: wall texture] + [image: light-map] = [image: blended result]
Besides shadowing, I have the light-maps working, and the results replicate the shader code exactly. The problem is that it is slow, and adding raycasted shadow checking is only going to make it worse.
My question is this: how can I perform these calculations on the GPU? Is a third-party library/module required, or can it be done natively through OpenGL (OpenTK in my case)?
Alternatively, I'd be happy to switch to deferred rendering/lighting with cube shadow mapping, but I've yet to come across any information I can get my head around.
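From what I've read, the pre-pass should be doable natively: render each wall's light-map into a texture through a framebuffer object, with the attenuation math running in a fragment shader, then blend that texture with the wall texture as usual. Below is only a rough, untested sketch of the FBO setup in OpenTK; w, h and the quad-drawing step are placeholders.
// Render-to-texture sketch (OpenTK). The light-map shader itself is not shown.
int fbo = GL.GenFramebuffer();
int lightMapTex = GL.GenTexture();

GL.BindTexture(TextureTarget.Texture2D, lightMapTex);
GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba8,
    w, h, 0, PixelFormat.Rgba, PixelType.UnsignedByte, IntPtr.Zero);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);

GL.BindFramebuffer(FramebufferTarget.Framebuffer, fbo);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0,
    TextureTarget.Texture2D, lightMapTex, 0);
if (GL.CheckFramebufferStatus(FramebufferTarget.Framebuffer) != FramebufferErrorCode.FramebufferComplete)
    throw new Exception("Light-map framebuffer is incomplete");

// Draw a full-screen quad with the attenuation fragment shader; the result lands in lightMapTex.
GL.Viewport(0, 0, w, h);
// ... draw quad here ...
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);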
c# lightmap (run once for each wall)
public void createLightMap()
{
// Determine Light Map dimensions
int LightMapSize = 300;
int w = (int)(this.Width * LightMapSize);
int h = (int)(this.Height * LightMapSize);
// Create Bitmap
Bitmap bitmap = new Bitmap(w, h);
// Fragment testing
Vector3 fragmentPosition = new Vector3(this.x2, this.Height, this.z2);
float xIncrement = (1f / LightMapSize) * ((x2 - x) / this.Width);
float zIncrement = (1f / LightMapSize) * ((z2 - z) / this.Width);
float yIncrement = (1f / LightMapSize);
// Calculate Light value for each pixel
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++)
{
// Update fragment position
fragmentPosition.X = this.x2 - xIncrement - (xIncrement * x);
fragmentPosition.Z = this.z2 - (zIncrement * x);
fragmentPosition.Y = this.Height - (yIncrement * y);
Vector3 totalDiffuse = Vector3.Zero;
// Iterate through the lights
for (int n = 0; n < 2; n++)
{
Light light = Game.lights[n];
Vector3 LightPosition = new Vector3(light.Position);
Vector3 Attenuation = new Vector3(light.Attenuation);
Vector3 Colour = new Vector3(light.Colour);
Vector3 toLightVector = LightPosition - fragmentPosition;
// Return early if wall is facing away from light
if (Vector3.Dot(this.normalVector, toLightVector.Normalized()) < 0)
continue;
// Calculate vector length (aka, distance from lightsource)
float distance = (float)Math.Sqrt(toLightVector.X * toLightVector.X + toLightVector.Y * toLightVector.Y + toLightVector.Z * toLightVector.Z);
// Attenuation
float attFactor = Attenuation.X + (Attenuation.Y * distance) + (Attenuation.Z * distance * distance);
Vector3 diffuse = Colour / attFactor;
totalDiffuse += diffuse;
}
// Create bitmap
var r = (int)(totalDiffuse.X * 256);
var g = (int)(totalDiffuse.Y * 256);
var b = (int)(totalDiffuse.Z * 256);
r = Math.Min(r, 255);
g = Math.Min(g, 255);
b = Math.Min(b, 255);
// Set Pixel
bitmap.SetPixel(x, y, Color.FromArgb(r, g, b));
}
}
this.LightMapTextureID = Texture.loadImage(bitmap);
}
Fragment shader (an alternative to above light-mapping, creating the same effect)
#version 330
precision highp float;
in vec2 frag_texcoord;
in vec3 toLightVector[8];
uniform sampler2D MyTexture0;
uniform vec3 LightColour[8];
uniform vec3 LightAttenuation[8];
uniform float NumberOfLights;
out vec4 finalColor;
void main(void)
{
vec3 totalDiffuse = vec3(0.0);
for (int i=0; i<NumberOfLights; i++) {
float distance = length(toLightVector[i]);
float attFactor = LightAttenuation[i].x + (LightAttenuation[i].y * distance) + (LightAttenuation[i].z * distance * distance);
vec3 diffuse = (LightColour[i]) / attFactor;
totalDiffuse += diffuse;
}
finalColor = vec4(totalDiffuse, 1.0) * texture(MyTexture0, frag_texcoord);
}
I'm writing a game using the XNA 4.0 framework. I've written a set of methods that translates the 2D mouse coordinates to a line in the 3d world, then checks to see if that line intersects a plane, and if the intersection point is within the bounds of a face in that plane.
The math works, but for some reason when I do these calculations over 500 times a frame, it brings the program to a halt. I can watch the memory usage climb from 15 MB at startup to about 130 MB before garbage collection decides to clean things up. I know specifically it is in this code, because when I comment it out, everything else runs smoothly.
I'll paste my code below, any insight would be helpful and thank you!
The Loop:
GraphicObject me = new GraphicObject();
Intersection intersect;
double? dist = null;
foreach (GraphicObject obj in GraphicObjects)
{
intersect = obj.intersectMe(line);
if (intersect.Distance != null)
{
if (intersect.Distance < dist || dist == null)
{
dist = intersect.Distance;
me = obj;
}
else
{
obj.Highlight(false);
}
}
else
{
obj.Highlight(false);
}
}
if (dist != null)
{
me.Highlight(true);
}
intersectMe:
public override Intersection intersectMe(Ray _line)
{
GraphicsHelper.Intersects(_line, rect.Vertices[0].Normal, rect.Vertices[0].Position, intersect);
if (intersect.Distance != null)
{
if (!rect.PointOnMe(intersect.X - position.X, intersect.Y - position.Y, intersect.Z - position.Z))
{
intersect.Distance = null;
}
}
return intersect;
}
GraphicsHelper.Intersects:
// _l = line, _n = normal to plane, _p = point on the plane
public static void Intersects(Ray _l, Vector3 _n, Vector3 _p, Intersection _i)
{
_i.Distance = null;
float num = (_n.X * (_p.X - _l.Position.X) + _n.Y * (_p.Y - _l.Position.Y) + _n.Z * (_p.Z - _l.Position.Z));
float denom = (_n.X * _l.Direction.X + _n.Y * _l.Direction.Y + _n.Z * _l.Direction.Z);
if (denom != 0 && num != 0)
{
float t = num / denom;
if (t > 0)
{
_i.X = _l.Position.X + _l.Direction.X * t;
_i.Y = _l.Position.Y + _l.Direction.Y * t;
_i.Z = _l.Position.Z + _l.Direction.Z * t;
_i.Distance = _i.X * _i.X + _i.Y * _i.Y + _i.Z * _i.Z;
}
}
}
PointOnMe:
public bool PointOnMe(float _x, float _y, float _z)
{
float ex = _x - Vertices[3].Position.X;
float ey = _y - Vertices[3].Position.Y;
float ez = _z - Vertices[3].Position.Z;
float ae = a.X * ex + a.Y * ey + a.Z * ez;
float be = b.X * ex + b.Y * ey + b.Z * ez;
ex = _x - Vertices[1].Position.X;
ey = _y - Vertices[1].Position.Y;
ez = _z - Vertices[1].Position.Z;
float ce = c.X * ex + c.Y * ey + c.Z * ez;
float de = d.X * ex + d.Y * ey + d.Z * ez;
if (ae > 0 && be > 0 && ce > 0 && de > 0)
{
return true;
}
else
{
return false;
}
}
Thank you all for taking some time to look at this for me. The error was actually in how I handle obj.Highlight(); TaW's kick in the butt to get a profiler set up helped me figure that out.
public override void Highlight(bool toggle)
{
if(toggle)
{
rect.Texture = new Texture2D(GraphicsManager.Graphics.GraphicsDevice, 1, 1);
rect.Texture.SetData<Color>(new Color[] { Color.Yellow });
}
else
{
rect.Texture = new Texture2D(GraphicsManager.Graphics.GraphicsDevice, 1, 1);
rect.Texture.SetData<Color>(new Color[] { squareColor });
}
}
Every frame, all the objects were having new textures generated. A terrible way to do things.
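For anyone who finds this later, the fix was to create the two 1x1 textures once and have Highlight only swap references; a sketch, with highlightTexture and normalTexture as field names I made up:
// Created once, e.g. in the constructor or LoadContent, instead of on every call:
highlightTexture = new Texture2D(GraphicsManager.Graphics.GraphicsDevice, 1, 1);
highlightTexture.SetData<Color>(new Color[] { Color.Yellow });
normalTexture = new Texture2D(GraphicsManager.Graphics.GraphicsDevice, 1, 1);
normalTexture.SetData<Color>(new Color[] { squareColor });

public override void Highlight(bool toggle)
{
    // Swap references instead of allocating new GPU resources every frame.
    rect.Texture = toggle ? highlightTexture : normalTexture;
}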