Where does Unity discard vertices before the shader call? - C#

I'm trying to make a game with periodic boundary conditions (so basically, if you are on the right edge of the map and look right, you should see the whole map again).
I made a geometry shader which takes one triangle and outputs the 27 triangles I need (a 3*3*3 periodic box), and it works perfectly while the base object is on the screen. As soon as the base object leaves the screen, all the copies disappear too.
So I think Unity does not even call my shaders on vertices that are behind the camera (which is totally fine for optimization), but with my current solution I need the call to happen for every object in the game.
Is it possible to force Unity not to discard any objects before rendering, or should I look for a different solution? If I have to do something else, do you have any ideas?
Here's my current code. (I'm new to shaders, so it might be naive...)
Shader "Unlit/Geom_shader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma geometry geom
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD0;
};
struct v2g {
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
};
struct g2f {
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
};
sampler2D _MainTex;
float4 _MainTex_ST;
v2g vert (appdata v)
{
v2g o;
o.vertex = v.vertex;
o.uv = v.uv;
return o;
}
[maxvertexcount(3*3*3*3)]
void geom(triangle v2g input[3], inout TriangleStream<g2f> tristream) {
g2f o = (g2f)0;
for (int x = 0; x < 3; x++)
{
float x_shift = (x - 1) * 2*_SinTime[2];
for (int y = 0; y < 3; y++)
{
float y_shift = (y - 1) * 2 * _SinTime[2];
for (int z = 0; z < 3; z++)
{
float z_shift = (z - 1) * 2 * _CosTime[2];
for (int i = 0; i < 3; i++)
{
o.vertex = UnityObjectToClipPos(input[i].vertex + float4(x_shift, y_shift, z_shift, 0));
o.uv = TRANSFORM_TEX(input[i].uv, _MainTex);
tristream.Append(o);
}
tristream.RestartStrip();
}
}
}
}
fixed4 frag (g2f i) : SV_Target
{
fixed4 col = tex2D(_MainTex, i.uv);
return col;
}
ENDCG
}
}
}
I'm not trying to use this for lights but for whole objects, and I'm sure they are handled differently. So the reason it happens may be the same, but the solution must be different. (That's why it's not a duplicate of: Unity3D - Light deactivated when facing opposite direction.)

It's not the vertices being discarded; it's the entire mesh being culled as a whole.
Unity implements view frustum culling based on the bounding box of your mesh. You can change the bounding box of a mesh manually by assigning a new value to Mesh.bounds.
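A minimal sketch of that workaround, assuming the object with the geometry shader has a MeshFilter on the same GameObject (the size used below is only illustrative; it should be large enough to cover the whole 3*3*3 periodic volume the geometry shader emits):
using UnityEngine;
public class AlwaysVisibleBounds : MonoBehaviour
{
    void Start()
    {
        // Enlarge the bounding box so view frustum culling never rejects the mesh,
        // which keeps the geometry shader running even when the base object is off-screen.
        var mesh = GetComponent<MeshFilter>().mesh;
        mesh.bounds = new Bounds(Vector3.zero, Vector3.one * 1000f);
    }
}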

Related

Can a shader rotate shapes to face camera?

I made a scene where balls appear in 3D space. Balls built from triangle meshes use a lot of resources, so I did this using 2D surfaces (quads) with a ball texture. But now I need to adjust the orientation of each quad every time the camera moves. I do this with a position transformation and the LookAt method. The question is: can I optimize this? If it is possible to rotate the shapes with a shader, that would help greatly.
using UnityEngine;

public class WorldSurf : MonoBehaviour
{
    GameObject[] matrix;
    int xSize = 20;
    int ySize = 20;
    int zSize = 20;

    // Start is called before the first frame update
    void Start()
    {
        matrix = new GameObject[xSize * ySize * zSize];
        //var shader = Shader.Find("Legacy Shaders/Diffuse");
        var shader = Shader.Find("Sprites/Default");
        //var texture = Resources.Load<Texture>("Textures/Ball_01");
        var i = 0;
        for (var x = 0f; x < xSize; ++x)
        {
            for (var y = 0f; y < ySize; ++y)
            {
                for (var z = 0f; z < zSize; ++z)
                {
                    var texture = Resources.Load<Texture>("Textures/Ball_" + ((int)Random.Range(0, 15)).ToString("00"));
                    matrix[i++] = CreateQuad(x * 3, y * 3, z * 3, shader, texture);
                }
            }
        }
    }

    static GameObject CreateQuad(float x, float y, float z, Shader shader, Texture texture)
    {
        var quad = GameObject.CreatePrimitive(PrimitiveType.Quad);
        quad.transform.position = new Vector3(x, y, z);
        quad.transform.forward = Camera.main.transform.forward;
        var rend = quad.GetComponent<Renderer>();
        rend.material.shader = shader;
        rend.material.mainTexture = texture;
        //rend.material.color = Color.red;
        return quad;
    }

    // Update is called once per frame
    void Update()
    {
        var pos = Camera.main.transform.position;
        foreach (var itm in matrix)
        {
            itm.transform.LookAt(pos);
        }
    }
}
Generally yes, and this specific case, where you want a quad to align with the camera, makes doing so very easy.
What you want is called a "billboard shader". Here is an example from Wikibooks:
Shader "Cg shader for billboards" {
Properties {
_MainTex ("Texture Image", 2D) = "white" {}
_ScaleX ("Scale X", Float) = 1.0
_ScaleY ("Scale Y", Float) = 1.0
}
SubShader {
Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"}
ZWrite Off
Blend SrcAlpha OneMinusSrcAlpha
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// User-specified uniforms
uniform sampler2D _MainTex;
uniform float _ScaleX;
uniform float _ScaleY;
struct vertexInput {
float4 vertex : POSITION;
float4 tex : TEXCOORD0;
};
struct vertexOutput {
float4 pos : SV_POSITION;
float4 tex : TEXCOORD0;
};
vertexOutput vert(vertexInput input)
{
vertexOutput output;
output.pos = mul(UNITY_MATRIX_P,
mul(UNITY_MATRIX_MV, float4(0.0, 0.0, 0.0, 1.0))
+ float4(input.vertex.x, input.vertex.y, 0.0, 0.0)
* float4(_ScaleX, _ScaleY, 1.0, 1.0));
output.tex = input.tex;
return output;
}
float4 frag(vertexOutput input) : COLOR
{
return tex2D(_MainTex, float2(input.tex.xy));
}
ENDCG
}
}
}
And an explanation of how this works:
The basic idea is to transform only the origin (0, 0, 0, 1) of object space to view space with the standard model-view transformation UNITY_MATRIX_MV. (In homogeneous coordinates all points have a 1 as fourth coordinate; see the discussion in Section "Vertex Transformations".) View space is just a rotated version of world space with the xy plane parallel to the view plane, as discussed in Section "Vertex Transformations". Thus, this is the correct space to construct an appropriately rotated billboard. We add the x and y object coordinates (vertex.x and vertex.y) to the transformed origin in view coordinates and then transform the result with the projection matrix UNITY_MATRIX_P.
This will produce an output like this:
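With a billboard shader like this, the per-frame LookAt loop in Update is no longer needed; each quad only needs the billboard material assigned once. A minimal sketch of how CreateQuad from the question could be adapted (the shader name matches the Wikibooks example above; everything else follows the question's own setup):
static GameObject CreateQuad(float x, float y, float z, Shader billboardShader, Texture texture)
{
    var quad = GameObject.CreatePrimitive(PrimitiveType.Quad);
    quad.transform.position = new Vector3(x, y, z);
    var rend = quad.GetComponent<Renderer>();
    rend.material.shader = billboardShader; // e.g. Shader.Find("Cg shader for billboards")
    rend.material.mainTexture = texture;
    return quad;
}
// The Update() method that calls LookAt on every quad can then be removed entirely.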

Black hole distortion shader in Unity

I found shader code that has the effect of warping space around a certain point. It's a cool effect, but it was missing some animation, so I've added something to it:
Shader "Marek/BlackHoleDistortion"
{
Properties {
_DistortionStrength ("Distortion Strength", Range(0, 10)) = 0
_Timer("Timer", Range(0, 10)) = 0
_HoleSize ("Hole Size", Range(0, 1)) = 0.1736101
_HoleEdgeSmoothness ("Hole Edge Smoothness", Range(1, 4)) = 4
_ObjectEdgeArtifactFix ("Object Edge Artifact Fix", Range(1, 10)) = 1
}
SubShader {
Tags {
"IgnoreProjector"="True"
"Queue"="Transparent"
"RenderType"="Transparent"
}
GrabPass{ }
Pass {
Name "FORWARD"
Tags {
"LightMode"="ForwardBase"
}
ZWrite Off
CGPROGRAM
#include "UnityCG.cginc"
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fwdbase
#pragma only_renderers d3d9 d3d11 glcore gles
#pragma target 3.0
uniform sampler2D _GrabTexture;
uniform float _DistortionStrength;
uniform float _HoleSize;
uniform float _HoleEdgeSmoothness;
uniform float _ObjectEdgeArtifactFix;
uniform float _Timer;
struct VertexInput {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct VertexOutput {
float4 pos : SV_POSITION;
float4 posWorld : TEXCOORD0;
float3 normalDir : TEXCOORD1;
float4 projPos : TEXCOORD2;
};
VertexOutput vert (VertexInput v) {
VertexOutput o = (VertexOutput)0;
o.normalDir = UnityObjectToWorldNormal(v.normal);
o.posWorld = mul(unity_ObjectToWorld, v.vertex);
o.pos = UnityObjectToClipPos(v.vertex);
o.projPos = ComputeScreenPos(o.pos);
COMPUTE_EYEDEPTH(o.projPos.z);
return o;
}
float4 frag(VertexOutput i) : COLOR {
i.normalDir = normalize(i.normalDir);
float3 viewDirection = normalize(_WorldSpaceCameraPos.xyz - i.posWorld.xyz);
float3 normalDirection = i.normalDir;
float2 sceneUVs = (i.projPos.xy / i.projPos.w);
float node_9892 = (_HoleSize * -1.0 + 1.0);
float node_3969 = (1.0 - pow(1.0 - max(0, dot(normalDirection, viewDirection)), clamp(_DistortionStrength - _Timer, 0, _DistortionStrength)));
float node_9136 = (length(float2(ddx(node_3969), ddy(node_3969))) * _HoleEdgeSmoothness);
float node_4918 = pow(node_3969, 6.0);
float node_1920 = (1.0 - smoothstep((node_9892 - node_9136), (node_9892 + node_9136), node_4918));
float3 finalColor = (
lerp(
float4(node_1920, node_1920, node_1920, node_1920),
float4(1, 1, 1, 1),
pow(
pow(1.0 - max(0, dot(normalDirection, viewDirection)), 1.0),
_ObjectEdgeArtifactFix
)
) * tex2D(_GrabTexture, ((node_4918 * (sceneUVs.rg * _Time * -2.0 + 1.0)) + sceneUVs.rg)).rgb).rgb;
return fixed4(finalColor, 1);
}
ENDCG
}
}
FallBack "Diffuse"
}
Now, the problem is that in order to make the distortion disappear after a certain time, I need to include some variable in the equation - here I'm calling it _Timer. I'm not using the built-in _Time for obvious reasons: it's an ever-growing value, and I need something that starts from 0 each time the object using this shader becomes active. The C# code that passes this parameter looks as follows:
public void Update() {
    _timeElapsed += Time.deltaTime;
    _renderer.material.SetFloat("_Timer", _timeElapsed);
}
The question is - can I do it better? I would like this shader's code to be more self-contained, without the need to pass parameters to it from a C# script.
Can I do it better?
In short, yes and no. If you want the shader to behave differently per material, you simply cannot avoid passing a property from C#. You can, however, avoid doing it in Update by passing a start time and computing the elapsed time in the shader.
C#
void OnEnable ()
{
    _renderer.material.SetFloat("_StartTime", Time.timeSinceLevelLoad);
}
Shader
uniform float _StartTime;

float4 frag(VertexOutput i) : COLOR
{
    float elapse = _Time.y - _StartTime;
    // ... use elapse in place of _Timer in the distortion math, then return the final colour as before
}
Now, although this ties directly into the setup you are currently using, it should be noted that accessing the .material property will clone the material (which can break batching, among other things).
This can be avoided with the more recently introduced MaterialPropertyBlock.
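A minimal sketch of the MaterialPropertyBlock variant, assuming the same _renderer field and _StartTime property as in the snippets above:
MaterialPropertyBlock _props;

void OnEnable()
{
    if (_props == null) _props = new MaterialPropertyBlock();
    _renderer.GetPropertyBlock(_props);
    _props.SetFloat("_StartTime", Time.timeSinceLevelLoad);
    // Per-renderer override; no material instance is created, so the clone mentioned above is avoided.
    _renderer.SetPropertyBlock(_props);
}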
Unity provides a handful of built-in values for your shaders: things like the current object's transformation matrices, time, etc.
You just use them in ShaderLab like you'd use any other property; the only difference is that you don't have to declare them anywhere - they are "built in".
https://docs.unity3d.com/455/Documentation/Manual/SL-BuiltinValues.html
For time there is a clever twist that gives you 4 variations of the value, potentially saving you a multiply operation by re-using a pre-multiplied value for every pixel being rendered. The 4 values available are:
_Time.x = time / 20
_Time.y = time
_Time.z = time * 2
_Time.w = time * 3
This is a simple example that shows how it works:
Shader "Example/Circle"
{
Properties
{
}
SubShader
{
Cull Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
float circle(in float2 _st, in float _radius){
float2 dist = distance(_st,float2(0.5,0.5));
float result = step(dist,_radius);
return result;
}
fixed4 frag (v2f i) : SV_Target
{
float WaveTime = sin(_Time.z);
float3 color = float3(1,1,1)*circle(i.uv,WaveTime);
return float4( color, 1.0 );
}
ENDCG
}
}
}
In the comments you mentioned that you want to reset the time value whenever the object is enabled, so in that case you do need to initialize the time value from a script.
So you should use your own time variable in the shader:
Properties
{
    _Timer("Timer", Float) = 0
}
float WaveTime = sin(_Timer);

using System.Collections;
using UnityEngine;

public class Circle : MonoBehaviour {

    public float _timeElapsed;

    void OnEnable(){
        _timeElapsed = 0;
    }

    public void Update() {
        _timeElapsed += Time.deltaTime;
        var _renderer = GetComponent<MeshRenderer>();
        _renderer.material.SetFloat("_Timer", _timeElapsed);
    }
}

Unity WebGL ok in editor, pink screen in build

I am building a polygon tool using Unity 5.5 and WebGL, which I would like to use to crop a selection from an image. The project uses a shader file to define the points and area which make up the polygon. You can see the code for the shader below, as well as the C# file.
The problem I am currently experiencing is that the project works fine in the Editor, but the WebGL build shows up pink in the browser. The polygon shader doesn't render; if I set a fallback shader in the file, it loads that one instead. I've looked up the issue online and found and tried the following:
Reimport all assets, as something may be missing. Check all game objects in the hierarchy for broken references.
Make sure the shaders are in the 'Always Included Shaders' list in Project Graphics Settings.
Potential memory leak? I increased the number of MB allocated to the project to 1024, lowered the size of the textures by compressing them, and also ran the Profiler in the Editor. The Profiler recorded a maximum total use of ~220 MB.
Toggled the Quality Settings - I discovered that the shader does not work if anti-aliasing is not on.
Graphics card not supporting the shader? People online have reported similar problems on OS X (which I am also using), but they have not updated their questions.
I should note that there are no errors or warnings in the project. I am unsure where the problem lies and would truly appreciate any take on this issue. Please let me know if there is any further information I can provide which could ease the process! Thank you in advance for your help :)
Polygon.cs file:
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
[ExecuteInEditMode]
public class Polygon : MonoBehaviour {
public Material mat; //Binding materials; Polygon shader applied to it.
Vector3[] worldPos; //Store 3D coordinates
Vector4[] screenPos; //Store to draw the polygon vertices in screen coordinates
int maxPointNum = 6; //Total number of polygon vertices
int currentpointNum = 0; //Current number of points placed
int pointNum2Shader = 0; //Transfer number of vertices to shader
bool InSelection = true; //Vertex gets is in the process
void Start() {
worldPos = new Vector3[maxPointNum];
screenPos = new Vector4[maxPointNum];
}
void Update() {
mat.SetVectorArray("Value", screenPos); //Pass the vertex position information to the screen shader.
mat.SetInt("PointNum", pointNum2Shader); //Transfer number of vertices to shader
//Cameras fired a ray to get to select a 3D location
Ray ray = Camera.main.ScreenPointToRay(Input.mousePosition);
RaycastHit hit;
if (Physics.Raycast(ray, out hit, 100)) {
Debug.DrawLine(ray.origin, hit.point);
}
//Use mouse clicks to obtain location information
if (Input.GetMouseButtonDown(0) && InSelection) {
if (currentpointNum < maxPointNum) {
currentpointNum++;
pointNum2Shader++;
worldPos[currentpointNum - 1] = hit.point;
//Vector3 v3 = Camera.main.ScreenToWorldPoint (worldPos [currentpointNum-1]);
Vector3 v3 = Camera.main.WorldToScreenPoint(worldPos[currentpointNum - 1]);
screenPos[currentpointNum - 1] = new Vector4(v3.x, v3.y, v3.z, 0);
}
else {
InSelection = false;
}
}
//Updated in real time screens of 3D point selected location
for (int i = 0; i < maxPointNum; i++) {
Vector3 v3 = Camera.main.WorldToScreenPoint(worldPos[i]);
screenPos[i] = new Vector4(v3.x, v3.y, v3.z, 0);
}
//Detect if 3d point is behind the camera, if you stop drawing
for (int i = 0; i < currentpointNum; i++) {
if (Vector3.Dot(worldPos[i] - Camera.main.transform.position, Camera.main.transform.forward) <= 0) {
pointNum2Shader = 0;
break;
}
pointNum2Shader = currentpointNum;
}
}
//Grab the current rendering of image processing
void OnRenderImage(RenderTexture src, RenderTexture dest) {
Graphics.Blit(src, dest, mat);
}
void OnGUI () {
float btnWidth = 100;
float btnHeight = 50;
float y = 150;
if (GUI.Button(new Rect(100, y, btnWidth, btnHeight), "Crop")) {
Vector2[] vertices2D = new Vector2[maxPointNum];
for (int i = 0; i < currentpointNum; i++) {
vertices2D[i].x = worldPos[i].x;
vertices2D[i].y = worldPos[i].y;
}
// Use the triangulator to get indices for creating triangles
Triangulator tr = new Triangulator(vertices2D);
int[] indices = tr.Triangulate();
GameObject plane = GameObject.Find("Plane");
MeshFilter filter = plane.GetComponent<MeshFilter>();
Vector3[] vertices = filter.mesh.vertices;
// Vector2[] texCoords = new Vector2[currentpointNum];
// for(int i = 0; i < currentpointNum; i++) {
// //There should be as many texture coordinates as vertices.
// //This example does not support textures, so fill with zeros
// texCoords[i] = new Vector2(vertices[i].x, vertices[i].y);
// }
// Create the mesh
Mesh msh = new Mesh();
msh.vertices = worldPos;
msh.triangles = indices;
msh.RecalculateNormals();
msh.RecalculateBounds();
filter.mesh = msh;
}
}
}
Polygon.shader file:
Shader "Unlit/polygon"
{
Properties
{
//Define basic properties can be set from inside the editor variable
//_MainTex ("Texture", 2D) = "white" {}
}
CGINCLUDE
// Upgrade NOTE: excluded shader from DX11 because it uses wrong array syntax (type[size] name)
#pragma exclude_renderers d3d11
//Incoming vertices function from the application data structure definitions
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
//Incoming segment from a vertex function from the data structure definitions
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
//Define mapping variables
sampler2D _MainTex;
// float4 _MainTex_ST;
//Define variables for communicating with the script
uniform float4 Value[6];
int PointNum = 0;
//Function that calculates the distance between two points
float Dis(float4 v1,float4 v2)
{
return sqrt(pow((v1.x-v2.x),2)+pow((v1.y-v2.y),2));
}
//Draw line segments
bool DrawLineSegment(float4 p1, float4 p2, float lineWidth,v2f i)
{
float4 center = float4((p1.x+p2.x)/2,(p1.y+p2.y)/2,0,0);
//Calculate distance between point and line
float d = abs((p2.y-p1.y)*i.vertex.x + (p1.x - p2.x)*i.vertex.y +p2.x*p1.y -p2.y*p1.x )/sqrt(pow(p2.y-p1.y,2) + pow(p1.x-p2.x,2));
//When less than or equal to half the line width, which belongs to the linear range, return true
float lineLength = sqrt(pow(p1.x-p2.x,2)+pow(p1.y-p2.y,2));
if(d<=lineWidth/2 && Dis(i.vertex,center)<lineLength/2)
{
return true;
}
return false;
}
//To draw a polygon, this limits the number of vertices is not more than 6. You can change.
bool pnpoly(int nvert, float4 vert[6], float testx, float testy)
{
int i, j;
bool c=false;
float vertx[6];
float verty[6];
for(int n=0;n<nvert;n++)
{
vertx[n] = vert[n].x;
verty[n] = vert[n].y;
}
for (i = 0, j = nvert-1; i < nvert; j = i++) {
if ( ((verty[i]>testy) != (verty[j]>testy)) && (testx < (vertx[j]-vertx[i]) * (testy-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
c = !c;
}
return c;
}
v2f vert (appdata v)
{
v2f o;
//Object vertices from model space to the camera cut space, or you can use the shorthand way:
//o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex = mul(UNITY_MATRIX_MVP,v.vertex);
//2D UV coordinate transformation can also use shorthand methods
//o.uv = TRANSFORM_TEX(v.uv, _MainTex);
//o.uv = v.uv.xy * _MainTex_ST.xy + _MainTex_ST.zw;
return o;
}
fixed4 frag (v2f i) : SV_Target
{
//Draw a polygon vertex
for(int j=0;j<PointNum;j++)
{
if(Dis(i.vertex, Value[j])<3)
{
return fixed4(1,0,0,0.5);
}
}
//Draws the edges of the polygon
for(int k=0;k<PointNum;k++)
{
if(k==PointNum-1)
{
if(DrawLineSegment(Value[k],Value[0],2,i))
{
return fixed4(1,1,0,0.5);
}
}
else
{
if(DrawLineSegment(Value[k],Value[k+1],2,i))
{
return fixed4(1,1,0,0.5);
}
}
}
//Within the filled polygon
if(pnpoly(PointNum, Value,i.vertex.x ,i.vertex.y))
{
return fixed4(0,1,0,0.3);
}
return fixed4(0,0,0,0);
//fixed4 col = tex2D(_MainTex, i.uv);
//return col;
}
ENDCG
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
//Select Alpha blend mode
Blend SrcAlpha OneMinusSrcAlpha
//In the CGPROGRAM block of code to write your own processes
CGPROGRAM
//Defined segment function entry and vertex function respectively, Vert and Frag
#pragma vertex vert
#pragma fragment frag
//Contains the basic files, there are some macro definitions and basic functions
#include "UnityCG.cginc"
ENDCG
}
}
}
A pink appearance means your build has missing material assignments on a MeshRenderer component, or a shader that is missing from the build.
If you are assigning materials or shaders at runtime, make sure you include the shader in the build.
You can force Unity to include any shader by adding it to the 'Always Included Shaders' list in Edit/Project Settings/Graphics.
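As a quick runtime check (a hypothetical diagnostic, not part of the original project), you can verify from script whether the shader actually shipped with the build and is supported on the target graphics API:
var shader = Shader.Find("Unlit/polygon");
if (shader == null)
    Debug.Log("Shader was stripped from the build - add it to Always Included Shaders");
else
    Debug.Log("Shader found; supported on this device: " + shader.isSupported);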

Volume Ray Marching rendered always on top of the other objects

I have a really big problem that has been bothering me for a long time and I can't seem to find the solution. I have downloaded this project, which is about volume rendering: https://github.com/brianasu/unity-ray-marching/tree/volumetric-textures (a Unitypackage with my project is here: https://dl.dropboxusercontent.com/u/27758186/ApplicationVolume.unitypackage ).
The problem is that, as you can see, the volume ALWAYS renders in front of everything else (try placing a cube in front of the cube with the volume). I have tried a lot of things but none seem to work.
I think it might be an issue with the shaders used. A RayMarching script is attached to the main Camera; it contains an OnRenderImage method which creates a new (disabled) Camera and renders the volume with it.
So I don't know whether it is a shader issue or more of a camera issue (it renders with replacement shaders). I attach my current project for testing so you don't have to download it from git. I just want the volume (the head) to appear behind the rectangles when it's actually behind them, and in front when it's in front, pretty much like standard geometry, but it always appears on top...
Any help or suggestion would be GREATLY appreciated; I'm kind of desperate as nothing works, and I'm pretty sure it is a fairly easy issue.
The code of the ray marching shader is below. Should it do some kind of ZTesting so that fragments covered by other objects are not shown?
Shader "Hidden/Ray Marching/Ray Marching"
{
CGINCLUDE
#include "UnityCG.cginc"
#pragma target 3.0
#pragma profileoption MaxLocalParams=1024
#pragma profileoption NumInstructionSlots=4096
#pragma profileoption NumMathInstructionSlots=4096
struct v2f {
float4 pos : POSITION;
float2 uv[2] : TEXCOORD0;
};
sampler3D _VolumeTex;
float4 _VolumeTex_TexelSize;
sampler2D _FrontTex;
sampler2D _BackTex;
float4 _LightDir;
float4 _LightPos;
float _Dimensions;
float _Opacity;
float4 _ClipDims;
float4 _ClipPlane;
v2f vert( appdata_img v )
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv[0] = v.texcoord.xy;
o.uv[1] = v.texcoord.xy;
#if SHADER_API_D3D9
if (_MainTex_TexelSize.y < 0)
o.uv[0].y = 1-o.uv[0].y;
#endif
return o;
}
#define TOTAL_STEPS 128.0
#define STEP_CNT 128
#define STEP_SIZE 1 / 128.0
half4 raymarch(v2f i, float offset)
{
float3 frontPos = tex2D(_FrontTex, i.uv[1]).xyz;
float3 backPos = tex2D(_BackTex, i.uv[1]).xyz;
float3 dir = backPos - frontPos;
float3 pos = frontPos;
float4 dst = 0;
float3 stepDist = dir * STEP_SIZE;
for(int k = 0; k < STEP_CNT; k++)
{
float4 src = tex3D(_VolumeTex, pos);
// clipping
float border = step(1 - _ClipDims.x, pos.x);
border *= step(pos.y, _ClipDims.y);
border *= step(pos.z, _ClipDims.z);
border *= step(0, dot(_ClipPlane, float4(pos - 0.5, 1)) + _ClipPlane.w);
// Standard blending
src.a *= saturate(_Opacity * border);
src.rgb *= src.a;
dst = (1.0f - dst.a) * src + dst;
pos += stepDist;
}
return 3.0F*dst;// + dst;
}
ENDCG
Subshader {
Fog { Mode off }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
half4 frag(v2f i) : COLOR {
return raymarch(i, 0);
}
ENDCG
}
}
Fallback off
} // shader

Black Screen due to shaders ps_3

I'm currently using shaders in my game. They work fine with an nVidia GeForce GT 330M, but with an ATI 4670 (which supports ps_4.1) I get a black screen.
Here is the source of the HLSL effect:
struct Explo
{
float3 position;
float4 color;
float power;
int time;
};
float2 DisplacementScroll;
texture colortexture;
int nb;
Explo explos[5];
float ambient;
float4 ambientColor;
float screenWidth;
float screenHeight;
sampler ColorMap = sampler_state
{
Texture = <colortexture>;
};
float4 CalculateLight(Explo ex, float4 base, float3 pixelPosition)
{
float3 direction = ex.position - pixelPosition;
float distance = 1 / length(ex.position - pixelPosition) * ex.power;
float amount = max(dot(base, normalize(distance)), 0);
return base * distance * amount * ex.color * ambient;
}
float4 Explosion(float2 texCoords : TEXCOORD0) : COLOR
{
//texCoords = tex2D(NormalMap, DisplacementScroll + texCoords / 3)*0.2 - 0.15;
float4 base = tex2D(ColorMap, texCoords);
float3 pixelPosition = float3(screenWidth * (texCoords.x),
screenHeight * (texCoords.y),0);
float4 finalColor = (base * ambientColor * ambient);
for (int i=0; i<nb; i++)
{
finalColor += CalculateLight(explos[i], base, pixelPosition);
}
return finalColor;
}
technique KaBoom
{
pass Pass1
{
PixelShader = compile ps_3_0 Explosion();
}
}
I remember once I had a similar problem: a shader just didn't work on ATI. The problem was that the vertex and pixel shaders were compiled to different shader models (vs_3_0 and ps_2_0). It worked on NVIDIA, but not on ATI. In your case you're only binding a pixel shader for the pass, and who knows what the last bound vertex shader was.
Granted, this is relevant only if you're dead sure the problem is with the shader and not something else, e.g. your draw calls (DIPs).
Good luck
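Following that suggestion, a sketch of what binding a matching vs_3_0 vertex shader in the technique could look like (the pass-through vertex shader and the MatrixTransform parameter are illustrative assumptions, not part of the original effect):
float4x4 MatrixTransform; // assumed to be set from the application (e.g. the sprite batch's projection)

void SpriteVertexShader(inout float4 position : POSITION0,
                        inout float4 color    : COLOR0,
                        inout float2 texCoord : TEXCOORD0)
{
    position = mul(position, MatrixTransform);
}

technique KaBoom
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 SpriteVertexShader();
        PixelShader  = compile ps_3_0 Explosion();
    }
}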
