Black screen due to ps_3_0 shaders - C#

I'm currently using shaders in my game. Everything works fine on an NVIDIA GeForce GT 330M, but on an ATI Radeon HD 4670 (which supports ps_4_1) I get a black screen.
Here is the source of the HLSL effect:
struct Explo
{
float3 position;
float4 color;
float power;
int time;
};
float2 DisplacementScroll;
texture colortexture;
int nb;
Explo explos[5];
float ambient;
float4 ambientColor;
float screenWidth;
float screenHeight;
sampler ColorMap = sampler_state
{
Texture = <colortexture>;
};
float4 CalculateLight(Explo ex, float4 base, float3 pixelPosition)
{
float3 direction = ex.position - pixelPosition;
float distance = 1 / length(ex.position - pixelPosition) * ex.power;
float amount = max(dot(base, normalize(distance)), 0);
return base * distance * amount * ex.color * ambient;
}
float4 Explosion(float2 texCoords : TEXCOORD0) : COLOR
{
//texCoords = tex2D(NormalMap, DisplacementScroll + texCoords / 3)*0.2 - 0.15;
float4 base = tex2D(ColorMap, texCoords);
float3 pixelPosition = float3(screenWidth * (texCoords.x),
screenHeight * (texCoords.y),0);
float4 finalColor = (base * ambientColor * ambient);
for (int i=0; i<nb; i++)
{
finalColor += CalculateLight(explos[i], base, pixelPosition);
}
return finalColor;
}
technique KaBoom
{
pass Pass1
{
PixelShader = compile ps_3_0 Explosion();
}
}

I remember once I had a similar problem. A shader just didn't work on ATI. The problem was that the vertex and pixel shaders were compiled to different shader models (vs_3_0 and ps_2_0). It worked for NVIDIA, but not for ATI. In your case you're only binding a pixel shader for the pass and who knows what the last vertex shader was.
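For example, you could explicitly bind a vertex shader compiled against the same shader model in the pass. A minimal sketch (the SpriteVertexShader function and the MatrixTransform parameter are illustrative additions, not part of your effect):
float4x4 MatrixTransform; // assumed to be set from C#, e.g. the SpriteBatch projection matrix
void SpriteVertexShader(inout float4 position : POSITION0,
                        inout float4 color    : COLOR0,
                        inout float2 texCoord : TEXCOORD0)
{
    // Pass-through transform, so the vertex stage exists and matches vs_3_0.
    position = mul(position, MatrixTransform);
}
technique KaBoom
{
    pass Pass1
    {
        VertexShader = compile vs_3_0 SpriteVertexShader();
        PixelShader = compile ps_3_0 Explosion();
    }
}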
Granted, this is relevant only if you're dead sure the problem is with the shader and not something else, e.g. your draw calls (DrawIndexedPrimitives).
Good luck

Related

Edit Unity shader that takes 2 Vector3s

This is a jelly shader from the Unity Asset Store, but I can't figure out how to make it always active, since it requires two Vector3 values (_ModelOrigin and _ImpactOrigin). Any ideas on how to edit it so it is always active?
This is how I use it right now:
modelRenderer.material.SetVector("_ModelOrigin", pos.transform.position);
modelRenderer.material.SetVector("_ImpactOrigin", pos.transform.position += new Vector3(0.001f, 0.001f, 0.001f));
but when the GameObject's mesh is too far away it stops working.
I also tried to just use transform.position, but when I move the GameObject the shader gets new values, so it becomes laggy for a second.
Thanks
// Upgrade NOTE: upgraded instancing buffer 'Props' to new syntax.
Shader "Custom/JellyShader" {
Properties{
_Color("Color", Color) = (1,1,1,1)
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Glossiness("Smoothness", Range(0,1)) = 0.5
_Metallic("Metallic", Range(0,1)) = 0.0
_Transparency("Transparency", float) = 0.1
_ControlTime("Time", float) = 0
_ModelOrigin("Model Origin", Vector) = (0,0,0,0)
_ImpactOrigin("Impact Origin", Vector) = (-5,0,0,0)
_Emission("Emission", float) = 0
[HDR] _EmissionColor("Color", Color) = (0,0,0)
_Frequency("Frequency", Range(0, 1000)) = 10
_Amplitude("Amplitude", Range(0, 5)) = 0.1
_WaveFalloff("Wave Falloff", Range(1, 8)) = 4
_MaxWaveDistortion("Max Wave Distortion", Range(0.1, 2.0)) = 1
_ImpactSpeed("Impact Speed", Range(0, 10)) = 0.5
_WaveSpeed("Wave Speed", Range(-10, 10)) = -5
}
SubShader{
Tags { "Queue" = "Transparent" "RenderType" = "Transparent" }
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard fullforwardshadows addshadow vertex:vert alpha:fade
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
fixed4 _EmissionColor;
float _ControlTime;
float4 _ModelOrigin;
float4 _ImpactOrigin;
float _Transparency;
half _Frequency; //Base frequency for our waves.
half _Amplitude; //Base amplitude for our waves.
half _WaveFalloff; //How quickly our distortion should fall off given distance.
half _MaxWaveDistortion; //Smaller number here will lead to larger distortion as the vertex approaches origin.
half _ImpactSpeed; //How quickly our wave origin moves across the sphere.
half _WaveSpeed; //Oscillation speed of an individual wave.
// Add instancing support for this shader. You need to check 'Enable Instancing' on materials that use the shader.
// See https://docs.unity3d.com/Manual/GPUInstancing.html for more information about instancing.
// #pragma instancing_options assumeuniformscaling
UNITY_INSTANCING_BUFFER_START(Props)
// put more per-instance properties here
UNITY_INSTANCING_BUFFER_END(Props)
void vert(inout appdata_base v) {
float4 world_space_vertex = mul(unity_ObjectToWorld, v.vertex);
float4 direction = normalize(_ModelOrigin - _ImpactOrigin);
float4 origin = _ImpactOrigin + _ControlTime * _ImpactSpeed * direction;
//Get the distance in world space from our vertex to the wave origin.
float dist = distance(world_space_vertex, origin);
//Adjust our distance to be non-linear.
dist = pow(dist, _WaveFalloff);
//Set the max amount a wave can be distorted based on distance.
dist = max(dist, _MaxWaveDistortion);
//Convert direction and _ImpactOrigin to model space for later trig magic.
float4 l_ImpactOrigin = mul(unity_WorldToObject, _ImpactOrigin);
float4 l_direction = mul(unity_WorldToObject, direction);
//Magic
float impactAxis = l_ImpactOrigin + dot((v.vertex - l_ImpactOrigin), l_direction);
v.vertex.xyz += v.normal * sin(impactAxis * _Frequency + _ControlTime * _WaveSpeed) * _Amplitude * (1 / dist);
}
void surf(Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
o.Emission = c.rgb * tex2D(_MainTex, IN.uv_MainTex).a * _EmissionColor;
o.Smoothness = _Glossiness;
}
ENDCG
}
FallBack "Diffuse"
}
The simplest solution is to just remove them as properties, calculate the model position from the object-to-world transform, and then use your offset to set the impact origin.
That way you don't need to set those parameters in C#; it should "just work".
// Upgrade NOTE: upgraded instancing buffer 'Props' to new syntax.
Shader "Custom/JellyShader" {
Properties{
_Color("Color", Color) = (1,1,1,1)
_MainTex("Albedo (RGB)", 2D) = "white" {}
_Glossiness("Smoothness", Range(0,1)) = 0.5
_Metallic("Metallic", Range(0,1)) = 0.0
_Transparency("Transparency", float) = 0.1
_ControlTime("Time", float) = 0
_Emission("Emission", float) = 0
[HDR] _EmissionColor("Color", Color) = (0,0,0)
_Frequency("Frequency", Range(0, 1000)) = 10
_Amplitude("Amplitude", Range(0, 5)) = 0.1
_WaveFalloff("Wave Falloff", Range(1, 8)) = 4
_MaxWaveDistortion("Max Wave Distortion", Range(0.1, 2.0)) = 1
_ImpactSpeed("Impact Speed", Range(0, 10)) = 0.5
_WaveSpeed("Wave Speed", Range(-10, 10)) = -5
}
SubShader{
Tags { "Queue" = "Transparent" "RenderType" = "Transparent" }
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard fullforwardshadows addshadow vertex:vert alpha:fade
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
sampler2D _MainTex;
struct Input {
float2 uv_MainTex;
};
half _Glossiness;
half _Metallic;
fixed4 _Color;
fixed4 _EmissionColor;
float _ControlTime;
float _Transparency;
half _Frequency; //Base frequency for our waves.
half _Amplitude; //Base amplitude for our waves.
half _WaveFalloff; //How quickly our distortion should fall off given distance.
half _MaxWaveDistortion; //Smaller number here will lead to larger distortion as the vertex approaches origin.
half _ImpactSpeed; //How quickly our wave origin moves across the sphere.
half _WaveSpeed; //Oscillation speed of an individual wave.
// Add instancing support for this shader. You need to check 'Enable Instancing' on materials that use the shader.
// See https://docs.unity3d.com/Manual/GPUInstancing.html for more information about instancing.
// #pragma instancing_options assumeuniformscaling
UNITY_INSTANCING_BUFFER_START(Props)
// put more per-instance properties here
UNITY_INSTANCING_BUFFER_END(Props)
void vert(inout appdata_base v) {
float4 world_space_vertex = mul(unity_ObjectToWorld, v.vertex);
// hardcoded stuff:
float4 _ModelOrigin = mul(unity_ObjectToWorld, float4(0,0,0,1));
float4 _ImpactOffset = float4(0.001, 0.001, 0.001, 0);
float4 _ImpactOrigin = _ModelOrigin + _ImpactOffset;
float4 direction = normalize(_ModelOrigin - _ImpactOrigin);
float4 origin = _ImpactOrigin + _ControlTime * _ImpactSpeed * direction;
//Get the distance in world space from our vertex to the wave origin.
float dist = distance(world_space_vertex, origin);
//Adjust our distance to be non-linear.
dist = pow(dist, _WaveFalloff);
//Set the max amount a wave can be distorted based on distance.
dist = max(dist, _MaxWaveDistortion);
//Convert direction and _ImpactOrigin to model space for later trig magic.
float4 l_ImpactOrigin = mul(unity_WorldToObject, _ImpactOrigin);
float4 l_direction = mul(unity_WorldToObject, direction);
//Magic
float impactAxis = l_ImpactOrigin + dot((v.vertex - l_ImpactOrigin), l_direction);
v.vertex.xyz += v.normal * sin(impactAxis * _Frequency + _ControlTime * _WaveSpeed) * _Amplitude * (1 / dist);
}
void surf(Input IN, inout SurfaceOutputStandard o) {
// Albedo comes from a texture tinted by color
fixed4 c = tex2D(_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = c.rgb;
// Metallic and smoothness come from slider variables
o.Metallic = _Metallic;
o.Smoothness = _Glossiness;
o.Alpha = c.a;
o.Emission = c.rgb * tex2D(_MainTex, IN.uv_MainTex).a * _EmissionColor;
o.Smoothness = _Glossiness;
}
ENDCG
}
FallBack "Diffuse"
}
Note: Please let me know if this has any syntax errors; it has been a minute since I've done shader code.

Need help converting a custom shader to URP

For the last few days I've been following Sebastian Lague's videos about procedural generation. Since my project is based on URP and the custom shader responsible for rendering textures onto the mesh is written for the built-in pipeline, I have to do a conversion. I've been tinkering with Shader Graph and found a possible approach for the conversion, but neither the tint nor the textures are getting rendered. I will add both: the original code and the attempted conversion.
Edit: The only thing I got from tinkering a little more is that when I manually set the "layers" value in the Shader Graph, it lets me change the base color of that layer. Still, it only affects that single layer, doesn't apply any textures, and only works below a certain y value in world space.
This is the code for setting the variables. The same for both:
public void ApplyToMaterial(Material material)
{
material.SetInt("layerCount", layers.Length);
material.SetColorArray("baseColours", layers.Select(x => x.tint).ToArray());
material.SetFloatArray("baseStartHeights", layers.Select(x => x.startHeight).ToArray());
material.SetFloatArray("baseBlends", layers.Select(x => x.blendStrength).ToArray());
material.SetFloatArray("baseColourStrength", layers.Select(x => x.tintStrength).ToArray());
material.SetFloatArray("baseTextureScales", layers.Select(x => x.textureScale).ToArray());
Texture2DArray texturesArray = GenerateTextureArray(layers.Select(x => x.texture).ToArray());
material.SetTexture("baseTextures", texturesArray);
UpdateMeshHeights(material, savedMinHeight, savedMaxHeight);
}
public void UpdateMeshHeights(Material material, float minHeight, float maxHeight)
{
savedMaxHeight = maxHeight;
savedMinHeight = minHeight;
material.SetFloat("minHeight", minHeight);
material.SetFloat("maxHeight", maxHeight);
}
The original shader:
Shader "Custom/Terrain" {
Properties{
testTexture("Texture", 2D) = "white"{}
testScale("Scale", Float) = 1
}
SubShader{
Tags { "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
// Physically based Standard lighting model, and enable shadows on all light types
#pragma surface surf Standard fullforwardshadows
// Use shader model 3.0 target, to get nicer looking lighting
#pragma target 3.0
const static int maxLayerCount = 8;
const static float epsilon = 1E-4;
int layerCount;
float3 baseColours[maxLayerCount];
float baseStartHeights[maxLayerCount];
float baseBlends[maxLayerCount];
float baseColourStrength[maxLayerCount];
float baseTextureScales[maxLayerCount];
float minHeight;
float maxHeight;
sampler2D testTexture;
float testScale;
UNITY_DECLARE_TEX2DARRAY(baseTextures);
struct Input {
float3 worldPos;
float3 worldNormal;
};
float inverseLerp(float a, float b, float value) {
return saturate((value - a) / (b - a));
}
float3 triplanar(float3 worldPos, float scale, float3 blendAxes, int textureIndex) {
float3 scaledWorldPos = worldPos / scale;
float3 xProjection = UNITY_SAMPLE_TEX2DARRAY(baseTextures, float3(scaledWorldPos.y, scaledWorldPos.z, textureIndex)) * blendAxes.x;
float3 yProjection = UNITY_SAMPLE_TEX2DARRAY(baseTextures, float3(scaledWorldPos.x, scaledWorldPos.z, textureIndex)) * blendAxes.y;
float3 zProjection = UNITY_SAMPLE_TEX2DARRAY(baseTextures, float3(scaledWorldPos.x, scaledWorldPos.y, textureIndex)) * blendAxes.z;
return xProjection + yProjection + zProjection;
}
void surf(Input IN, inout SurfaceOutputStandard o) {
float heightPercent = inverseLerp(minHeight,maxHeight, IN.worldPos.y);
float3 blendAxes = abs(IN.worldNormal);
blendAxes /= blendAxes.x + blendAxes.y + blendAxes.z;
for (int i = 0; i < layerCount; i++) {
float drawStrength = inverseLerp(-baseBlends[i] / 2 - epsilon, baseBlends[i] / 2, heightPercent - baseStartHeights[i]);
float3 baseColour = baseColours[i] * baseColourStrength[i];
float3 textureColour = triplanar(IN.worldPos, baseTextureScales[i], blendAxes, i) * (1 - baseColourStrength[i]);
o.Albedo = o.Albedo * (1 - drawStrength) + (baseColour + textureColour) * drawStrength;
}
}
ENDCG
}
FallBack "Diffuse"
}
The attempt plus the shader graph:
const static int maxLayerCount = 8;
const static float epsilon = 1E-4;
float layerCount;
float3 baseColours[maxLayerCount];
float baseStartHeights[maxLayerCount];
float baseBlends[maxLayerCount];
float baseColourStrength[maxLayerCount];
float baseTextureScales[maxLayerCount];
float3 triplanar(float3 worldPos, float scale, float3 blendAxes, Texture2DArray textures, SamplerState ss, int textureIndex) {
float3 scaledWorldPos = worldPos / scale;
float3 xProjection = SAMPLE_TEXTURE2D_ARRAY(textures, ss, float2(scaledWorldPos.y, scaledWorldPos.z), textureIndex) * blendAxes.x;
float3 yProjection = SAMPLE_TEXTURE2D_ARRAY(textures, ss, float2(scaledWorldPos.x, scaledWorldPos.z), textureIndex) * blendAxes.y;
float3 zProjection = SAMPLE_TEXTURE2D_ARRAY(textures, ss, float2(scaledWorldPos.x, scaledWorldPos.y), textureIndex) * blendAxes.z;
return xProjection + yProjection + zProjection;
}
float inverseLerp(float a, float b, float c)
{
return saturate((c - a) / (b - a));
}
void layer_terrain_float(float3 worldPos, float heightPercent, float3 worldNormal, Texture2DArray textures, SamplerState ss, int layerCount, out float3 albedo) {
float3 blendAxes = abs(worldNormal);
blendAxes /= blendAxes.x + blendAxes.y + blendAxes.z;
albedo = 0.0f;
for (int i = 0; i < layerCount; i++) {
float drawStrength = inverseLerp(-baseBlends[i] / 2 - epsilon, baseBlends[i] / 2, heightPercent - baseStartHeights[i]);
float3 baseColour = baseColours[i] * baseColourStrength[i];
float3 textureColour = triplanar(worldPos, baseTextureScales[i], blendAxes, textures, ss, i) * (1 - baseColourStrength[i]);
albedo = albedo * (1 - drawStrength) + (baseColour + textureColour) * drawStrength;
}
}
I have been banging my head against a wall for a few days straight, so any help would be really appreciated.

Black hole distortion shader in Unity

I found shader code that has the effect of warping space around a certain point. It's a cool effect, but it's missing some animation, so I've added something to it:
Shader "Marek/BlackHoleDistortion"
{
Properties {
_DistortionStrength ("Distortion Strength", Range(0, 10)) = 0
_Timer("Timer", Range(0, 10)) = 0
_HoleSize ("Hole Size", Range(0, 1)) = 0.1736101
_HoleEdgeSmoothness ("Hole Edge Smoothness", Range(1, 4)) = 4
_ObjectEdgeArtifactFix ("Object Edge Artifact Fix", Range(1, 10)) = 1
}
SubShader {
Tags {
"IgnoreProjector"="True"
"Queue"="Transparent"
"RenderType"="Transparent"
}
GrabPass{ }
Pass {
Name "FORWARD"
Tags {
"LightMode"="ForwardBase"
}
ZWrite Off
CGPROGRAM
#include "UnityCG.cginc"
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fwdbase
#pragma only_renderers d3d9 d3d11 glcore gles
#pragma target 3.0
uniform sampler2D _GrabTexture;
uniform float _DistortionStrength;
uniform float _HoleSize;
uniform float _HoleEdgeSmoothness;
uniform float _ObjectEdgeArtifactFix;
uniform float _Timer;
struct VertexInput {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct VertexOutput {
float4 pos : SV_POSITION;
float4 posWorld : TEXCOORD0;
float3 normalDir : TEXCOORD1;
float4 projPos : TEXCOORD2;
};
VertexOutput vert (VertexInput v) {
VertexOutput o = (VertexOutput)0;
o.normalDir = UnityObjectToWorldNormal(v.normal);
o.posWorld = mul(unity_ObjectToWorld, v.vertex);
o.pos = UnityObjectToClipPos(v.vertex);
o.projPos = ComputeScreenPos(o.pos);
COMPUTE_EYEDEPTH(o.projPos.z);
return o;
}
float4 frag(VertexOutput i) : COLOR {
i.normalDir = normalize(i.normalDir);
float3 viewDirection = normalize(_WorldSpaceCameraPos.xyz - i.posWorld.xyz);
float3 normalDirection = i.normalDir;
float2 sceneUVs = (i.projPos.xy / i.projPos.w);
float node_9892 = (_HoleSize * -1.0 + 1.0);
float node_3969 = (1.0 - pow(1.0 - max(0, dot(normalDirection, viewDirection)), clamp(_DistortionStrength - _Timer, 0, _DistortionStrength)));
float node_9136 = (length(float2(ddx(node_3969), ddy(node_3969))) * _HoleEdgeSmoothness);
float node_4918 = pow(node_3969, 6.0);
float node_1920 = (1.0 - smoothstep((node_9892 - node_9136), (node_9892 + node_9136), node_4918));
float3 finalColor = (
lerp(
float4(node_1920, node_1920, node_1920, node_1920),
float4(1, 1, 1, 1),
pow(
pow(1.0 - max(0, dot(normalDirection, viewDirection)), 1.0),
_ObjectEdgeArtifactFix
)
) * tex2D(_GrabTexture, ((node_4918 * (sceneUVs.rg * _Time * -2.0 + 1.0)) + sceneUVs.rg)).rgb).rgb;
return fixed4(finalColor, 1);
}
ENDCG
}
}
FallBack "Diffuse"
}
Now, the problem is that in order to make the distortion disappear after a certain time, I need to include some variable in the equation - here I'm calling it _Timer. I'm not using the built-in _Time for obvious reasons - it's an ever-growing value, and I need something that starts from 0 each time the object using this shader is made active. The C# code that passes that parameter looks as follows:
public void Update() {
_timeElapsed += Time.deltaTime;
_renderer.material.SetFloat("_Timer", _timeElapsed);
}
The question is - can I do it better? I would like this shader's code to be more self-contained, without the need to pass parameters from a C# script to it.
Can I do it better?
In short, yes and no. If you want the shader to behave differently per material, you simply cannot avoid passing a property from C#. You can, however, avoid doing this in Update by passing a start time and computing the elapsed time in the shader.
C#
void OnEnable ()
{
_renderer.material.SetFloat("_StartTime", Time.timeSinceLevelLoad);
}
Shader
uniform float _StartTime;
float4 frag(VertexOutput i) : COLOR
{
float elapse = _Time.y - _StartTime;
}
Now, although this will tie directly into the setup you are currently using, it should be noted that accessing the .material property will clone the material (which can break batching, among other things).
This can be avoided with the more recent introduction of MaterialPropertyBlocks.
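For example, a minimal sketch (assuming the script lives on the same GameObject as the Renderer; the field names are illustrative):
MaterialPropertyBlock _props;
Renderer _renderer;
void OnEnable()
{
    if (_props == null) _props = new MaterialPropertyBlock();
    if (_renderer == null) _renderer = GetComponent<Renderer>();
    // Read the renderer's current per-instance overrides, update _StartTime, write them back.
    // No material instance is created, so batching is not broken.
    _renderer.GetPropertyBlock(_props);
    _props.SetFloat("_StartTime", Time.timeSinceLevelLoad);
    _renderer.SetPropertyBlock(_props);
}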
Unity provides a handful of built-in values for your shaders: things like the current object's transformation matrices, time, etc.
You just use them in ShaderLab like you'd use any other property; the only difference is that you don't have to declare them anywhere - they are "built in".
https://docs.unity3d.com/455/Documentation/Manual/SL-BuiltinValues.html
There is a clever way of giving you 4 variations of the value, potentially saving you a multiply operation by re-using a pre-multiplied value for every pixel being rendered. The 4 values available are:
_Time.x = time / 20
_Time.y = time
_Time.z = time * 2
_Time.w = time * 3
This is a simple example that shows you how it works:
Shader "Example/Circle"
{
Properties
{
}
SubShader
{
Cull Off
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
float circle(in float2 _st, in float _radius){
float2 dist = distance(_st,float2(0.5,0.5));
float result = step(dist,_radius);
return result;
}
fixed4 frag (v2f i) : SV_Target
{
float WaveTime = sin(_Time.z);
float3 color = float3(1,1,1)*circle(i.uv,WaveTime);
return float4( color, 1.0 );
}
ENDCG
}
}
}
In the comments you mentioned that you want to reset the time value when you enable the object, so here you need to initialize the time value from a script.
So you should use your own time in the shader:
Properties
{
_Timer("Timer",Float) = 0
}
float WaveTime = sin(_Timer);
using System.Collections;
using UnityEngine;
public class Circle : MonoBehaviour {
public float _timeElapsed;
void OnEnable(){
_timeElapsed = 0;
}
public void Update() {
_timeElapsed += Time.deltaTime;
var _renderer = GetComponent<MeshRenderer>();
_renderer.material.SetFloat("_Timer", _timeElapsed);
}
}

Volume Ray Marching rendered always on top of the other objects

I have a really big problem that has been bothering me for a long time, and I can't seem to find the solution. I have downloaded this project https://github.com/brianasu/unity-ray-marching/tree/volumetric-textures (Unitypackage with my project here https://dl.dropboxusercontent.com/u/27758186/ApplicationVolume.unitypackage ), which is about volume rendering.
The problem is that, as you can see, the volume ALWAYS renders in front of everything else (try placing a cube in front of the cube with the volume). I have tried a lot of things, but none of them seems to work.
I think it might be an issue with the shaders used. A RayMarching script is attached to the main Camera; it contains an OnRenderImage method that creates a new Camera (although disabled) and renders the volume.
I don't know, then, whether it is a shader issue or more of a camera issue (it is rendering with replaced shaders). I attach my current project for testing so you don't have to download it from git. I just want the volume (the head) to appear behind other objects when it's actually behind them, and in front when it's in front, pretty much like standard geometry, but it always appears on top...
Any help or suggestion would be GREATLY appreciated. I'm kind of desperate, as nothing works, and I'm pretty sure it is a fairly easy issue.
The code of the ray marching shader is as follows. Should it do some kind of Z-testing so it doesn't show fragments that are covered by other objects?
Shader "Hidden/Ray Marching/Ray Marching"
{
CGINCLUDE
#include "UnityCG.cginc"
#pragma target 3.0
#pragma profileoption MaxLocalParams=1024
#pragma profileoption NumInstructionSlots=4096
#pragma profileoption NumMathInstructionSlots=4096
struct v2f {
float4 pos : POSITION;
float2 uv[2] : TEXCOORD0;
};
sampler3D _VolumeTex;
float4 _VolumeTex_TexelSize;
sampler2D _FrontTex;
sampler2D _BackTex;
float4 _LightDir;
float4 _LightPos;
float _Dimensions;
float _Opacity;
float4 _ClipDims;
float4 _ClipPlane;
v2f vert( appdata_img v )
{
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv[0] = v.texcoord.xy;
o.uv[1] = v.texcoord.xy;
#if SHADER_API_D3D9
if (_MainTex_TexelSize.y < 0)
o.uv[0].y = 1-o.uv[0].y;
#endif
return o;
}
#define TOTAL_STEPS 128.0
#define STEP_CNT 128
#define STEP_SIZE 1 / 128.0
half4 raymarch(v2f i, float offset)
{
float3 frontPos = tex2D(_FrontTex, i.uv[1]).xyz;
float3 backPos = tex2D(_BackTex, i.uv[1]).xyz;
float3 dir = backPos - frontPos;
float3 pos = frontPos;
float4 dst = 0;
float3 stepDist = dir * STEP_SIZE;
for(int k = 0; k < STEP_CNT; k++)
{
float4 src = tex3D(_VolumeTex, pos);
// clipping
float border = step(1 - _ClipDims.x, pos.x);
border *= step(pos.y, _ClipDims.y);
border *= step(pos.z, _ClipDims.z);
border *= step(0, dot(_ClipPlane, float4(pos - 0.5, 1)) + _ClipPlane.w);
// Standard blending
src.a *= saturate(_Opacity * border);
src.rgb *= src.a;
dst = (1.0f - dst.a) * src + dst;
pos += stepDist;
}
return 3.0F*dst;// + dst;
}
ENDCG
Subshader {
Fog { Mode off }
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
half4 frag(v2f i) : COLOR {
return raymarch(i, 0);
}
ENDCG
}
}
Fallback off
} // shader

HLSL/XNA Ambient light texture mixed up with multi pass lighting

I've been having some trouble with lighting lately. I found a source on Google that works pretty well in its example. However, when I try to implement it in my current project, I get some very weird bugs. The main one is that my textures get "mixed up" when I only activate the ambient light, meaning that a model gets the texture of another one.
I am using the same effect for every mesh of my models. I guess this could be the problem, but I don't really know how to "reset" an effect for a new model. Is it possible?
Here is my shader:
float4x4 WVP;
float3x3 World;
float3 Ke;
float3 Ka;
float3 Kd;
float3 Ks;
float specularPower;
float3 globalAmbient;
float3 lightColor;
float3 eyePosition;
float3 lightDirection;
float3 lightPosition;
float spotPower;
texture2D Texture;
sampler2D texSampler = sampler_state
{
Texture = <Texture>;
MinFilter = anisotropic;
MagFilter = anisotropic;
MipFilter = linear;
MaxAnisotropy = 16;
};
struct VertexShaderInput
{
float4 Position : POSITION0;
float2 Texture : TEXCOORD0;
float3 Normal : NORMAL0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float2 Texture : TEXCOORD0;
float3 PositionO: TEXCOORD1;
float3 Normal : NORMAL0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
output.Position = mul(input.Position, WVP);
output.Normal = input.Normal;
output.PositionO = input.Position.xyz;
output.Texture = input.Texture;
return output;
}
float4 PSAmbient(VertexShaderOutput input) : COLOR0
{
return float4(Ka*globalAmbient + Ke,1) * tex2D(texSampler,input.Texture);
}
float4 PSDirectionalLight(VertexShaderOutput input) : COLOR0
{
//Difuze
float3 L = normalize(-lightDirection);
float diffuseLight = max(dot(input.Normal,L), 0);
float3 diffuse = Kd*lightColor*diffuseLight;
//Specular
float3 V = normalize(eyePosition - input.PositionO);
float3 H = normalize(L + V);
float specularLight = pow(max(dot(input.Normal,H),0),specularPower);
if(diffuseLight<=0) specularLight=0;
float3 specular = Ks * lightColor * specularLight;
//sum all light components
float3 light = diffuse + specular;
return float4(light,1) * tex2D(texSampler,input.Texture);
}
technique MultiPassLight
{
pass Ambient
{
VertexShader = compile vs_3_0 VertexShaderFunction();
PixelShader = compile ps_3_0 PSAmbient();
}
pass Directional
{
PixelShader = compile ps_3_0 PSDirectionalLight();
}
}
And here is how I actually apply my effects:
public void ApplyLights(ModelMesh mesh, Matrix world,
Texture2D modelTexture, Camera camera, Effect effect,
GraphicsDevice graphicsDevice)
{
graphicsDevice.BlendState = BlendState.Opaque;
effect.CurrentTechnique.Passes["Ambient"].Apply();
foreach (ModelMeshPart part in mesh.MeshParts)
{
graphicsDevice.SetVertexBuffer(part.VertexBuffer);
graphicsDevice.Indices = part.IndexBuffer;
// Texturing
graphicsDevice.BlendState = BlendState.AlphaBlend;
if (modelTexture != null)
{
effect.Parameters["Texture"].SetValue(
modelTexture
);
}
graphicsDevice.DrawIndexedPrimitives(
PrimitiveType.TriangleList,
part.VertexOffset,
0,
part.NumVertices,
part.StartIndex,
part.PrimitiveCount
);
// Applying our shader to all the mesh parts
effect.Parameters["WVP"].SetValue(
world *
camera.View *
camera.Projection
);
effect.Parameters["World"].SetValue(world);
effect.Parameters["eyePosition"].SetValue(
camera.Position
);
graphicsDevice.BlendState = BlendState.Additive;
// Drawing lights
foreach (DirectionalLight light in DirectionalLights)
{
effect.Parameters["lightColor"].SetValue(light.Color.ToVector3());
effect.Parameters["lightDirection"].SetValue(light.Direction);
// Applying changes and drawing them
effect.CurrentTechnique.Passes["Directional"].Apply();
graphicsDevice.DrawIndexedPrimitives(
PrimitiveType.TriangleList,
part.VertexOffset,
0,
part.NumVertices,
part.StartIndex,
part.PrimitiveCount
);
}
}
I am also applying this when loading the effect:
effect.Parameters["lightColor"].SetValue(Color.White.ToVector3());
effect.Parameters["globalAmbient"].SetValue(Color.White.ToVector3());
effect.Parameters["Ke"].SetValue(0.0f);
effect.Parameters["Ka"].SetValue(0.01f);
effect.Parameters["Kd"].SetValue(1.0f);
effect.Parameters["Ks"].SetValue(0.3f);
effect.Parameters["specularPower"].SetValue(100);
Thank you very much
UPDATE:
I tried to load an effect for each model when drawing, but it doesn't seem to have changed anything. I suppose it is because XNA detects that the effect has already been loaded and doesn't load a new one. Any idea why?
Alright, so the problem was pretty stupid, actually. I was applying the same effect to every mesh. I simply added an Effect.Clone() when loading the model and it worked!
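In code, that looks roughly like this (a minimal sketch; the asset path, the MyModel type, the LightingEffect field, and the models collection are placeholders for your own):
// Load the shared effect once, then give each model its own clone so that
// setting parameters (texture, lights, ...) for one model can't leak into another.
Effect baseEffect = Content.Load<Effect>("Effects/MultiPassLight"); // placeholder asset name
foreach (MyModel model in models) // placeholder model collection
{
    model.LightingEffect = baseEffect.Clone();
}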
