HLSL outputs blurry image - C#

I used code I found on the internet for an effect, and it worked perfectly except for one thing: it blurred my output screen a bit.
What it should be: http://i.imgur.com/5DwEMDR.png
What it is currently: http://i.imgur.com/tL0HYIM.png
I tried to understand why it's happening, but I couldn't solve it by myself.
First effect file:
const float3 black = float3(0.0f, 0.0f, 0.0f);
float2 lightSource;
float3 lightColor;
float lightRadius;
struct VertexShaderInput
{
float3 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float2 WorldPos : TEXCOORD0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
output.Position = float4(input.Position, 1);
output.WorldPos = input.TexCoord;
return output;
}
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
float2 position = input.WorldPos - lightSource;
float dist = sqrt(dot(position, position));
float3 mix = lerp(lightColor, black, saturate(dist / lightRadius));
return float4(mix, 1.0f);
}
technique Technique1
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
Second effect file:
texture colorMap;
texture lightMap;
sampler colorSampler = sampler_state
{
Texture = (colorMap);
AddressU = CLAMP;
AddressV = CLAMP;
MagFilter = LINEAR;
MinFilter = LINEAR;
Mipfilter = LINEAR;
};
sampler lightSampler = sampler_state
{
Texture = (lightMap);
AddressU = CLAMP;
AddressV = CLAMP;
MagFilter = LINEAR;
MinFilter = LINEAR;
Mipfilter = LINEAR;
};
struct VertexShaderInput
{
float3 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
output.Position = float4(input.Position, 1);
output.TexCoord = input.TexCoord;
return output;
}
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
float3 diffuseColor = tex2D(colorSampler, input.TexCoord).rgb;
float3 light = tex2D(lightSampler, input.TexCoord).rgb;
return float4((diffuseColor * light), 1);
}
technique Technique1
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
Edit:
I think the problem is here, but I can't seem to solve it:
sampler colorSampler = sampler_state
{
Texture = (colorMap);
AddressU = CLAMP;
AddressV = CLAMP;
MagFilter = LINEAR;
MinFilter = LINEAR;
Mipfilter = LINEAR;
};
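For what it's worth, two common causes of this kind of blur in XNA/DX9 are linear filtering on the full-screen pass and the half-pixel offset between pixel centers and texel centers; neither is confirmed here, so treat the following as a sketch. viewportWidth and viewportHeight are hypothetical parameters you would set from C# to the back-buffer size.
// Option 1: sample the render targets with point filtering instead of linear.
sampler colorSampler = sampler_state
{
    Texture = (colorMap);
    AddressU = CLAMP;
    AddressV = CLAMP;
    MagFilter = POINT;
    MinFilter = POINT;
    Mipfilter = POINT;
};
// Option 2: compensate for the DX9 half-pixel offset in the full-screen vertex shader.
float viewportWidth;   // hypothetical parameter, set from C#
float viewportHeight;  // hypothetical parameter, set from C#
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;
    output.Position = float4(input.Position, 1);
    // Clip space spans two units, so half a pixel is 1.0 / size.
    output.Position.x -= 1.0 / viewportWidth;
    output.Position.y += 1.0 / viewportHeight;
    output.TexCoord = input.TexCoord;
    return output;
}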

Related

How to properly blend two RenderTextures?

I'm trying to create a small paint app that uses a RenderTexture for painting. My goal is to paint a "Brush" texture, with color and alpha, onto the "Draw" RenderTexture (the whole texture starts with the color (1, 1, 1, 0)). I have written a shader for that, but it blends the brush quads with black borders:
example
I'm using the following shader for painting:
Shader "MyPaint/BrushShader"
{
Properties
{
_BrushTex ("Brush", 2D) = "white" {}
_DrawTex ("Draw Tex", 2D) = "white" {}
_Color ("Main Color", Color) = (1, 1, 1, 1)
}
SubShader
{
Tags {
"Queue"="Transparent"
"IgnoreProjector"="True"
"RenderType"="Transparent"
}
Cull Off
Lighting Off
ZWrite Off
ZTest Always
Pass
{
CGPROGRAM
#include "UnityCG.cginc"
#pragma vertex vert
#pragma fragment frag
sampler2D _BrushTex;
half4 _BrushTex_ST;
sampler2D _DrawTex;
half4 _DrawTex_ST;
float4 _Color;
struct appdata
{
float4 vertex : POSITION;
float4 color : COLOR;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float4 color : COLOR;
float2 uv : TEXCOORD0;
float4 screenPos : TEXCOORD1;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.screenPos = o.vertex;
o.uv = v.uv;
o.color = v.color;
return o;
}
float4 frag (v2f i) : SV_Target
{
float4 brush = tex2D(_BrushTex, i.uv) * _Color;
float2 grabTexcoord = i.screenPos.xy / i.screenPos.w;
grabTexcoord.x = (grabTexcoord.x + 1.0) * 0.5;
grabTexcoord.y = (grabTexcoord.y + 1.0) * 0.5;
#if UNITY_UV_STARTS_AT_TOP
grabTexcoord.y = 1.0 - grabTexcoord.y;
#endif
float4 draw = tex2D(_DrawTex, grabTexcoord);
float4 color = draw * (1.0f - brush.a) + brush * brush.a;
return color;
}
ENDCG
}
}
}
I create a Mesh and render it into the "Draw" RenderTexture using a CommandBuffer with a RenderTargetIdentifier and a Material.
I don't understand how to avoid the black borders of the brush.
I'm using ARGB32 RenderTextures without depth or mip-maps. Brush texture:
brush
My goal is to paint like in Photoshop, with a constant color, and have it even work with alpha:
photoshop
If anyone can give me advice on how to properly blend a RenderTexture, I would greatly appreciate it!

Add Alpha to Shader

I have the following shader. I'm trying to add an alpha to the white, but all my attempts have proven difficult.
This is the project from which I obtained the shader, for reference - github
I think it has to do with one of the passes overwriting the output.
Shader "Suibokuga/Suibokuga" {
Properties {
_MainTex ("Water Texture", 2D) = "white" {}
_Alpha ("Transfer/Diffusion coefficient for water particles", Range(0.01, 1.5)) = 1.0
_Evaporation ("a unit quantity of water for evaporation", Range(0.0001, 0.005)) = 0.00015
_PaperTex ("Paper Texture", 2D) = "white" {}
_Brush ("brush", Vector) = (-1, -1, -1, -1)
_Prev ("previous brush position", Vector) = (-1, -1, -1, -1)
}
CGINCLUDE
#include "UnityCG.cginc"
#pragma target 3.0
/*
* r : water particles
* b : capacities of water
* a : the heights of the bottoms
*/
sampler2D _MainTex;
float4 _MainTex_TexelSize;
float _Alpha;
float _Evaporation;
sampler2D _PaperTex;
float2 _Prev;
float3 _Brush; // x,y : position, z : size
struct appdata {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
v2f vert (appdata IN) {
v2f OUT;
OUT.vertex = mul(UNITY_MATRIX_MVP, IN.vertex);
OUT.uv = IN.uv;
return OUT;
}
void sample (float2 uv, out float4 o, out float4 l, out float4 t, out float4 r, out float4 b) {
float2 texel = _MainTex_TexelSize.xy;
o = tex2D(_MainTex, uv);
l = tex2D(_MainTex, uv + float2(-texel.x, 0));
t = tex2D(_MainTex, uv + float2( 0, -texel.y));
r = tex2D(_MainTex, uv + float2( texel.x, 0));
b = tex2D(_MainTex, uv + float2( 0, texel.y));
}
float waterDelta (float4 k, float4 o) {
float ld = (k.w + k.x) - (o.w + o.x); // level difference
float transfer = (k.w + k.x) - max(o.w, k.w + k.z); // transferable water particles
return max(
0.0,
0.25 * _Alpha * min(ld, transfer)
);
}
float waterFlow (float2 uv) {
float4 o, l, t, r, b;
sample(uv, o, l, t, r, b);
float nw = o.r;
nw += (waterDelta(l, o) - waterDelta(o, l));
nw += (waterDelta(t, o) - waterDelta(o, t));
nw += (waterDelta(r, o) - waterDelta(o, r));
nw += (waterDelta(b, o) - waterDelta(o, b));
return max(nw, 0);
}
float evaporation (float wo) {
return max(wo - _Evaporation, 0.0);
}
float brush (float2 uv) {
const int count = 10;
float2 dir = _Brush.xy - _Prev.xy;
float l = length(dir);
if(l <= 0) {
float d = length(uv - _Brush.xy);
return smoothstep(0.0, _Brush.z, _Brush.z - d);
}
float ld = l / count;
float2 norm = normalize(dir);
float md = 100;
for(int i = 0; i < count; i++) {
float2 p = _Prev.xy + norm * ld * i;
float d = length(uv - p);
if(d < md) {
md = d;
}
}
return smoothstep(0.0, _Brush.z, _Brush.z - md);
// float d = length(uv - _Brush.xy);
// return smoothstep(0.0, _Brush.z, _Brush.z - d);
}
ENDCG
SubShader {
Cull Off ZWrite Off ZTest Always
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment init
float4 init (v2f IN) : SV_Target {
float4 paper = tex2D(_PaperTex, IN.uv);
return float4(
0,
0,
paper.r,
paper.r
);
}
ENDCG
}
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment waterUpdate
float4 waterUpdate (v2f IN) : SV_Target {
float4 col = tex2D(_MainTex, IN.uv);
col.x = evaporation(waterFlow(IN.uv));
float dw = brush(IN.uv);
// if(dw > 0) {
col.x = min(col.x + brush(IN.uv), 1.0);
// }
return col;
}
ENDCG
}
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment visualize
float4 visualize (v2f IN) : SV_Target {
float4 col = tex2D(_MainTex, IN.uv);
return float4(1.0 - col.xxx, 1.0);
}
ENDCG
}
}
}
You seem to be missing a blending step.
Try adding this line to the end of the last pass:
Blend SrcAlpha OneMinusSrcAlpha
Check this page to learn about alpha blending for Unity shaders.
The line above is for standard transparency; you might want to try different blending options.
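For reference, a sketch of what the last pass might look like with that line added. The fragment still returns an alpha of 1.0 here, so for the blend state to have any visible effect you would also need to output a smaller alpha wherever transparency is wanted.
Pass {
    Blend SrcAlpha OneMinusSrcAlpha
    CGPROGRAM
    #pragma vertex vert
    #pragma fragment visualize
    float4 visualize (v2f IN) : SV_Target {
        float4 col = tex2D(_MainTex, IN.uv);
        // Alpha is still 1.0 here; return something smaller where the
        // output should be transparent for the blend state above to matter.
        return float4(1.0 - col.xxx, 1.0);
    }
    ENDCG
}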

XNA 4.0 shader depth information

I am using a RenderTarget2D to draw my 3D world, and I use a shader to add light effects etc. afterwards.
How can I get the depth information inside the pixel shader?
I am new to shader programming and don't know which commands are available in a shader.
My shader:
float4x4 World;
float4x4 View;
float4x4 Projection;
texture output;
texture zBuffer;
float2 screenSize;
bool invert;
texture ModelTexture;
sampler2D textureSampler = sampler_state {
Texture = (ModelTexture);
MagFilter = Linear;
MinFilter = Linear;
AddressU = Clamp;
AddressV = Clamp;
};
struct VertexShaderInput
{
float4 Position : POSITION0;
float2 TextureCoordinate : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float2 TextureCoordinate : TEXCOORD0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
float4 worldPosition = mul(input.Position, World);
float4 viewPosition = mul(worldPosition, View);
output.Position = mul(viewPosition, Projection);
output.TextureCoordinate = input.TextureCoordinate;
return output;
}
float4 PixelShaderFunction(VertexShaderOutput input, float2 vPos : VPOS) : COLOR0
{
int pixelCoordsY = vPos.y * screenSize.y; //get the y coordinate of the pixel
int pixelCoordsX = vPos.x * screenSize.x; //get the x coordinate of the pixel
float4 textureColor = tex2D(textureSampler, input.TextureCoordinate);
if (invert)
{
textureColor.r = 1 - textureColor.r;
textureColor.g = 1 - textureColor.g;
textureColor.b = 1 - textureColor.b;
}
return textureColor;
}
technique Technique1
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
Do I have to set parameters on the RenderTarget?
Thanks a lot!
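For reference, a common XNA 4.0 approach (not from this thread) is to write the depth out yourself, since the DX9 hardware depth buffer can't be sampled directly: pass the clip-space z and w to the pixel shader and write z/w into a second render target. A minimal sketch, with illustrative struct and output names that are not in the original code:
struct VertexShaderOutput
{
    float4 Position : POSITION0;
    float2 TextureCoordinate : TEXCOORD0;
    float2 Depth : TEXCOORD1; // clip-space z and w
};
struct PixelShaderOutput
{
    float4 Color : COLOR0;
    float4 Depth : COLOR1; // requires a second render target bound from C#
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output;
    float4 worldPosition = mul(input.Position, World);
    float4 viewPosition = mul(worldPosition, View);
    output.Position = mul(viewPosition, Projection);
    output.TextureCoordinate = input.TextureCoordinate;
    output.Depth = output.Position.zw;
    return output;
}
PixelShaderOutput PixelShaderFunction(VertexShaderOutput input)
{
    PixelShaderOutput output;
    output.Color = tex2D(textureSampler, input.TextureCoordinate);
    // Perspective divide per pixel gives a 0..1 depth value you can sample later.
    output.Depth = float4(input.Depth.x / input.Depth.y, 0, 0, 1);
    return output;
}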

Normal mapping giving weird results (C# XNA)

Okay, so this problem is kind of huge, and for that reason I would rather post as little code as possible and instead have you come with ideas as to what it could be. I will post the code where I feel the problem could be. If you would like to see more, just ask and I will provide it.
So, I just "stole" a shader for my game. By "stole" I mean I found a tutorial that I had done before and just copied the end result. Thus I know the shader should work, because I've used it before.
I have a custom mesh class and also a custom vertex struct. I have never made a vertex struct before, so initially I thought that was where the problem was.
But I later found some counterarguments:
All of their variables seem to be right, and everything works except the bump-mapping.
Changing the Tangent and/or Binormal seems to have no effect on the shading whatsoever, so I think the mistake is not in how they are calculated, but rather in how they are used.
http://imageshack.us/photo/my-images/838/w6kv.png/
This is the output I get. Keep in mind that this is a voxel engine. As you can see, all the boxes have the same weird normal-map shadow. However, this is the normal map:
http://imageshack.us/photo/my-images/268/r7jt.jpg/
As you can see, they don't fit whatsoever. Now, as I see it, this could be one of three things:
It could be the way I set up the shader in XNA.
It could also be something in the vertex struct.
It could also be the way I call the actual drawing function.
So here's the code for those three things (and the shader as well):
Shader setup:
(Here I set up the data for the shader and then draw the mesh)
// Bind the parameters with the shader.
BBS.Parameters["World"].SetValue(Matrix.Identity);
BBS.Parameters["View"].SetValue(camera.viewMatrix);
BBS.Parameters["Projection"].SetValue(camera.projectionMatrix);
BBS.Parameters["AmbientColor"].SetValue(Color.White.ToVector4());
BBS.Parameters["AmbientIntensity"].SetValue(0.5f);
Vector3 LD = new Vector3(0, 1, -1);
LD.Normalize();
BBS.Parameters["DiffuseColor"].SetValue(Color.White.ToVector4());
BBS.Parameters["DiffuseIntensity"].SetValue(0);
BBS.Parameters["LightDirection"].SetValue(LD);
BBS.Parameters["EyePosition"].SetValue(new Vector3(0.0f, 2.0f, 5.0f));
BBS.Parameters["SpecularColor"].SetValue(Color.White.ToVector4());
BBS.Parameters["ColorMap"].SetValue(cubeTexture);
BBS.Parameters["NormalMap"].SetValue(Content.Load<Texture2D>("images"));
BBS.CurrentTechnique = BBS.Techniques["Technique1"];
for (int i = 0; i < BBS.CurrentTechnique.Passes.Count; i++)
{
//EffectPass.Apply will update the device to
//begin using the state information defined in the current pass
BBS.CurrentTechnique.Passes[i].Apply();
//theMesh contains all of the information required to draw
//the current mesh
graphics.DrawUserPrimitives(PrimitiveType.TriangleList, Mesh.Vertices, 0, Mesh.NUM_TRIANGLES);
}
Vertex struct:
public struct VertexPositionNormalTangentBinormalTexture : IVertexType
{
public Vector3 Position;
public Vector3 Normal;
public Vector2 TextureCoordinate;
public Vector3 Tangent;
public Vector3 Binormal;
public static readonly VertexDeclaration VertexElements = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(12, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0),
new VertexElement(24, VertexElementFormat.Vector2, VertexElementUsage.TextureCoordinate, 0),
new VertexElement(32, VertexElementFormat.Vector3, VertexElementUsage.Tangent, 0),
new VertexElement(44, VertexElementFormat.Vector3, VertexElementUsage.Binormal, 0)
);
VertexDeclaration IVertexType.VertexDeclaration { get { return VertexElements; } }
public static readonly int SizeInBytes = sizeof(float) * (3 + 3 + 2 + 3 + 3);
}
Shader:
// XNA 4.0 Shader Programming #4 - Normal Mapping
// Matrix
float4x4 World;
float4x4 View;
float4x4 Projection;
// Light related
float4 AmbientColor;
float AmbientIntensity;
float3 LightDirection;
float4 DiffuseColor;
float DiffuseIntensity;
float4 SpecularColor;
float3 EyePosition;
texture2D ColorMap;
sampler2D ColorMapSampler = sampler_state
{
Texture = <ColorMap>;
MinFilter = linear;
MagFilter = linear;
MipFilter = linear;
};
texture2D NormalMap;
sampler2D NormalMapSampler = sampler_state
{
Texture = <NormalMap>;
MinFilter = linear;
MagFilter = linear;
MipFilter = linear;
};
// The input for the VertexShader
struct VertexShaderInput
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
float3 Normal : NORMAL0;
float3 Binormal : BINORMAL0;
float3 Tangent : TANGENT0;
};
// The output from the vertex shader, used for later processing
struct VertexShaderOutput
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
float3 View : TEXCOORD1;
float3x3 WorldToTangentSpace : TEXCOORD2;
};
// The VertexShader.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output;
float4 worldPosition = mul(input.Position, World);
float4 viewPosition = mul(worldPosition, View);
output.Position = mul(viewPosition, Projection);
output.TexCoord = input.TexCoord;
output.WorldToTangentSpace[0] = mul(normalize(input.Tangent), World);
output.WorldToTangentSpace[1] = mul(normalize(input.Binormal), World);
output.WorldToTangentSpace[2] = mul(normalize(input.Normal), World);
output.View = normalize(float4(EyePosition,1.0) - worldPosition);
return output;
}
// The Pixel Shader
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
float4 color = tex2D(ColorMapSampler, input.TexCoord);
float3 normalMap = 2.0 *(tex2D(NormalMapSampler, input.TexCoord)) - 1.0;
normalMap = normalize(mul(normalMap, input.WorldToTangentSpace));
float4 normal = float4(normalMap,1.0);
float4 diffuse = saturate(dot(-LightDirection,normal));
float4 reflect = normalize(2*diffuse*normal-float4(LightDirection,1.0));
float4 specular = pow(saturate(dot(reflect,input.View)),32);
return color * AmbientColor * AmbientIntensity +
color * DiffuseIntensity * DiffuseColor * diffuse +
color * SpecularColor * specular;
}
// Our Techinique
technique Technique1
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
This is not done in the correct order:
output.WorldToTangentSpace[0] = mul(normalize(input.Tangent), World);
output.WorldToTangentSpace[1] = mul(normalize(input.Binormal), World);
output.WorldToTangentSpace[2] = mul(normalize(input.Normal), World);
It should be like this:
output.WorldToTangentSpace[0] = normalize(mul(input.Tangent, World));
output.WorldToTangentSpace[1] = normalize(mul(input.Binormal, World));
output.WorldToTangentSpace[2] = normalize(mul(input.Normal, World));
Otherwise, your normals get scaled by the world-space transformation, which results in very bright and very dark patches (which looks like your problem). By the way, since you're interested in normal mapping for voxel engines, check out the following videos I made:
http://www.youtube.com/watch?v=roMlOmNgr_w
http://www.youtube.com/watch?v=qkfHoGzQ8ZY
Hope you get inspired and that you complete your project.

Direct3D9 / HLSL Depth semantic not working

I have a problem with my shader code (HLSL). I use "DirectX for Managed Code" and Shader Model 3.0. I try to write a custom depth value into the depth buffer by using the DEPTH semantic in the pixel shader output struct:
struct PSOutput
{
float4 col : COLOR0;
float dept : DEPTH;
};
and I use this struct as the return value in my pixel shader:
PSOutput PSFunction(VertexShaderOutput input)
{
PSOutput output;
...
output.col = float4(...);
output.dept = ...;
return output;
}
DirectX throws an exception when I try to compile this shader, but gives no detailed information as to why. But when I remove the depth variable from the output struct, it works! I also tried writing DEPTH0 as the semantic, but with no success. I hope someone can help me with this.
EDIT:
If I write the following, it fails:
PSOutput PSFunction(VertexShaderOutput input)
{
PSOutput output;
float resDepth = input.Position[2] / input.Position[3];
if(...)
{
resDepth = ...;
}
output.col = float4(...);
output.dept = resDepth;
return output;
}
but if I write this code, it compiles:
PSOutput PSFunction(VertexShaderOutput input)
{
PSOutput output;
float resDepth = input.Position[2] / input.Position[3];
if(...)
{
resDepth = ...;
}
output.col = float4(...);
output.dept = 0.5;
return output;
}
Any ideas?
Here's the full code:
float4x4 World;
float4x4 View;
float4x4 Projection;
float4 CamPos;
float4 LightDir;
float4 ObjColor;
static const float PI = 3.14159265f;
static const int MAX_FU = 32;
float fuPercent[MAX_FU];
float4 fuColor[MAX_FU];
int buildDir;
static int fuCount = 2;
float4 boxMin;
float4 boxMax;
struct VertexShaderInput
{
float4 Position : POSITION0;
float3 Normal : NORMAL;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float3 Normal : NORMAL;
float3 ExactPos : TEXCOORD1;
};
struct PSOutput
{
float4 col : COLOR0;
//float dept : DEPTH;
};
VertexShaderOutput VSFunction(VertexShaderInput input)
{
VertexShaderOutput output;
float4 worldPosition = mul(input.Position, World);
float4 viewPosition = mul(worldPosition, View);
output.Position = mul(viewPosition, Projection);
output.Normal = mul(input.Normal, World);
output.ExactPos = input.Position;
return output;
}
PSOutput PSFunction(VertexShaderOutput input)
{
PSOutput output;
float4 resColor = ObjColor;
float resDepth = input.Position[2] / input.Position[3];
float prpos = 0;
if (buildDir == 0)
{
prpos = (input.ExactPos[1] - boxMin[1]) / (boxMax[1] - boxMin[1]);
}
else if (buildDir == 1)
{
prpos = 1.0 - ((input.ExactPos[1] - boxMin[1]) / (boxMax[1] - boxMin[1]));
}
else if (buildDir == 2)
{
prpos = (input.ExactPos[2] - boxMin[2]) / (boxMax[2] - boxMin[2]);
}
else if (buildDir == 3)
{
prpos = 1.0 - ((input.ExactPos[2] - boxMin[2]) / (boxMax[1] - boxMin[2]));
}
else if (buildDir == 4)
{
prpos = (input.ExactPos[0] - boxMin[0]) / (boxMax[0] - boxMin[0]);
}
else if (buildDir == 5)
{
prpos = 1.0 - ((input.ExactPos[0] - boxMin[0]) / (boxMax[0] - boxMin[0]));
}
float currPerc = 1.1;
for (int i = 0; i < fuCount; i++)
{
if (prpos - 0.0001 <= fuPercent[i])
{
if (fuPercent[i] < currPerc)
{
currPerc = fuPercent[i];
resColor = fuColor[i];
}
}
else
{
resDepth = 1.0;
resColor[3] = 0.0;
}
}
float3 nor = input.Normal;
float3 pos = input.ExactPos;
float glo = 0.5;
float id = (acos(dot(LightDir,nor) / pow(dot(LightDir,LightDir) * dot(nor, nor), 0.5)) / PI );
id = pow(id,2);
float3 look = reflect(normalize(pos - CamPos), nor);
float gl = (acos(dot(LightDir,look) / pow(dot(LightDir,LightDir) * dot(look, look), 0.5)) / PI );
gl = max(gl * 10.0 - 9.0, 0.0);
gl = pow(gl,2) * glo;
output.col = float4(resColor[0] * id + gl, resColor[1] * id + gl, resColor[2] * id + gl, resColor[3]);
//output.dept = resDepth;
return output;
}
technique MyTechnique
{
pass Pass1
{
VertexShader = compile vs_3_0 VSFunction();
PixelShader = compile ps_3_0 PSFunction();
}
}
If FXC is throwing an exception during compilation rather than giving you a compilation error, it's probably not anything you've done wrong.
If you're using the DirectX SDK, make sure you're using the most recent version (June 2010). If you're using the Windows Kit 8.0 SDK, then you may have found a compiler bug. What version of the SDK / fxc are you using?
Can you post a shader that actually compiles (one that includes the missing VertexShaderOutput struct and doesn't have ...'s in place of actual code)? I've filled in the missing code and have no problem compiling it using fxc from Windows Kit 8.0.
EDIT:
Nope, I hadn't spotted you'd commented out the code that made it not compile.
Sure enough, it doesn't compile, but that's because it's not valid code (as reported by the compile errors). You're using the POSITION semantic as an input to your pixel shader, which is not valid. If you want to use the outputted position from a vertex shader as input to a pixel shader, copy it into a second attribute and use that instead. If I substitute the following code into your shader it then compiles:
struct VertexShaderOutput
{
float4 ClipPosition : POSITION; // Renamed this to ClipPosition.
float4 Position : TEXCOORD0; // This is valid to use as an input to the pixel shader.
float3 Normal : NORMAL;
float3 ExactPos : TEXCOORD1;
};
struct PSOutput
{
float4 col : COLOR0;
float dept : DEPTH;
};
VertexShaderOutput VSFunction(VertexShaderInput input)
{
VertexShaderOutput output;
float4 worldPosition = mul(input.Position, World);
float4 viewPosition = mul(worldPosition, View);
output.ClipPosition = mul(viewPosition, Projection);
output.Position = output.ClipPosition; // Copy output position to our other attribute.
output.Normal = mul(input.Normal, World);
output.ExactPos = input.Position;
return output;
}
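The rest of the original pixel shader then works unchanged, since the field is still named Position and now carries the TEXCOORD0 copy. A minimal sketch of reading it (the colour written here is just a placeholder to keep the sketch compilable):
PSOutput PSFunction(VertexShaderOutput input)
{
    PSOutput output;
    // input.Position now refers to the TEXCOORD0 copy, which ps_3_0 is allowed to read.
    float resDepth = input.Position.z / input.Position.w;
    output.col = float4(resDepth, resDepth, resDepth, 1.0); // placeholder colour
    output.dept = resDepth;
    return output;
}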
