I'm using a Gaussian blur asset that uses this script, attached to the camera, to make its shader blur what is behind the UI:
public class BlurRenderer : MonoBehaviour
{
    /// <summary>
    /// How many iterations the blur should be applied for.
    /// </summary>
    [Range(0, 25)]
    public int Iterations;

    /// <summary>
    /// Lowers the resolution of the texture, thus allowing for a larger blur without so many iterations.
    /// </summary>
    [Range(0, 5)]
    public int DownRes;

    /// <summary>
    /// Whether to update the blur.
    /// </summary>
    public bool UpdateBlur = true;

    /// <summary>
    /// Amount of time that will pass before re-rendering the blur.
    /// </summary>
    public float UpdateRate = 0.02f;

    /// <summary>
    /// Last time we rendered the blur.
    /// </summary>
    private float lastUpdate = 0.0f;

    /// <summary>
    /// Stores the blur texture between renders.
    /// </summary>
    public RenderTexture BlurTexture;

    /// <summary>
    /// The material where we'll pass the screen texture through to create the blur.
    /// </summary>
    private Material mat;

    private void Start()
    {
        // Set up the blur texture.
        BlurTexture = new RenderTexture(256, 256, 16, RenderTextureFormat.ARGB32);
        /*
        // We can swap the code above for a texture that will take up less RAM... if needed.
        BlurTexture = new RenderTexture(128, 128, 4, RenderTextureFormat.ARGB4444);
        */
        // Create the texture on the GPU.
        BlurTexture.Create();
        // Create our material if we have not already.
        if (mat == null)
        {
            mat = new Material(Shader.Find("Hidden/GaussianBlur_Mobile"));
        }
    }

    #region BlurScaleSlider
    // This section will enable a slider that controls both the Iterations and DownRes.
    // Uncomment the code below to use it.
    /*
    [Range(0, 100)]
    /// <summary>
    /// The blur scale.
    /// </summary>
    public int blurScale;
    private void Update()
    {
        int tempBlurScale = blurScale;
        Iterations = 0;
        DownRes = 0;
        while (tempBlurScale > 20 && DownRes < 5)
        {
            DownRes += 1;
            tempBlurScale -= 15;
        }
        Iterations = tempBlurScale;
        tempBlurScale = 0;
    }
    */
    #endregion

    // OnRenderImage is called by Unity after the camera finishes rendering,
    // letting us post-process the image before it is displayed.
    void OnRenderImage(RenderTexture src, RenderTexture dst)
    {
        // Re-render the blur only when enabled and the update interval has elapsed.
        if (UpdateBlur && Time.time - lastUpdate >= UpdateRate)
        {
            // Each DownRes step halves the working resolution (cheap extra blur).
            int width = src.width >> DownRes;
            int height = src.height >> DownRes;
            // Create a temp texture and copy the screen image into it.
            RenderTexture rt = RenderTexture.GetTemporary(width, height);
            Graphics.Blit(src, rt);
            // Ping-pong through temporaries, applying one blur pass per iteration.
            for (int i = 0; i < Iterations; i++)
            {
                RenderTexture rt2 = RenderTexture.GetTemporary(width, height);
                Graphics.Blit(rt, rt2, mat);
                RenderTexture.ReleaseTemporary(rt);
                rt = rt2;
            }
            // Persist the result in BlurTexture, then release the temporary.
            Graphics.Blit(rt, BlurTexture);
            RenderTexture.ReleaseTemporary(rt);
            // BUG FIX: bind the persistent BlurTexture, not the temporary rt.
            // The original bound rt and then released it, so the shader sampled
            // a recycled temporary render texture (undefined contents).
            Shader.SetGlobalTexture("_MobileBlur", BlurTexture);
            lastUpdate = Time.time;
        }
        else
        {
            // Re-bind the cached texture; the global binding must be refreshed each frame.
            Shader.SetGlobalTexture("_MobileBlur", BlurTexture);
        }
        // Make sure the camera renders as normal.
        Graphics.Blit(src, dst);
    }

    private void OnDestroy()
    {
        // Release the GPU resources this component created.
        if (BlurTexture != null)
        {
            BlurTexture.Release();
        }
        if (mat != null)
        {
            Destroy(mat);
        }
    }

    /// <summary>
    /// Creates a BlurRenderer on the main camera (or returns the existing one).
    /// </summary>
    static public BlurRenderer Create()
    {
        BlurRenderer BRM = Camera.main.gameObject.GetComponent<BlurRenderer>();
        if (BRM == null)
        {
            BRM = Camera.main.gameObject.AddComponent<BlurRenderer>();
        }
        return BRM;
    }

    /// <summary>
    /// Overload to allow a different camera.
    /// </summary>
    static public BlurRenderer Create(Camera ThisCamera)
    {
        BlurRenderer BRM = ThisCamera.gameObject.GetComponent<BlurRenderer>();
        if (BRM == null)
        {
            BRM = ThisCamera.gameObject.AddComponent<BlurRenderer>();
        }
        return BRM;
    }
}
}
It creates a render texture of what the camera renders in world space. This works with most game objects, but with my skybox background or fade Standard shader (with a transparent image in the slot), nothing renders.
With game object:
With just transparent image:
How can I fix this/what is causing this?
The shader:
Properties
{
[PerRendererData] _MainTex ("_MainTex", 2D) = "white" {}
_Lightness ("_Lightness", Range(0,2)) = 1
_Saturation ("_Saturation", Range(-10,10)) = 1
_TintColor ("_TintColor",Color) = (1.0,1.0,1.0,0.0)
// required for UI.Mask
[HideInInspector] _StencilComp ("Stencil Comparison", Float) = 8
[HideInInspector] _Stencil ("Stencil ID", Float) = 0
[HideInInspector] _StencilOp ("Stencil Operation", Float) = 0
[HideInInspector] _StencilWriteMask ("Stencil Write Mask", Float) = 255
[HideInInspector] _StencilReadMask ("Stencil Read Mask", Float) = 255
[HideInInspector] _ColorMask ("Color Mask", Float) = 15
}
SubShader
{
Tags
{
"Queue" = "Transparent"
"PreviewType" = "Plane"
"DisableBatching" = "True"
}
// required for UI.Mask
Stencil
{
Ref [_Stencil]
Comp [_StencilComp]
Pass [_StencilOp]
ReadMask [_StencilReadMask]
WriteMask [_StencilWriteMask]
}
ColorMask [_ColorMask]
Pass
{
ZWrite Off
Cull Off
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
half4 color : COLOR;
half4 screenpos : TEXCOORD2;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float2 screenuv : TEXCOORD1;
half4 color : COLOR;
float2 screenpos : TEXCOORD2;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
o.screenuv = ((o.vertex.xy / o.vertex.w) + 1) * 0.5;
o.color = v.color;
o.screenpos = ComputeScreenPos(o.vertex);
return o;
}
float2 safemul(float4x4 M, float4 v)
{
float2 r;
r.x = dot(M._m00_m01_m02, v);
r.y = dot(M._m10_m11_m12, v);
return r;
}
sampler2D _MainTex;
float4 _MainTex_TexelSize;
// x contains 1.0/width
// y contains 1.0/height
// z contains width
// w contains height
uniform float _Lightness;
uniform float _Saturation;
uniform fixed4 _TintColor;
uniform sampler2D _MobileBlur;
float4 frag(v2f i) : SV_Target
{
float4 m = tex2D(_MainTex, i.uv);
float2 uvWH = float2(_MainTex_TexelSize.z / _ScreenParams.x,_MainTex_TexelSize.w / _ScreenParams.y);
uvWH.x *= _MainTex_TexelSize.x;
uvWH.y *= _MainTex_TexelSize.y;
float2 buv = float2(i.screenpos.x - (uvWH.x / 2),i.screenpos.y - (uvWH.y / 2));
float4 blurColor = float4(0,0,0,0);
blurColor = tex2D(_MobileBlur,buv);
//blurColor = tex2D(_MobileBlur,buv);
blurColor.a *= m.a;
float4 finalColor = blurColor * i.color;
finalColor.a = i.color.a * m.a * blurColor.a;
finalColor.rgb *= _Lightness;
finalColor.rgb *= _TintColor;
float3 intensity = dot(finalColor.rgb, float3(0.299,0.587,0.114));
finalColor.rgb = lerp(intensity, finalColor.rgb , _Saturation);
return finalColor;
}
ENDCG
}
Related
I have a shader for voxel rendering which do not support transparency.
Shader code:
Shader "Custom/Voxel"
{
    Properties
    {
        _Color ("Color", Color) = (1,1,1,1)
        _MainTex ("Albedo (RGB)", 2D) = "white" {}
        _Glossiness ("Smoothness", Range(0,1)) = 0.5
        _Metallic ("Metallic", Range(0,1)) = 0.0
        _AOColor ("AO Color", Color) = (0,0,0,1)
        _AOIntensity ("AO Intensity", Range(0, 1)) = 1.0
        _AOPower ("AO Power", Range(1, 10)) = 1.0
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 200

        CGPROGRAM
        // FIX: a surface shader declares its custom vertex function with the
        // "vertex:<name>" option on the #pragma surface line. The original had
        // a separate "#pragma vertex vert", which only applies to plain
        // vertex/fragment shaders, so the vert() AO modifier was never wired in.
        #pragma surface surf Standard fullforwardshadows vertex:vert
        #pragma target 3.0

        sampler2D _MainTex;

        struct Input
        {
            float3 position;      // object-space vertex position (copied in vert)
            float4 custom_uv;     // xy: tile-local UV, zw: atlas tile offset
            float4 color : COLOR; // rgb: AO tint, a: AO blend weight (set in vert)
        };

        half _Glossiness;
        half _Metallic;
        fixed4 _Color;
        int _AtlasX;
        int _AtlasY;
        fixed4 _AtlasRec;  // per-tile UV scale within the atlas
        half4 _AOColor;
        float _AOIntensity;
        float _AOPower;

        UNITY_INSTANCING_BUFFER_START(Props)
        UNITY_INSTANCING_BUFFER_END(Props)

        // Copies UV/position into the Input struct and bakes the AO tint into the
        // vertex color; alpha becomes the AO blend weight used in surf().
        void vert (inout appdata_full v, out Input o)
        {
            UNITY_INITIALIZE_OUTPUT(Input, o);
            o.custom_uv = v.texcoord;
            o.position = v.vertex;
            v.color.rgb = _AOColor;
            v.color.a = pow((1 - v.color.a) * _AOIntensity, _AOPower);
        }

        void surf (Input IN, inout SurfaceOutputStandard o)
        {
            // Resolve the tile-local UV (which repeats via frac) into the atlas.
            fixed2 atlasOffset = IN.custom_uv.zw;
            fixed2 scaledUV = IN.custom_uv.xy;
            fixed2 atlasUV = scaledUV;
            atlasUV.x = (atlasOffset.x * _AtlasRec.x) + frac(atlasUV.x) * _AtlasRec.x;
            atlasUV.y = (((_AtlasY - 1) - atlasOffset.y) * _AtlasRec.y) + frac(atlasUV.y) * _AtlasRec.y;
            // Albedo comes from a texture tinted by color. tex2Dgrad supplies
            // explicit gradients — presumably to avoid mip artifacts at the
            // frac() discontinuity; note the gradients are scaled by _AtlasRec.
            fixed4 c = tex2Dgrad(_MainTex, atlasUV, ddx(atlasUV * _AtlasRec), ddy(atlasUV * _AtlasRec)) * _Color;
            //fixed4 c = tex2D(_MainTex, atlasUV) * _Color;
            // Blend the AO tint over the albedo by the AO weight computed in vert().
            o.Albedo = lerp(c.rgb, IN.color.rgb, IN.color.a);
            o.Alpha = c.a;
            // Metallic and smoothness come from slider variables.
            o.Metallic = _Metallic;
            o.Smoothness = _Glossiness;
        }
        ENDCG
    }
    FallBack "Diffuse"
}
What I have changed/added by googling attempts:
Tags { "Queue"="Transparent" "RenderType" = "Transparent" }
Blend SrcAlpha OneMinusSrcAlpha
This did nothing.
Then I have tried to change:
#pragma surface surf Standard fullforwardshadows
with
#pragma surface surf Standard alpha
or
#pragma surface surf Standard fullforwardshadows alpha:fade
As a result, the blocks that should be transparent are, but the rest are somewhat transparent too (non-transparent tiles work fine in the original version):
https://i.imgur.com/EwMIQnq.png
So what I'm trying to do is load satellite images from an SQL table and wrap them around a sphere to create a globe. I know I've got loading the images covered, I'm just not sure how to make my shader display the images in the correct orientation.
I've gone to the Unity Forums as well as checked out this code from the Unity Docs.
Using the linked shader code and the help I received on the forums, here's the code I've ended up with:
Properties
{
_MainTexArray("Tex", 2DArray) = "" {}
_SliceRange("Slices", Range(0,32)) = 6
_UVScale("UVScale", Float) = 1
_COLUMNS("Columns", Range(0, 5)) = 1
_ROWS("Rows", Range(0, 5)) = 1
_CELLS("Cells", Range(0, 32)) = 16
}
SubShader
{
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// texture arrays are not available everywhere,
// only compile shader on platforms where they are
#pragma require 2darray
#include "UnityCG.cginc"
struct v2f
{
float3 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
float _SliceRange;
float _UVScale;
v2f vert(float4 vertex : POSITION)
{
v2f o;
o.vertex = UnityObjectToClipPos(vertex);
o.uv.xy = (vertex.xy + 0.5) * _UVScale;
o.uv.z = (vertex.z + 0.5) * _SliceRange;
return o;
}
float _COLUMNS; //Columns and rows only go between 0 and 1
float _ROWS;
float _CELLS;
UNITY_DECLARE_TEX2DARRAY(_MainTexArray);
half4 frag(v2f i) : SV_Target
{
float3 uv = float3(i.uv.x * _CELLS, i.uv.y * _CELLS, 0);
uv.z = floor(i.uv.x / _COLUMNS) * floor(i.uv.y / _ROWS);
return UNITY_SAMPLE_TEX2DARRAY(_MainTexArray, uv / _CELLS);
}
ENDCG
}
}
Using that I've gotten my materials to look like this:
Here's the code that I'm using to load the SQL images:
textures = new Texture2D[size];
for (int x = 0; x <= 7; x++)
{
for (int y = 0; y <= 3; y++)
{
textures[count] = tiler.Read(x, y, 2); //The z determines the zoom level, so I wouldn't want them all loaded at once
if (textures[count] != null) TextureScale.Bilinear(textures[count], 256, 256);
count++;
}
}
texArr = new Texture2DArray(256, 256, textures.Length, TextureFormat.RGBA32, true, true);
texArr.filterMode = FilterMode.Bilinear;
texArr.wrapMode = TextureWrapMode.Repeat;
for (int i = 0; i < textures.Length; i++)
{
if (textures[i] == null) continue;
texArr.SetPixels(textures[i].GetPixels(0), i, 0);
}
texArr.Apply();
mat.SetTexture("_MainTexArray", texArr);
In the SQL Table, the x and y determines the position of the tile and the z determines the zoom level. I'm just working with one zoom level for now.
Sorry for linking the whole shader class, but I'm not very experienced with shaders so I don't quite know where the problem lies.
If you can index into the array of photos such that you effectively have an equirectangular projection of the globe, you could try using a modified form of the shader code by Farfarer from the Unity forums copied and modified slightly below:
Shader "Custom/Equirectangular" {
Properties{
_MainTexArray("Tex", 2DArray) = "" {}
_COLUMNS("Columns", Int) = 2
_ROWS("Rows", Int) = 2
}
SubShader{
Pass {
Tags {"LightMode" = "Always"}
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// Texture arrays are not available on every platform; only compile where supported.
#pragma require 2darray
#include "UnityCG.cginc"
struct appdata {
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float4 pos : SV_POSITION;
float3 normal : TEXCOORD0;
};
v2f vert(appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
// Pass the object-space normal through; the fragment shader maps it to lat/long.
o.normal = v.normal;
return o;
}
UNITY_DECLARE_TEX2DARRAY(_MainTexArray);
int _ROWS;
int _COLUMNS;
#define PI 3.141592653589793
// Maps a direction vector to equirectangular UVs, both components in [0,1]:
// x from longitude (atan2 range [-pi,pi] remapped), y from latitude (acos range [0,pi], flipped).
inline float2 RadialCoords(float3 a_coords)
{
float3 a_coords_n = normalize(a_coords);
float lon = atan2(a_coords_n.z, a_coords_n.x);
float lat = acos(a_coords_n.y);
float2 sphereCoords = float2(lon, lat) * (1.0 / PI);
return float2(sphereCoords.x * 0.5 + 0.5, 1 - sphereCoords.y);
}
float4 frag(v2f IN) : COLOR
{
float2 equiUV = RadialCoords(IN.normal);
// Split the global UV into a tile index (integer part, via modf)
// and the UV within that tile (fractional part).
float2 texIndex;
float2 uvInTex = modf(equiUV * float2(_COLUMNS,_ROWS), texIndex);
// Column-major flattening: slices are ordered down each column, then across.
int flatTexIndex = texIndex.x * _ROWS + texIndex.y;
return UNITY_SAMPLE_TEX2DARRAY(_MainTexArray,
float3(uvInTex, flatTexIndex));
}
ENDCG
}
}
FallBack "VertexLit"
}
You also need to use
texArr = new Texture2DArray(256, 256, textures.Length, TextureFormat.RGBA32, false, true);
instead of
texArr = new Texture2DArray(256, 256, textures.Length, TextureFormat.RGBA32, true, false);
It works for me if I attach this script to a sphere:
Material myMat;
public List<Texture2D> texes;
// Coroutine: builds a 9-slice texture array from the 'texes' list one frame
// after startup and assigns it to this renderer's material.
IEnumerator Start()
{
    // Skip one frame before touching the renderer's material.
    yield return null;

    myMat = GetComponent<Renderer>().material;

    // 9 slices of 256x256 RGBA32, no mipmaps, linear color space.
    var textureArray = new Texture2DArray(256, 256, 9, TextureFormat.RGBA32, false, true)
    {
        filterMode = FilterMode.Bilinear,
        wrapMode = TextureWrapMode.Clamp
    };

    // Copy each source texture into its slice.
    int slice = 0;
    foreach (var tex in texes)
    {
        textureArray.SetPixels(tex.GetPixels(), slice, 0);
        slice++;
    }

    // Upload the pixel data to the GPU and hand the array to the shader.
    textureArray.Apply();
    myMat.SetTexture("_MainTexArray", textureArray);
}
and in texes I add these textures in order:
0:
1:
2:
3:
4:
5:
6:
7:
8:
and set 3 for Rows and Columns, it produces decent results:
If bilinear filtering is enabled, there are still some artifacts at the borders of the textures. But these artifacts have to be zoomed in quite close to see. Due to the lack of adjacent pixels for bilinear filtering they mostly appear as improperly blended or missing pixels:
Of course this example doesn't properly tile so there is an obvious seam along one longitude line:
Since this setup expects normals from a sphere, this only works on things with normals that approximate those of a sphere. So it would not render properly on a plane, for instance.
I have the following shader,
I'm trying to add an alpha to the white, however all attempts have proven to be difficult.
This is the project from which I obtained the shader for reference - github
I think it has to do with one of the passes overwriting.
Shader "Suibokuga/Suibokuga" {
Properties {
_MainTex ("Water Texture", 2D) = "white" {}
_Alpha ("Transfer/Diffusion coefficient for water particles", Range(0.01, 1.5)) = 1.0
_Evaporation ("a unit quantity of water for evaporation", Range(0.0001, 0.005)) = 0.00015
_PaperTex ("Paper Texture", 2D) = "white" {}
_Brush ("brush", Vector) = (-1, -1, -1, -1)
_Prev ("previous brush position", Vector) = (-1, -1, -1, -1)
}
CGINCLUDE
#include "UnityCG.cginc"
#pragma target 3.0
/*
* Channel layout of the simulation texture (_MainTex):
* r : water particles
* b : capacities of water
* a : the heights of the bottoms
*/
sampler2D _MainTex;
float4 _MainTex_TexelSize;
float _Alpha;
float _Evaporation;
sampler2D _PaperTex;
float2 _Prev;
float3 _Brush; // x,y : position, z : size
struct appdata {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f {
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
// Shared pass-through vertex shader for all three passes.
v2f vert (appdata IN) {
v2f OUT;
OUT.vertex = mul(UNITY_MATRIX_MVP, IN.vertex);
OUT.uv = IN.uv;
return OUT;
}
// Fetches the center texel (o) and its four neighbors: left, top, right, bottom.
void sample (float2 uv, out float4 o, out float4 l, out float4 t, out float4 r, out float4 b) {
float2 texel = _MainTex_TexelSize.xy;
o = tex2D(_MainTex, uv);
l = tex2D(_MainTex, uv + float2(-texel.x, 0));
t = tex2D(_MainTex, uv + float2( 0, -texel.y));
r = tex2D(_MainTex, uv + float2( texel.x, 0));
b = tex2D(_MainTex, uv + float2( 0, texel.y));
}
// Water transferred from neighbor k into center o; positive only when k's
// combined level (bottom height + water) exceeds o's, clamped to >= 0.
float waterDelta (float4 k, float4 o) {
float ld = (k.w + k.x) - (o.w + o.x); // level difference
float transfer = (k.w + k.x) - max(o.w, k.w + k.z); // transferable water particles
return max(
0.0,
0.25 * _Alpha * min(ld, transfer)
);
}
// Net water at uv after exchanging with the four neighbors (in minus out).
float waterFlow (float2 uv) {
float4 o, l, t, r, b;
sample(uv, o, l, t, r, b);
float nw = o.r;
nw += (waterDelta(l, o) - waterDelta(o, l));
nw += (waterDelta(t, o) - waterDelta(o, t));
nw += (waterDelta(r, o) - waterDelta(o, r));
nw += (waterDelta(b, o) - waterDelta(o, b));
return max(nw, 0);
}
// Removes one evaporation unit of water per step, never going below zero.
float evaporation (float wo) {
return max(wo - _Evaporation, 0.0);
}
// Brush contribution at uv: distance to the stroke segment from _Prev to
// _Brush, approximated by sampling 'count' points along it, shaped by smoothstep.
float brush (float2 uv) {
const int count = 10;
float2 dir = _Brush.xy - _Prev.xy;
float l = length(dir);
// Degenerate stroke (no movement): fall back to distance from the brush point.
if(l <= 0) {
float d = length(uv - _Brush.xy);
return smoothstep(0.0, _Brush.z, _Brush.z - d);
}
float ld = l / count;
float2 norm = normalize(dir);
float md = 100;
// Minimum distance from uv to the sampled points along the stroke.
for(int i = 0; i < count; i++) {
float2 p = _Prev.xy + norm * ld * i;
float d = length(uv - p);
if(d < md) {
md = d;
}
}
return smoothstep(0.0, _Brush.z, _Brush.z - md);
// float d = length(uv - _Brush.xy);
// return smoothstep(0.0, _Brush.z, _Brush.z - d);
}
ENDCG
SubShader {
Cull Off ZWrite Off ZTest Always
// Pass 0: initialize the simulation texture from the paper texture.
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment init
float4 init (v2f IN) : SV_Target {
float4 paper = tex2D(_PaperTex, IN.uv);
return float4(
0,
0,
paper.r,
paper.r
);
}
ENDCG
}
// Pass 1: one simulation step — flow, evaporation, then brush deposit.
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment waterUpdate
float4 waterUpdate (v2f IN) : SV_Target {
float4 col = tex2D(_MainTex, IN.uv);
col.x = evaporation(waterFlow(IN.uv));
float dw = brush(IN.uv);
// if(dw > 0) {
col.x = min(col.x + brush(IN.uv), 1.0);
// }
return col;
}
ENDCG
}
// Pass 2: visualization — ink darkness is the inverse of the water amount;
// alpha is hard-coded to 1.0 (fully opaque).
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment visualize
float4 visualize (v2f IN) : SV_Target {
float4 col = tex2D(_MainTex, IN.uv);
return float4(1.0 - col.xxx, 1.0);
}
ENDCG
}
}
}
You seem to be missing a blending step.
Try adding this line to the end of the last pass
Blend SrcAlpha OneMinusSrcAlpha
Check this page to learn about alpha blending for Unity shaders.
The line I attached is for standard transparency, you might want to try different blending options.
I'd like to create a tiled shader that wraps back onto itself as it offsets a transparent texture over time. The result will make a "moving walkway/travellator" across a mesh using a texture representing a single step.
This is what I have so far, but it doesn't wrap around (without relying on external settings, such as setting the texture's wrap mode to Repeat).
Shader "Custom/ScrollingTextureUnlitShader"
{
    Properties
    {
        _FadeValue ("Fade Value", Range(0, 1)) = 1
        _ColorTint ("Color Tint", Color) = (1,1,1,1)
        _MainTex ("Base (RGB)", 2D) = "white" {}
        _ScrollXSpeed ("X Scroll Speed", Range(-10, 10)) = -5
        _ScrollYSpeed ("Y Scroll Speed", Range(-10, 10)) = 0
    }
    SubShader
    {
        Tags { "RenderType"="Transparent" "Queue"="Transparent" }
        LOD 200

        CGPROGRAM
        #pragma surface surf Unlit alpha

        float _FadeValue;
        float4 _ColorTint;
        sampler2D _MainTex;
        fixed _ScrollXSpeed;
        fixed _ScrollYSpeed;

        struct Input
        {
            float2 uv_MainTex;
        };

        void surf (Input IN, inout SurfaceOutput o)
        {
            // FIX: use _Time.y, the time in seconds. Bare _Time is a float4 of
            // pre-scaled times (t/20, t, t*2, t*3); the original implicitly
            // truncated it to _Time.x, i.e. t/20, scrolling 20x too slowly.
            fixed xScrollValue = _ScrollXSpeed * _Time.y;
            fixed yScrollValue = _ScrollYSpeed * _Time.y;
            fixed2 scrolledUV = IN.uv_MainTex + fixed2(xScrollValue, yScrollValue);
            // FIX: frac() wraps the scrolled UVs back into [0,1) so the texture
            // repeats regardless of the texture asset's wrap-mode setting.
            half4 c = tex2D(_MainTex, frac(scrolledUV));
            o.Albedo = c.rgb * _ColorTint;
            o.Alpha = c.a * _FadeValue;
        }

        // Unlit lighting model: pass albedo and alpha through untouched.
        inline fixed4 LightingUnlit (SurfaceOutput s, fixed3 lightDir, fixed3 viewDir, fixed atten)
        {
            fixed4 c;
            c.rgb = s.Albedo;
            c.a = s.Alpha;
            return c;
        }
        ENDCG
    }
    FallBack "Transparent/Diffuse"
}
I have some sort of a problem. I'm new to XNA and want to draw a polygon shape that looks something like this (In the end, I want these point to be random):
So I read some articles and this is what I ended up with:
private VertexPositionColor[] vertices;
// Builds the vertex list on construction.
// NOTE(review): setupVertices() is defined elsewhere in this class and is not
// visible here — presumably it fills the 'vertices' field; confirm at its definition.
public TextureClass()
{
setupVertices();
}
// Draws a 30x30 brown rectangle anchored at the first vertex position.
// NOTE(review): this creates a new 1x1 Texture2D on every call and never
// disposes it — cache the texture in a field instead to avoid leaking GPU
// resources each frame.
public override void Render(SpriteBatch spriteBatch)
{
Texture2D texture = createTexture(spriteBatch);
spriteBatch.Draw(texture, new Rectangle((int)vertices[0].Position.X, (int)vertices[0].Position.Y, 30, 30), Color.Brown);
}
// Builds a single brown pixel; SpriteBatch stretches it to fill whatever
// destination rectangle it is drawn with.
private Texture2D createTexture(SpriteBatch spriteBatch)
{
    var pixel = new Texture2D(spriteBatch.GraphicsDevice, 1, 1, false, SurfaceFormat.Color);
    pixel.SetData<Color>(new[] { Color.Brown });
    return pixel;
}
When I call Render, it starts drawing some squares as if it were in a loop. I'm just guessing I'm doing it all wrong. I would love it if someone points me in the right direction — just creating a polygon and drawing it. It seemed so simple...
Here it what I have right now.
A class that generates a BasicEffect with some desired asignments.
/// <summary>
/// A BasicEffect preconfigured for 2D screen-space drawing: vertex colors
/// enabled and an orthographic projection matching the viewport, with the
/// origin at the top-left corner.
/// </summary>
public class StandardBasicEffect : BasicEffect
{
    public StandardBasicEffect(GraphicsDevice graphicsDevice)
        : base(graphicsDevice)
    {
        VertexColorEnabled = true;
        // Y runs top-down to match screen coordinates; depth range is [0, 1].
        Projection = Matrix.CreateOrthographicOffCenter(
            0,
            graphicsDevice.Viewport.Width,
            graphicsDevice.Viewport.Height,
            0,
            0,
            1);
    }

    public StandardBasicEffect(BasicEffect effect)
        : base(effect) { }

    /// <summary>Creates a copy of this effect.</summary>
    public BasicEffect Clone() => new StandardBasicEffect(this);
}
Here is my PolygonShape class
/// <summary>
/// A Polygon object that you will be able to draw.
/// Animations are being implemented as we speak.
/// </summary>
/// <param name="graphicsDevice">The graphics device from a Game object</param>
/// <param name="vertices">The vertices in a clockwise order</param>
public PolygonShape(GraphicsDevice graphicsDevice, VertexPositionColor[] vertices)
{
this.graphicsDevice = graphicsDevice;
this.vertices = vertices;
this.triangulated = false;
// Fan triangulation emits one triangle (3 vertices) per polygon edge.
triangulatedVertices = new VertexPositionColor[vertices.Length * 3];
// One entry per triangle: the slot in triangulatedVertices where it starts.
indexes = new int[vertices.Length];
}
/// <summary>
/// Triangulate the set of VertexPositionColors so it will be drawn correcrly
/// </summary>
/// <returns>The triangulated vertices array</returns>}
/// <summary>
/// Triangulate the set of VertexPositionColors so it will be drawn correctly:
/// computes the shared center point, then builds one triangle per polygon edge.
/// </summary>
/// <returns>The triangulated vertices array</returns>
public VertexPositionColor[] Triangulate()
{
    // FIX: removed a stray '{' that followed this call in the original and
    // unbalanced the method's braces (the body would not compile as written).
    calculateCenterPoint();
    setupIndexes();
    for (int i = 0; i < indexes.Length; i++)
    {
        setupDrawableTriangle(indexes[i]);
    }
    triangulated = true;
    return triangulatedVertices;
}
/// <summary>
/// Calculate the centroid of the vertex positions, used as the shared apex of
/// every fan triangle. The polygon is irregular, so this is not its true
/// geometric center, but it is sufficient as an extra point to build triangles with.
/// </summary>
private void calculateCenterPoint()
{
    float sumX = 0f;
    float sumY = 0f;
    for (int i = 0; i < vertices.Length; i++)
    {
        sumX += vertices[i].Position.X;
        sumY += vertices[i].Position.Y;
    }
    centerPoint = new Vector3(sumX / vertices.Length, sumY / vertices.Length, 0);
}
// Each triangle occupies three consecutive slots in triangulatedVertices,
// so triangle j starts at slot 3 * j.
private void setupIndexes()
{
    for (int j = 0; j < indexes.Length; j++)
    {
        indexes[j] = j * 3;
    }
}
// Writes one fan triangle into triangulatedVertices starting at 'index':
// the polygon edge (vertices[index/3], vertices[index/3 + 1]) plus the shared center.
private void setupDrawableTriangle(int index)
{
triangulatedVertices[index] = vertices[index / 3]; // integer division by the constant 3 — no division by zero possible
if (index / 3 != vertices.Length - 1)
triangulatedVertices[index + 1] = vertices[(index / 3) + 1];
else
// The last triangle wraps around to the first vertex to close the polygon.
triangulatedVertices[index + 1] = vertices[0];
// NOTE(review): only Position is assigned here, so the center vertex keeps
// the default Color — confirm this is intentional.
triangulatedVertices[index + 2].Position = centerPoint;
}
/// <summary>
/// Draw the polygon. If you haven't called Triangulate yet, it is called for you.
/// </summary>
/// <param name="effect">The BasicEffect needed for drawing</param>
public void Draw(BasicEffect effect)
{
    // FIX: removed the original try { ... } catch (Exception exception)
    // { throw exception; } wrapper — rethrowing via "throw exception;" only
    // resets the stack trace; it is otherwise equivalent to not catching at all.
    if (!triangulated)
        Triangulate();
    draw(effect);
}
// Issues the actual draw call: a TriangleList with one triangle per polygon vertex.
private void draw(BasicEffect effect)
{
effect.CurrentTechnique.Passes[0].Apply();
graphicsDevice.DrawUserPrimitives<VertexPositionColor>(
PrimitiveType.TriangleList, triangulatedVertices, 0, vertices.Length);
}
Sorry, it's kind of a lot. Now for my next quest: animating my polygon.
Hope it helped fellow people with the same problem.
This code is useful for drawing 2D lines; some calculations could be moved into an initialization call, but for this example I prefer to keep everything together.
// Draws 2D line segments: each consecutive pair of vertices is one segment.
// Some of this setup could be hoisted to an initialization call; it is kept
// inline here so the example is self-contained.
public void DrawLine(VertexPositionColor[] Vertices)
{
Game.GraphicsDevice.DepthStencilState = DepthStencilState.Default;
// Camera sits at the viewport center looking down +Z; the (0,-1,0) up vector
// flips Y so screen coordinates run top-down.
Vector2 center;
center.X = Game.GraphicsDevice.Viewport.Width * 0.5f;
center.Y = Game.GraphicsDevice.Viewport.Height * 0.5f;
Matrix View = Matrix.CreateLookAt( new Vector3( center, 0 ), new Vector3( center, 1 ), new Vector3( 0, -1, 0 ) );
Matrix Projection = Matrix.CreateOrthographic( center.X * 2, center.Y * 2, -0.5f, 1 );
// ContentManager caches loaded assets, so repeated Load calls return the same Effect instance.
Effect EffectLines = Game.Content.Load<Effect>( "lines" );
EffectLines.CurrentTechnique = EffectLines.Techniques["Lines"];
EffectLines.Parameters["xViewProjection"].SetValue( View * Projection );
EffectLines.Parameters["xWorld"].SetValue( Matrix.Identity );
foreach ( EffectPass pass in EffectLines.CurrentTechnique.Passes )
{
pass.Apply( );
// LineList: every two vertices form an independent segment, hence Length/2 primitives.
Game.GraphicsDevice.DrawUserPrimitives<VertexPositionColor>
( PrimitiveType.LineList, Vertices, 0, Vertices.Length/2 );
}
}
LINES.FX
// lines.fx — minimal pass-through effect for colored line rendering.
uniform float4x4 xWorld;
uniform float4x4 xViewProjection;
// Vertex shader: transform to clip space (world, then view-projection) and
// pass the vertex color through unchanged.
void VS_Basico(in float4 inPos : POSITION, in float4 inColor: COLOR0, out float4 outPos: POSITION, out float4 outColor:COLOR0 )
{
float4 tmp = mul (inPos, xWorld);
outPos = mul (tmp, xViewProjection);
outColor = inColor;
}
// Pixel shader: output the interpolated vertex color as-is.
float4 PS_Basico(in float4 inColor:COLOR) :COLOR
{
return inColor;
}
technique Lines
{
pass Pass0
{
VertexShader = compile vs_2_0 VS_Basico();
PixelShader = compile ps_2_0 PS_Basico();
FILLMODE = SOLID;
CULLMODE = NONE;
}
}
I worked with XNA in the past on a physics simulation where I had to draw bounding boxes with GraphicsDevice.DrawIndexedPrimitives (You should google or MSDN for this function for more worked examples.)
The below code is what I used in my project for drawing a 3D geometry.
/// <summary>
/// Draw the primitive with the given transforms and a flat diffuse color.
/// </summary>
/// <param name="world">World Matrix</param>
/// <param name="view">View Matrix</param>
/// <param name="projection">Projection Matrix</param>
/// <param name="color">Color of the primitive</param>
public void Draw(Matrix world, Matrix view, Matrix projection, Color color)
{
// Bind this geometry's vertex declaration, vertex buffer, and index buffer.
_mGraphicsDevice.VertexDeclaration = _mVertexDeclaration;
_mGraphicsDevice.Vertices[0].SetSource(_mVertexBuffer, 0, VertexPositionNormal.SizeInBytes);
_mGraphicsDevice.Indices = _mIndexBuffer;
_mBasicEffect.DiffuseColor = color.ToVector3();
// The object's local transform is applied before the caller-supplied world matrix.
_mBasicEffect.World = _mTransform * world;
_mBasicEffect.View = view;
_mBasicEffect.Projection = projection;
// Three indices per triangle.
int primitiveCount = _mIndex.Count / 3;
_mBasicEffect.Begin();
foreach (EffectPass pass in _mBasicEffect.CurrentTechnique.Passes)
{
pass.Begin();
_mGraphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, _mVertex.Count, 0, primitiveCount);
pass.End();
}
_mBasicEffect.End();
}
This function is a member method of a geometry object (class) and is called from the Game class' Draw(GameTime) method