I'm trying to implement, using SharpDX11, a ray/mesh intersection method using the GPU. I've seen from an older post (Older post) that this can be done using the Compute Shader; but I need help in order to create and define the buffer outside the .hlsl code.
My HLSL code is the following:
// Result record appended once per ray/triangle hit.
struct rayHit
{
float3 intersection;
};
// Per-dispatch ray settings, filled from the CPU side.
// NOTE(review): cbuffer packing — float3 members are padded to 16-byte
// slots, so the C# constant buffer layout must match (rayFrom at offset 0,
// rayDir at 16, TriangleCount at 28 or its own slot) — confirm against the
// struct uploaded from C#.
cbuffer cbRaySettings : register(b0)
{
float3 rayFrom;
float3 rayDir;
uint TriangleCount;
};
// Mesh vertex positions (t0) and triangle index triples (t1).
StructuredBuffer<float3> positionBuffer : register(t0);
StructuredBuffer<uint3> indexBuffer : register(t1);
// Output: one entry appended per intersected triangle (u0).
AppendStructuredBuffer<rayHit> appendRayHitBuffer : register(u0);
// Ray/triangle intersection (Moller-Trumbore style, matching SharpDX's
// Collision.RayIntersectsTriangle). Tests the ray from the cbRaySettings
// constants (rayFrom, rayDir) against triangle (p1, p2, p3).
// Outputs:
//   hit          - true when the ray crosses the triangle at or in front of
//                  the ray origin (no backface culling)
//   intersection - world-space hit point, zero vector on a miss
// Fixes over the original: each rejection branch now returns immediately
// (previously control fell through and the tail unconditionally reported
// hit = true), and the hit point is computed from the ray distance instead
// of the never-assigned local 'distance'.
void TestTriangle(float3 p1, float3 p2, float3 p3, out bool hit, out float3 intersection)
{
    // Edges of the triangle sharing vertex p1.
    float3 edge1 = p2 - p1;
    float3 edge2 = p3 - p1;

    // First part of the determinant: ray direction x edge2.
    float3 directioncrossedge2 = cross(rayDir, edge2);

    // Determinant = edge1 . (rayDir x edge2). Zero means the ray is parallel
    // to the triangle plane. NOTE(review): an epsilon compare may be more
    // robust than an exact == 0 here.
    float determinant = dot(edge1, directioncrossedge2);
    if (determinant == 0)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }
    float inversedeterminant = 1.0f / determinant;

    // Barycentric U of the intersection point.
    float3 distanceVector = rayFrom - p1;
    float triangleU = dot(distanceVector, directioncrossedge2) * inversedeterminant;
    if (triangleU < 0.0f || triangleU > 1.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    // Barycentric V of the intersection point.
    float3 distancecrossedge1 = cross(distanceVector, edge1);
    float triangleV = dot(rayDir, distancecrossedge1) * inversedeterminant;
    if (triangleV < 0.0f || triangleU + triangleV > 1.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    // Distance along the ray; negative means the triangle is behind the origin.
    float raydistance = dot(edge2, distancecrossedge1) * inversedeterminant;
    if (raydistance < 0.0f)
    {
        intersection = float3(0, 0, 0);
        hit = false;
        return;
    }

    intersection = rayFrom + (rayDir * raydistance);
    hit = true;
}
// One thread per triangle: fetch the triangle, test it against the global
// ray, and append the hit point (if any) to the output buffer.
[numthreads(64, 1, 1)]
void CS_RayAppend(uint3 tid : SV_DispatchThreadID)
{
    uint triangleIndex = tid.x;
    // Threads dispatched past the triangle count do nothing.
    if (triangleIndex < TriangleCount)
    {
        uint3 tri = indexBuffer[triangleIndex];
        bool wasHit;
        float3 hitPoint;
        TestTriangle(positionBuffer[tri.x], positionBuffer[tri.y], positionBuffer[tri.z], wasHit, hitPoint);
        if (wasHit)
        {
            rayHit hitData;
            hitData.intersection = hitPoint;
            appendRayHitBuffer.Append(hitData);
        }
    }
}
The following is part of my C# implementation, but I'm not able to understand how to load the buffers for the compute shader.
int count = obj.Mesh.Triangles.Count;

// Output buffer for appended hits. rayHit holds a single float3, so the
// structure stride is 12 bytes (3 * sizeof(float)) — the original used 8
// ("int+float"), which does not match the HLSL struct.
int hitStride = 12;
BufferDescription bufferDesc = new BufferDescription() {
    BindFlags = BindFlags.UnorderedAccess | BindFlags.ShaderResource,
    Usage = ResourceUsage.Default,
    CpuAccessFlags = CpuAccessFlags.None,
    OptionFlags = ResourceOptionFlags.BufferStructured,
    StructureByteStride = hitStride,
    SizeInBytes = hitStride * count // worst case: every triangle is hit
};
Buffer buffer = new Buffer(device, bufferDesc);

// The shader binds an AppendStructuredBuffer at u0, so the UAV must be
// created with the Append flag (Flags = None breaks Append()).
UnorderedAccessViewDescription uavDescription = new UnorderedAccessViewDescription() {
    Buffer = new UnorderedAccessViewDescription.BufferResource() { FirstElement = 0, Flags = UnorderedAccessViewBufferFlags.Append, ElementCount = count },
    Format = SharpDX.DXGI.Format.Unknown,
    Dimension = UnorderedAccessViewDimension.Buffer
};
UnorderedAccessView uav = new UnorderedAccessView(device, buffer, uavDescription);
// The trailing 0 resets the UAV's hidden append counter for this dispatch.
context.ComputeShader.SetUnorderedAccessView(0, uav, 0);

// '@' verbatim string — the original '#"..."' is a C# syntax error.
var code = HLSLCompiler.CompileFromFile(@"Shaders\TestTriangle.hlsl", "CS_RayAppend", "cs_5_0");
ComputeShader _shader = new ComputeShader(device, code);

// Vertex positions, bound as StructuredBuffer<float3> at t0. The buffer must
// be a ShaderResource, structured, and sized for the whole vertex array —
// not BindFlags.None with room for a single Vector3 as before.
// NOTE(review): 'data' is assumed to be a Vector3[] of vertex positions.
Buffer positionsBuffer = new Buffer(device, Utilities.SizeOf<Vector3>() * data.Length, ResourceUsage.Default, BindFlags.ShaderResource, CpuAccessFlags.None, ResourceOptionFlags.BufferStructured, Utilities.SizeOf<Vector3>());
context.UpdateSubresource(data, positionsBuffer);
context.ComputeShader.SetShaderResource(0, new ShaderResourceView(device, positionsBuffer));
// TODO(review): the shader also needs the index buffer SRV at t1 and the
// cbRaySettings constant buffer at b0 before dispatching.
context.ComputeShader.Set(_shader);
Inside my c# implementation i'm considering only one ray (with its origin and direction) and I would like to use the shader to check the intersection with all the triangles of the mesh. I'm already able to do that using the CPU but for 20k+ triangles the computation took too long even if i'm already using parallel coding.
Related
I am drawing lines on a canvas using the 'UIVertex' struct and I would like to be able to detect click events on the lines I have drawn.
Here is how I draw lines (largely inspired from this tutorial => https://www.youtube.com/watch?v=--LB7URk60A):
// Adds the two vertices for one end of a line segment: the point offset by
// +/- half the thickness, rotated by 'angle' around Z and translated to the
// point's canvas position.
void DrawVerticesForPoint(Vector2 point, float angle, VertexHelper vh)
{
    Quaternion rotation = Quaternion.Euler(0, 0, angle);
    Vector3 canvasPosition = new Vector3(unitWidth * point.x, unitHeight * point.y);

    vertex = UIVertex.simpleVert;
    //vertex.color = Color.red;

    vertex.position = rotation * new Vector3(-thickness / 2, 0);
    vertex.position += canvasPosition;
    vh.AddVert(vertex);

    vertex.position = rotation * new Vector3(thickness / 2, 0);
    vertex.position += canvasPosition;
    vh.AddVert(vertex);
}
Any idea?
Here is the solution I have found thanks to this post:
// True when 'point' lies within half the line's thickness of the segment
// between line.points[0] and line.points[1].
public bool PointIsOnLine(Vector3 point, UILineRenderer line)
{
    Vector3 start = line.points[0];
    Vector3 end = line.points[1];
    Vector3 direction = (end - start).normalized;

    // Project the point onto the segment and clamp to the segment's extent.
    // (Vector2.Dot/Distance drop the z component, exactly as the original did.)
    float projected = Vector2.Dot(point - start, direction);
    float clamped = Mathf.Clamp(projected, 0, (end - start).magnitude);
    Vector3 closest = start + direction * clamped;

    return Vector2.Distance(point, closest) < line.thickness / 2;
}
The UILineRenderer class is the class I have which represents my lines.
line.points[0] and line.points[1] contain the coordinates of the two points which determine the line length and position. line.thickness is the... thickness of the line :O
I just realized that a global variable in Unity must be initialized in a function. Being new to shaders, I can't tell if I am doing this the wrong way.
For example, when I define a const variable:
const float PI = 3.14159265359;
then try to write a code that uses in the frag functiion:
fixed4 frag(v2f i) : SV_Target
{
float result = PI * otherVariable;
///Use the result value...
}
It doesn't work. I get a black screen as the result, or the expected result won't show up.
The confusing part is that when I re-initialize the global variable (PI) again in the frag function, I get the expected result.
For example, this is what works:
float PI = 3.14159265359;
then try to write a code that uses in the frag function:
fixed4 frag(v2f i) : SV_Target
{
//re-initialize
PI = 3.14159265359;
float result = PI * otherVariable;
}
I have so many constant global variables that I can't keep re-initiated them.
One complete example of this problem is this Shadertoy code I ported.
Shader "Unlit/Atmospheric Scattering 2"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
// Vertex input: object-space position and texture coordinate.
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
// Vertex-to-fragment interpolants (position, UV, fog factor).
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
// Main texture and Unity's tiling/offset vector for it.
sampler2D _MainTex;
float4 _MainTex_ST;
// Standard Unity vertex stage: clip-space transform, texture tiling, fog.
v2f vert(appdata v)
{
    v2f output;
    output.vertex = UnityObjectToClipPos(v.vertex);
    output.uv = TRANSFORM_TEX(v.uv, _MainTex);
    UNITY_TRANSFER_FOG(output, output.vertex);
    return output;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
//https://www.shadertoy.com/view/lslXDr
// math const
// FIX: in Unity HLSL, a file-scope variable without 'static' is a uniform;
// its initializer is ignored and it reads as 0 unless set from C#. That is
// why these values previously had to be re-assigned inside frag(). Declaring
// them 'static' makes the initializers take effect at compile time.
static float PI = 3.14159265359;
static float DEG_TO_RAD = (3.14159265359 / 180.0);
static float MAX = 10000.0;
// scatter const
static float K_R = 0.166;
static float K_M = 0.0025;
static float E = 14.3;
static float3 C_R = float3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4 (was declared 'float', truncating the vector)
static float G_M = -0.85; // Mie g
static float R = 1.0;
static float R_INNER = 0.7;
static float SCALE_H = 4.0 / (1.0 - 0.7);
static float SCALE_L = 1.0 / (1.0 - 0.7);
static int NUM_OUT_SCATTER = 10;
static float FNUM_OUT_SCATTER = 10.0;
static int NUM_IN_SCATTER = 10;
static float FNUM_IN_SCATTER = 10.0;
// Rotation matrix for camera orientation; angle = (pitch, yaw).
float3x3 rot3xy(float2 angle) {
    float2 cosAngle = cos(angle);
    float2 sinAngle = sin(angle);
    return float3x3(
        cosAngle.y, 0.0, -sinAngle.y,
        sinAngle.y * sinAngle.x, cosAngle.x, cosAngle.y * sinAngle.x,
        sinAngle.y * cosAngle.x, -sinAngle.x, cosAngle.y * cosAngle.x
    );
}
// Direction of the camera ray through pixel 'pos' for a screen of 'size'
// pixels and a vertical field of view of 'fov' degrees.
float3 ray_dir(float fov, float2 size, float2 pos) {
    float2 centered = pos - size * 0.5;
    // cot(fov/2) written as tan(90 - fov/2): distance to the image plane.
    float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
    float planeDepth = size.y * 0.5 * cot_half_fov;
    return normalize(float3(centered, -planeDepth));
}
// Ray/sphere intersection (sphere centred at the origin, radius r).
// Returns the (near, far) ray distances: e = -b +/- sqrt( b^2 - c ),
// or (MAX, -MAX) when the ray misses.
float2 ray_vs_sphere(float3 p, float3 dir, float r) {
    float b = dot(p, dir);
    float c = dot(p, p) - r * r;
    float discriminant = b * b - c;
    if (discriminant < 0.0) {
        return float2(MAX, -MAX);
    }
    float root = sqrt(discriminant);
    return float2(-b - root, -b + root);
}
// Mie phase function, for g in ( -0.75, -0.999 ):
//          3 * ( 1 - g^2 )               1 + c^2
// F = ----------------- * -------------------------------
//      2 * ( 2 + g^2 )     ( 1 + g^2 - 2 * g * c )^(3/2)
float phase_mie(float g, float c, float cc) {
    float gg = g * g;
    float numerator = (1.0 - gg) * (1.0 + cc);
    float denominator = 1.0 + gg - 2.0 * g * c;
    denominator *= sqrt(denominator);
    denominator *= 2.0 + gg;
    return 1.5 * numerator / denominator;
}
// Rayleigh phase function (g = 0): F = 3/4 * ( 1 + c^2 ).
float phase_reyleigh(float cc) {
    float phase = 0.75 * (1.0 + cc);
    return phase;
}
// Atmosphere density at p: exponential falloff with altitude above the
// inner (planet) sphere, controlled by SCALE_H.
float density(float3 p) {
return exp(-(length(p) - R_INNER) * SCALE_H);
}
// Optical depth along the segment p -> q, estimated with NUM_OUT_SCATTER
// midpoint samples of density().
float optic(float3 p, float3 q) {
    float3 delta = (q - p) / FNUM_OUT_SCATTER;
    float3 samplePoint = p + delta * 0.5;
    float accumulated = 0.0;
    for (int i = 0; i < NUM_OUT_SCATTER; i++) {
        accumulated += density(samplePoint);
        samplePoint += delta;
    }
    // Weight the sum by the sample spacing (Riemann-sum style).
    accumulated *= length(delta) * SCALE_L;
    return accumulated;
}
// In-scattered light along the view ray between distances e.x and e.y.
// o: ray origin, dir: view direction (unit), l: sun direction (unit).
float3 in_scatter(float3 o, float3 dir, float2 e, float3 l) {
// Sample spacing along the view ray.
float len = (e.y - e.x) / FNUM_IN_SCATTER;
float3 step = dir * len;
float3 p = o + dir * e.x;
// Midpoint of the first sample interval.
float3 v = p + dir * (len * 0.5);
float3 sum = float3(0.,0.,0.);
for (int i = 0; i < NUM_IN_SCATTER; i++) {
// Exit distance of the sun ray from the outer sphere.
float2 f = ray_vs_sphere(v, l, R);
float3 u = v + l * f.y;
// Total optical depth: camera->sample plus sample->sun.
float n = (optic(p, v) + optic(v, u)) * (PI * 4.0);
sum += density(v) * exp(-n * (K_R * C_R + K_M));
v += step;
}
sum *= len * SCALE_L;
//sum = mul(sum,len * SCALE_L);
float c = dot(dir, -l);
float cc = c * c;
// Combine Rayleigh and Mie phase terms, scaled by sun intensity E.
return sum * (K_R * C_R * phase_reyleigh(cc) + K_M * phase_mie(G_M, c, cc)) * E;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Full-screen atmospheric-scattering pass (port of Shadertoy lslXDr).
// NOTE(review): the block below re-assigns every file-scope "constant"
// because, without 'static', those globals are uniforms whose initializers
// are ignored (they read as 0, hence the black screen). Declaring them
// 'static' at file scope would make this workaround unnecessary.
fixed4 frag(v2f i) : SV_Target
{
//Re-initialize BEGIN
// math const
PI = 3.14159265359;
DEG_TO_RAD = (3.14159265359 / 180.0);
MAX = 10000.0;
// scatter const
K_R = 0.166;
K_M = 0.0025;
E = 14.3;
C_R = float3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4
G_M = -0.85; // Mie g
R = 1.0;
R_INNER = 0.7;
SCALE_H = 4.0 / (1.0 - 0.7);
SCALE_L = 1.0 / (1.0 - 0.7);
NUM_OUT_SCATTER = 10;
FNUM_OUT_SCATTER = 10.0;
NUM_IN_SCATTER = 10;
FNUM_IN_SCATTER = 10.0;
//Re-initialize END
float4 fragColor = 0;
float2 fragCoord = i.vertex.xy;
// default ray dir
float3 dir = ray_dir(45.0, _ScreenParams.xy, fragCoord.xy);
// default ray origin
float3 eye = float3(0.0, 0.0, 2.4);
// rotate camera
float3x3 rot = rot3xy(float2(0.0, _Time.y * 0.5));
/* dir = rot * dir;
eye = rot * eye;*/
dir = mul(rot,dir);
eye = mul(rot,eye);
// sun light dir
float3 l = float3(0, 0, 1);
// View-ray span through the outer atmosphere sphere; miss -> discard.
float2 e = ray_vs_sphere(eye, dir, R);
if (e.x > e.y) {
discard;
}
// Clip the span against the inner (planet) sphere.
float2 f = ray_vs_sphere(eye, dir, R_INNER);
e.y = min(e.y, f.x);
float3 I = in_scatter(eye, dir, e, l);
fragColor = float4(I, 1.0);
return fragColor;
}
ENDCG
}
}
}
This shader is attached to a Material that is then attached to the camera with the code below:
[ExecuteInEditMode]
public class CameraEffect : MonoBehaviour
{
    public Material mat;

    // Called by the camera to apply the image effect.
    // (Keeps Unity's overloaded == so a destroyed material counts as null.)
    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (mat == null)
        {
            Graphics.Blit(source, destination);
            return;
        }

        Graphics.Blit(source, destination, mat);
    }
}
It works fine, but notice how I had to re-initialize PI, DEG_TO_RAD, MAX and the other global variables... Without doing that it won't work. The screen is simply black, and this is not the first shader that has caused this same issue.
Why is this happening?
Am I declaring the variables the wrong way?
Hi!
Any constants you can write like this:
//float my_constant = 104.3;
#define my_constant 104.3f
You can create a custom library for shaders and use its variables and functions as you wish. For example, create a file named MyConstants.cginc and put it in your project. Code:
// Include guard so the file can be #included from several shaders safely.
#ifndef MY_CONSTANTS_INCLUDED
#define MY_CONSTANTS_INCLUDED
// Constants as preprocessor defines: substituted at compile time, so they
// can never end up as zero-initialized uniforms the way plain global
// 'float' variables do in Unity shaders.
#define DEG_TO_RAD 0.01745329251994f
#define MAX 10000.0f
#define PI 3.14159265359f
//scatter const
// .....
// .....
// Camera ray through pixel 'pos' for screen 'size' and vertical 'fov' degrees.
float3 ray_dir(float fov, float2 size, float2 pos) {
float2 xy = pos - size * 0.5;
float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
float z = size.y * 0.5 * cot_half_fov;
return normalize(float3(xy, -z));
}
//.... and other methods
#endif
And Using library in your shader
//.....
#include "UnityCG.cginc"
#include "MyConstants.cginc"
//.....
I'm using a simple attenuation algorithm to darken walls based on their distance from light sources.
The end goal is to develop a light-mapping system in which the brightness for each wall is calculated in a pre-pass (including shadowing from other walls), and then that light-map image is blended with the wall texture.
+ =
Besides shadowing, I have the light-maps working, and the result replicate the shader code exactly. The problem is it is slow, and adding raycasted shadow checking is only going to make it worse.
My question is this, how can I perform these calculations on the GPU? Is a third party library/module required, or can it be done natively through OpenGL (OpenTK in my case)?
Alternatively, I'd be happy to switch to deferred rendering/lighting with cube shadow mapping but I'm yet to come across any information I can get my head around.
c# lightmap (run once for each wall)
// Bakes a per-wall light map: for every texel, accumulates the attenuated
// diffuse contribution of each light and writes the result into a Bitmap,
// which is then uploaded as this wall's light-map texture.
// NOTE(review): Bitmap.SetPixel per texel is very slow; LockBits over the
// raw pixel data would remove most of the CPU cost complained about.
public void createLightMap()
{
// Determine Light Map dimensions
int LightMapSize = 300;
int w = (int)(this.Width * LightMapSize);
int h = (int)(this.Height * LightMapSize);
// Create Bitmap
Bitmap bitmap = new Bitmap(w, h);
// Fragment testing
// Walk the wall surface starting from its (x2, Height, z2) corner.
Vector3 fragmentPosition = new Vector3(this.x2, this.Height, this.z2);
float xIncement = (1f / LightMapSize) * ((x2 - x) / this.Width);
// NOTE(review): the z increment also divides by this.Width — should this
// be the wall's depth/length instead? Confirm against how Width is defined.
float zIncement = (1f / LightMapSize) * ((z2 - z) / this.Width);
float yIncement = (1f / LightMapSize);
// Calculate Light value for each pixel
for (int x = 0; x < w; x++) {
for (int y = 0; y < h; y++)
{
// Update fragment position
fragmentPosition.X = this.x2 - xIncement -(xIncement * x);
fragmentPosition.Z = this.z2 - (zIncement * x);
fragmentPosition.Y = this.Height - (yIncement * y);
Vector3 totalDiffuse = Vector3.Zero;
// Iterate through the lights
// NOTE(review): light count is hard-coded to 2; presumably this should be
// Game.lights.Count (or .Length) — confirm.
for (int n = 0; n < 2; n++)
{
Light light = Game.lights[n];
Vector3 LightPosition = new Vector3(light.Position);
Vector3 Attenuation = new Vector3(light.Attenuation);
Vector3 Colour = new Vector3(light.Colour);
Vector3 toLightVector = LightPosition - fragmentPosition;
// Return early if wall is facing away from light
if (Vector3.Dot(this.normalVector, toLightVector.Normalized()) < 0)
continue;
// Calculate vector length (aka, distance from lightsource)
float distance = (float)Math.Sqrt(toLightVector.X * toLightVector.X + toLightVector.Y * toLightVector.Y + toLightVector.Z * toLightVector.Z);
// Attenuation
// Constant + linear + quadratic falloff, matching the fragment shader.
float attFactor = Attenuation.X + (Attenuation.Y * distance) + (Attenuation.Z * distance * distance);
Vector3 diffuse = Colour / attFactor;
totalDiffuse += diffuse;
}
// Create bitmap
// Scale [0,1] diffuse to byte range, clamped below to 255.
var r = (int)(totalDiffuse.X * 256);
var g = (int)(totalDiffuse.Y * 256);
var b = (int)(totalDiffuse.Z * 256);
r = Math.Min(r, 255);
g = Math.Min(g, 255);
b = Math.Min(b, 255);
// Set Pixel
bitmap.SetPixel(x, y, Color.FromArgb(r, g, b));
}
}
this.LightMapTextureID = Texture.loadImage(bitmap);
}
Fragment shader (an alternative to above light-mapping, creating the same effect)
#version 330
precision highp float;

in vec2 frag_texcoord;
in vec3 toLightVector[8];

uniform sampler2D MyTexture0;
uniform vec3 LightColour[8];
uniform vec3 LightAttenuation[8];
uniform float NumberOfLights;

out vec4 finalColor;

// Accumulates the attenuated diffuse contribution of each light and
// modulates the wall texture with it.
// Fixes over the original: totalDiffuse is now initialized (it was read
// while undefined), the loop bound is cast to int, the stray double brace
// is removed, the missing ';' is restored, and finalColor is written once
// after the loop instead of on every iteration.
void main(void)
{
    vec3 totalDiffuse = vec3(0.0);
    for (int i = 0; i < int(NumberOfLights); i++)
    {
        float distance = length(toLightVector[i]);
        // Constant + linear + quadratic attenuation.
        float attFactor = LightAttenuation[i].x + (LightAttenuation[i].y * distance) + (LightAttenuation[i].z * distance * distance);
        totalDiffuse += LightColour[i] / attFactor;
    }
    finalColor = vec4(totalDiffuse, 1.0) * texture(MyTexture0, frag_texcoord);
}
My project's main goal is to project a player character from a 3D environment to a 2D screen. I already found some useful math online, but in the last four days I couldn't make it work. Every time I move the character, it appears in random places and/or goes off screen. Sometimes it follows horizontally, but usually never vertically.
My question is pretty simple: What am I doing wrong?
// A few numbers to play with:
// Case #1: myPos: 1104.031, 3505.031, -91.9875; myMousePos: 0, 180; myRotation: 153, 153, 25; playerPos: 1072, 3504, -91.9687 (Player Middle of Screen, Standing Behind Player)
// Case #2: myPos: 511.7656, 3549.25, -28.02344; myMousePos: 0, 347.5854; myRotation: 44, 2, 22; playerPos: 1632, 3232, -91.96875 (Player Middle of Screen, I stand higher and 1166 away)
// Case #3: myPos: 1105.523, 2898.336, -11.96875; myMousePos: 0, 58.67249; myRotation: 232, 184, 159; playerPos 1632, 3232, -91.96875 (Player Right, Lower Of Screen)
// Camera Euler angles (degrees) used to build the view basis.
Vect3d viewAngles;
// Camera basis vectors, filled by AngleVectors().
Vect3d vForward, vRight, vUpward;
// Resulting screen-space coordinates of the projected point.
float ScreenX, ScreenY;
// fov[0]/fov[1] are filled from the mouse angles — see the review notes below.
float[] fov;
// Projects 'playerpos' (world) into screen space relative to the camera at
// 'mypos' with the orientation taken from 'myself'.
// NOTE(review): a C# identifier cannot start with a digit — '3dWorldTo2dScreen'
// will not compile; this must be a transcription of differently-named code.
bool 3dWorldTo2dScreen(Vect3d playerpos, Vect3d mypos, PlayerData myself)
{
fov = new float[2];
// NOTE(review): missing 'new' (unless Vect3d() is a factory method); the
// value is overwritten field-by-field below anyway.
viewAngles = Vect3d();
Vect3d vLocal, vTransForm;
vTransForm = new Vect3d();
vForward = new Vect3d();
vRight = new Vect3d();
vUpward = new Vect3d();
// NOTE(review): these are mouse/view ANGLES, not a field of view. Per the
// comment below, fov[0] is 0 when looking at the middle — the 1/fov[0]
// term at the bottom then divides by zero. A perspective projection would
// normally scale by cot(fov/2), not by 1/angle — confirm the intended math.
fov[0] = myself.MouseX; // Sky: -89, Ground: 89, Middle: 0
fov[1] = myself.MouseY; // 360 To 0
viewAngles.x = myself.Rotation.x;
viewAngles.y = myself.Rotation.y;
viewAngles.z = myself.Rotation.z;
// Half the target resolution (640x480).
int screenCenterX = 320; // 640
int screenCenterY = 240; // 480
// Build vForward/vRight/vUpward from viewAngles.
AngleVectors();
// Camera-relative position, then projected onto the camera basis.
vLocal = SubVectorDist(playerpos, mypos);
vTransForm.x = vLocal.dotproduct(vRight);
vTransForm.y = vLocal.dotproduct(vUpward);
vTransForm.z = vLocal.dotproduct(vForward);
// Behind (or effectively at) the camera: not visible.
if (vTransForm.z < 0.01)
return false;
ScreenX = screenCenterX + (screenCenterX / vTransForm.z * (1 / fov[0])) * vTransForm.x;
ScreenY = screenCenterY - (screenCenterY / vTransForm.z * (1 / fov[1])) * vTransForm.y;
return true;
}
// Component-wise difference: playerFrom - playerTo.
Vect3d SubVectorDist(Vect3d playerFrom, Vect3d playerTo)
{
    float dx = playerFrom.x - playerTo.x;
    float dy = playerFrom.y - playerTo.y;
    float dz = playerFrom.z - playerTo.z;
    return new Vect3d(dx, dy, dz);
}
// Builds the camera basis (vForward, vRight, vUpward) from viewAngles,
// treating x as pitch, y as yaw and z as roll (Quake-style AngleVectors).
// NOTE(review): cpi converts degrees to radians using a low-precision pi
// (3.141); Math.PI would be more accurate.
private void AngleVectors()
{
float angle;
float sr, sp, sy, cr, cp, cy,
cpi = (3.141f * 2 / 360);
// Yaw (y axis).
angle = viewAngles.y * cpi;
sy = (float)Math.Sin(angle);
cy = (float)Math.Cos(angle);
// Pitch (x axis).
angle = viewAngles.x * cpi;
sp = (float)Math.Sin(angle);
cp = (float)Math.Cos(angle);
// Roll (z axis).
angle = viewAngles.z * cpi;
sr = (float)Math.Sin(angle);
cr = (float)Math.Cos(angle);
vForward.x = cp * cy;
vForward.y = cp * sy;
vForward.z = -sp;
vRight.x = (-1 * sr * sp * cy + -1 * cr * -sy);
vRight.y = (-1 * sr * sp * sy + -1 * cr * cy);
vRight.z = -1 * sr * cp;
vUpward.x = (cr * sp * cy + -sr * -sy);
vUpward.y = (cr * sp * sy + -sr * cy);
vUpward.z = cr * cp;
}
To get the line of intersection between two rectangles in 3D, I converted them to planes, then get the line of intersection using cross product of their normals, then I try to get the line intersection with each line segment of the rectangle.
The problem is that the line is reported as parallel to three of the segments, and intersects only one of them at (NaN, NaN, NaN), which is totally wrong. Can you advise me on what's wrong in my code?
I use vector3 from this link http://www.koders.com/csharp/fidCA8558A72AF7D3E654FDAFA402A168B8BC23C22A.aspx
and created my plane class as following
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace referenceLineAlgorithm
{
// Infinite 3D line in point + direction form.
struct Line
{
public Vector3 direction;
public Vector3 point;
}
// 3D line segment between two endpoints.
struct lineSegment
{
public Vector3 firstPoint;
public Vector3 secondPoint;
}
/// <summary>
/// Plane through a 3D rectangle, with helpers to intersect it against
/// another rectangle's plane and clip the resulting line to this
/// rectangle's edges.
/// Fixes over the original: the point on the plane/plane intersection line
/// is now computed with the closed-form two-plane formula (it previously
/// mixed GetDistance(LineDirection) with the still-uninitialized FirstPoint),
/// the segment/line solver guards the determinant instead of dividing by
/// zero (the source of the NaN results), the Cramer term for k was corrected,
/// and crossings are now verified in 3D and clamped to the segment (0<=t<=1).
/// </summary>
class plane_test
{
    public enum Line3DResult
    {
        Line3DResult_Parallel = 0,
        Line3DResult_SkewNoCross = 1,
        Line3DResult_SkewCross = 2
    };

    #region Fields
    public Vector3 Normal;                   // unit-length plane normal
    public float D;                          // plane equation: Normal.Dot(p) + D = 0
    public Vector3[] cornersArray;           // rectangle corners as passed to the ctor
    public Vector3 FirstPoint;               // first endpoint of the clipped intersection line
    public Vector3 SecondPoint;              // second endpoint of the clipped intersection line
    public Vector3 temp;                     // point on the plane/plane intersection line
    public Vector3 normalBeforeNormalization;
    #endregion

    #region constructors
    /// <summary>
    /// Builds the plane through the rectangle (point0..point3). Corners are
    /// stored in the given order; edges are taken between consecutive entries.
    /// </summary>
    public plane_test(Vector3 point0, Vector3 point1, Vector3 point2, Vector3 point3)
    {
        Vector3 edge1 = point1 - point0;
        Vector3 edge2 = point2 - point0;
        Normal = edge1.Cross(edge2);
        normalBeforeNormalization = Normal;
        Normal.Normalize();
        D = -Normal.Dot(point0);
        ///// Set the Rectangle corners
        cornersArray = new Vector3[] { point0, point1, point2, point3 };
    }
    #endregion

    #region Methods
    /// <summary>
    /// This is a pseudodistance. The sign of the return value is
    /// positive if the point is on the positive side of the plane,
    /// negative if the point is on the negative side, and zero if the
    /// point is on the plane. Since Normal is normalized in the
    /// constructor, the absolute value is the true distance.
    /// </summary>
    public float GetDistance(Vector3 point)
    {
        return Normal.Dot(point) + D;
    }

    /// <summary>
    /// Intersects this rectangle's plane with SecondOne's plane, then clips
    /// the resulting line against this rectangle's edges. On success,
    /// FirstPoint and SecondPoint hold the endpoints of the clipped segment.
    /// </summary>
    public void Intersection(plane_test SecondOne)
    {
        // Direction of the plane/plane intersection line.
        Vector3 LineDirection = Normal.Cross(SecondOne.Normal);

        // A (near-)zero cross product means the planes are parallel or
        // coplanar: there is no unique intersection line.
        if (LineDirection.Dot(LineDirection) < 1e-6f)
            return;

        // A point on both planes, expressed as a*n1 + b*n2. Solving
        // n1.p = -D1 and n2.p = -D2 with c = n1.n2 (unit normals) gives:
        //   a = (c*D2 - D1) / (1 - c^2),  b = (c*D1 - D2) / (1 - c^2)
        float c = Normal.Dot(SecondOne.Normal);
        float denom = 1.0f - c * c;
        float a = (c * SecondOne.D - D) / denom;
        float b = (c * D - SecondOne.D) / denom;
        temp = (Normal * a) + (SecondOne.Normal * b);

        Line line;
        line.direction = LineDirection;
        line.point = temp;

        ////////// Line segments between consecutive stored corners.
        lineSegment AB, BC, CD, DA;
        AB.firstPoint = cornersArray[0]; AB.secondPoint = cornersArray[1];
        BC.firstPoint = cornersArray[1]; BC.secondPoint = cornersArray[2];
        CD.firstPoint = cornersArray[2]; CD.secondPoint = cornersArray[3];
        DA.firstPoint = cornersArray[3]; DA.secondPoint = cornersArray[0];

        // Sentinel meaning "no intersection on this edge".
        Vector3 r1 = new Vector3(-1, -1, -1);
        Vector3 r2 = new Vector3(-1, -1, -1);
        Vector3 r3 = new Vector3(-1, -1, -1);
        Vector3 r4 = new Vector3(-1, -1, -1);

        IntersectionPointBetweenLines(AB, line, ref r1);
        IntersectionPointBetweenLines(BC, line, ref r2);
        IntersectionPointBetweenLines(CD, line, ref r3);
        IntersectionPointBetweenLines(DA, line, ref r4);

        List<Vector3> points = new List<Vector3>();
        points.Add(r1);
        points.Add(r2);
        points.Add(r3);
        points.Add(r4);
        points.RemoveAll(
            t => ((t.x == -1) && (t.y == -1) && (t.z == -1))
        );

        // The line either crosses two edges of the rectangle or misses it.
        if (points.Count == 2)
        {
            FirstPoint = points[0];
            SecondPoint = points[1];
        }
    }

    /// <summary>
    /// Intersects segment 'first' with the infinite line 'aSecondLine'.
    /// 'result' is written only when the lines truly cross in 3D and the
    /// crossing lies within the segment; otherwise it is left untouched.
    /// </summary>
    public Line3DResult IntersectionPointBetweenLines(lineSegment first, Line aSecondLine, ref Vector3 result)
    {
        Vector3 p1 = first.firstPoint;
        Vector3 n1 = first.secondPoint - first.firstPoint;
        Vector3 p2 = aSecondLine.point;
        Vector3 n2 = aSecondLine.direction;

        // Solve p1 + t*n1 = p2 + k*n2 by Cramer's rule in whichever 2D
        // projection has a non-degenerate determinant, then verify in 3D.
        float d, dt, dk;
        if (Math.Abs(n1.x * n2.y - n2.x * n1.y) > float.Epsilon)
        {
            // xy projection
            d = n1.x * (-n2.y) - (-n2.x) * n1.y;
            dt = (p2.x - p1.x) * (-n2.y) - (p2.y - p1.y) * (-n2.x);
            dk = n1.x * (p2.y - p1.y) - n1.y * (p2.x - p1.x);
        }
        else if (Math.Abs(n1.z * n2.y - n2.z * n1.y) > float.Epsilon)
        {
            // zy projection
            d = n1.z * (-n2.y) - (-n2.z) * n1.y;
            dt = (p2.z - p1.z) * (-n2.y) - (p2.y - p1.y) * (-n2.z);
            dk = n1.z * (p2.y - p1.y) - n1.y * (p2.z - p1.z);
        }
        else if (Math.Abs(n1.x * n2.z - n2.x * n1.z) > float.Epsilon)
        {
            // xz projection
            d = n1.x * (-n2.z) - (-n2.x) * n1.z;
            dt = (p2.x - p1.x) * (-n2.z) - (p2.z - p1.z) * (-n2.x);
            dk = n1.x * (p2.z - p1.z) - n1.z * (p2.x - p1.x);
        }
        else
        {
            // All three projections are degenerate: the directions are
            // parallel. (The original relied on AreLinesParallel() and then
            // divided by d == 0 here, which produced the NaN results.)
            return Line3DResult.Line3DResult_Parallel;
        }

        float t = dt / d;
        float k = dk / d;
        Vector3 pointOnSegment = n1 * t + p1;
        Vector3 pointOnLine = n2 * k + p2;

        // The 2D solution can come from skew lines; require the two 3D
        // points to coincide. TODO(review): tolerance may need tuning for
        // the coordinate scale in use.
        Vector3 gap = pointOnSegment - pointOnLine;
        if (gap.Dot(gap) > 1e-2f)
            return Line3DResult.Line3DResult_SkewNoCross;

        // Reject crossings outside the segment endpoints.
        if (t < 0.0f || t > 1.0f)
            return Line3DResult.Line3DResult_SkewNoCross;

        result = pointOnSegment;
        return Line3DResult.Line3DResult_SkewCross;
    }

    // NOTE(review): retained for compatibility but no longer used — the
    // component-ratio comparison is unreliable with floating point, and the
    // determinant guard above handles parallelism robustly.
    private bool AreLinesParallel(lineSegment first, Line aSecondLine)
    {
        Vector3 vector = (first.secondPoint - first.firstPoint);
        vector.Normalize();
        float kl = 0, km = 0, kn = 0;
        if (vector.x != aSecondLine.direction.x)
        {
            if (vector.x != 0 && aSecondLine.direction.x != 0)
            {
                kl = vector.x / aSecondLine.direction.x;
            }
        }
        if (vector.y != aSecondLine.direction.y)
        {
            if (vector.y != 0 && aSecondLine.direction.y != 0)
            {
                km = vector.y / aSecondLine.direction.y;
            }
        }
        if (vector.z != aSecondLine.direction.z)
        {
            if (vector.z != 0 && aSecondLine.direction.z != 0)
            {
                kn = vector.z / aSecondLine.direction.z;
            }
        }
        // both if all are null or all are equal, the lines are parallel
        return (kl == km && km == kn);
    }

    // NOTE(review): retained for compatibility but unused — the component
    // ratios divide by zero for axis-aligned segments; the 0 <= t <= 1 check
    // in IntersectionPointBetweenLines replaces it.
    private bool isPointOnSegment(lineSegment segment, Vector3 point)
    {
        //(x - x1) / (x2 - x1) = (y - y1) / (y2 - y1) = (z - z1) / (z2 - z1)
        float component1 = (point.x - segment.firstPoint.x) / (segment.secondPoint.x - segment.firstPoint.x);
        float component2 = (point.y - segment.firstPoint.y) / (segment.secondPoint.y - segment.firstPoint.y);
        float component3 = (point.z - segment.firstPoint.z) / (segment.secondPoint.z - segment.firstPoint.z);
        if ((component1 == component2) && (component2 == component3))
        {
            return true;
        }
        else
        {
            return false;
        }
    }
    #endregion
}
}
// Test driver: a horizontal rectangle (y = -160) and a vertical rectangle
// (z = -21.5); their planes intersect along a line parallel to the x axis.
// NOTE(review): the corners are passed in the order (p0, p1, p3, p2)-style —
// e.g. (point11, point21, point41, point31) — so consecutive entries of
// cornersArray are not all true edges; confirm the intended corner order.
static void Main(string[] args)
{
//// create the first plane points
Vector3 point11 =new Vector3(-255.5f, -160.0f,-1.5f) ; //0,0
Vector3 point21 = new Vector3(256.5f, -160.0f, -1.5f); //0,w
Vector3 point31 = new Vector3(256.5f, -160.0f, -513.5f); //h,0
Vector3 point41 = new Vector3(-255.5f, -160.0f, -513.5f); //w,h
plane_test plane1 = new plane_test(point11, point21, point41, point31);
//// create the Second plane points
Vector3 point12 = new Vector3(-201.6289f, -349.6289f, -21.5f);
Vector3 point22 =new Vector3(310.3711f,-349.6289f,-21.5f);
Vector3 point32 = new Vector3(310.3711f, 162.3711f, -21.5f);
Vector3 point42 =new Vector3(-201.6289f,162.3711f,-21.5f);
plane_test plane2 = new plane_test(point12, point22, point42, point32);
plane2.Intersection(plane1);
}
and this is test values
Best regards
You need to specify one thing first:
by 3D rectangle, you mean plane rectangle on a 3D plane. (not a
rectangular prism).
Let's say your rectangles are not coplanar nor parallele, and therefore there is one unique line D1 that represents the intersection of the plane described by each rectangle.
Given this assumption their are 4 possible situations for the intersection of 2 rectangles R1 and R2:
(note: sometimes D1 doesn't intersect neither R1 nor R2 and R1 , R2 can be rotated a little bit so D1 doesn't always intersect on parallele sides, but consecutive sides)
When there is an intersection between the 2 rectangles, D1 always intersect R1 and R2 on the same intersection (cf 1st and 2nd picture)
Your model is not right, because a line cannot be parallel to 3 segments of the same rectangle...
As you asked in this question : 3D lines intersection algorithm once you have D1 ( Get endpoints of the line segment defined by the intersection of two rectangles ) just determinate the intersection with each segment of the rectangle.(The 4 segments of each rectangles need to be checked)
Then check for common intersection... if you find one then your rectangles intersect.
Sorry, it's very hard to check the code directly, but I guess with these pieces of information you should be able to find the error.
Hope it helps.
EDIT:
define a rectangle by a point and 2 vectors :
R2 {A ,u ,v}
R1 {B, u',v'}
define the planes described by R1 and R2 : P1 and P2
One orthogonal vector to P1(resp. P2) is n1 (resp. n2).Let n1 = u ^ v and n2 = u' ^ v' with :
then
P1: n1.(x-xA,y-yA,z-zA)=0
P2: n2.(x-xB,y-yB,z-zB)=0
Then if you're just looking for D1 the equation of D1 is :
D1: P1^2 + P2 ^2 =0 (x,y,z verify P1 =0 an P2 =0 )
D1 : n1.(x-xA,y-yA,z-zA)^2 + n2.(x-xB,y-yB,z-zB)^2 =0
(so just with the expression of your rectangles you can get the equation of D1 with a closed formula.)
Now let's look at the intersections :
the 4 points in R1 are :
{ A , A+u , A+v, A+u+v }
as describe in 3D lines intersection algorithm do :
D1 inter [A,A+u] = I1
D1 inter [A,A+v] = I2
D1 inter [A+u,A+u+v] = I3
D1 inter [A+v,A+u+v] = I4
(I1,I2,I3,I4 can be null)
same for D2 you get I1' I2' I3' I4'
if Ij'=Ik' != null then it's an intersection point
if you did that correctly step by step you should get to the correct solution; unless I didn't fully understand the question...
The program computes the line of intersection of the planes passing through two rectangles. The program then looks for intersections between this line and the edges of one of the rectangles. It returns two points of intersection if such two points are found. I'm not going to debate whether this is a sensible thing to do, since I don't know the context of the program.
Let's go through the code and look for things that could be wrong.
The program computes the line passing through the two planes like this:
Vector3 LineDirection = Normal.Cross(SecondOne.Normal);
float d1 = this.GetDistance(LineDirection);
float d2 = SecondOne.GetDistance(LineDirection);
temp = (LineDirection - (this.Normal * d1) - (SecondOne.Normal * d2));
temp.x = Math.Abs((float)Math.Round((decimal)FirstPoint.x, 2));
temp.y = Math.Abs((float)Math.Round((decimal)FirstPoint.y, 2));
Line line;
line.direction = LineDirection;
line.point = temp;
The computation of the line direction is OK, but the computation of point is wrong, as you probably know. But I'll pretend we have a valid point and direction and carry on with the rest of the program.
The program calls AreLinesParallel() to get rid of edges that a parallel to the line through the planes. The code looks like this:
Vector3 vector = (first.secondPoint - first.firstPoint);
vector.Normalize();
float kl = 0, km = 0, kn = 0;
if (vector.x != aSecondLine.direction.x)
{
if (vector.x != 0 && aSecondLine.direction.x != 0)
{
kl = vector.x / aSecondLine.direction.x;
}
}
if (vector.y != aSecondLine.direction.y)
{
if (vector.y != 0 && aSecondLine.direction.y != 0)
{
km = vector.y / aSecondLine.direction.y;
}
}
if (vector.z != aSecondLine.direction.z)
{
if (vector.z != 0 && aSecondLine.direction.z != 0)
{
kn = vector.z / aSecondLine.direction.z;
}
}
// both if all are null or all are equal, the lines are parallel
return ((kl == km && km == kn));
The code more or less checks that the elements of the direction of the edge divided by the elements of the direction of the line are all equal to each other. It's a dangerous procedure to rely on. Because of round-off errors, later procedures may still, say, divide by zero, even if AreLinesParallel() claims that the lines aren't really parallel. It is better not to use the procedure at all.
Now comes the meat of the code, a test for intersection between the edge and the line:
float d = 0, dt = 0, dk = 0;
float t = 0, k = 0;
if (Math.Abs(n1.x * n2.y - n2.x * n1.y) > float.Epsilon)
{
d = n1.x * (-n2.y) - (-n2.x) * n1.y;
dt = (p2.x - p1.x) * (-n2.y) - (p2.y - p1.y) * (-n2.x);
dk = n1.x * (p2.x - p1.x) - n1.y * (p2.y - p1.y);
}
else if (Math.Abs(n1.z * n2.y - n2.z * n1.y) > float.Epsilon)
{
d = n1.z * (-n2.y) - (-n2.z) * n1.y;
dt = (p2.z - p1.z) * (-n2.y) - (p2.y - p1.y) * (-n2.z);
dk = n1.z * (p2.z - p1.z) - n1.y * (p2.y - p1.y);
}
else if (Math.Abs(n1.x * n2.z - n2.x * n1.z) > float.Epsilon)
{
d = n1.x * (-n2.z) - (-n2.x) * n1.z;
dt = (p2.x - p1.x) * (-n2.z) - (p2.z - p1.z) * (-n2.x);
dk = n1.x * (p2.x - p1.x) - n1.z * (p2.z - p1.z);
}
t = dt / d;
k = dk / d;
result = n1 * t + p1;
A mistake of this code is the lack of a comment that explains the origin of the algorithm. If there is no documented algorithm to refer to, the comment can contain the derivation leading to the formulas. The first branch deals with (x, y), the second with (y, z) and the third with (z, x), so I assume that the branches solve for intersection in 2D and lift these findings to 3D. It computes determinants to check for parallel lines for each 2D projection. I shouldn't have to do this kind of reverse engineering.
Anyway, this is the code that produces the NaN values. None of the three branches are triggered, so d = 0 in the end, which gives a division by zero. Instead of relying on AreLinesParallel() to avoid division by zero, it's is better to check the value that actually matters, namely d.
Of course, the code still needs more work, because we don't know yet if the lines are crossing in 3D too. Also the point is on the edge only if 0 <= t && t <= 1. And probably more bugs will show up as the earlier ones are being fixed.