How to do real time Raytracing in Unity with C#

I am making a video game in Unity and decided to use ray tracing. I have the code, but as you will see in a second, it isn't exactly rendering frame by frame.
Here is my raytracing code, this is the main script attached to the main camera.
using UnityEngine;
using System.Collections;

public class RayTracer : MonoBehaviour
{
    public Color backgroundColor = Color.black;
    public float RenderResolution = 1f;
    public float maxDist = 100f;
    public int maxRecursion = 4;

    private Light[] lights;
    private Texture2D renderTexture;

    void Awake()
    {
        renderTexture = new Texture2D((int)(Screen.width * RenderResolution), (int)(Screen.height * RenderResolution));
        lights = FindObjectsOfType(typeof(Light)) as Light[];
    }

    void Start()
    {
        RayTrace();
    }

    void OnGUI()
    {
        GUI.DrawTexture(new Rect(0, 0, Screen.width, Screen.height), renderTexture);
    }

    void RayTrace()
    {
        for (int x = 0; x < renderTexture.width; x++)
        {
            for (int y = 0; y < renderTexture.height; y++)
            {
                Color color = Color.black;
                Ray ray = GetComponent<Camera>().ScreenPointToRay(new Vector3(x / RenderResolution, y / RenderResolution, 0));
                renderTexture.SetPixel(x, y, TraceRay(ray, color, 0));
            }
        }
        renderTexture.Apply();
    }

    Color TraceRay(Ray ray, Color color, int recursiveLevel)
    {
        if (recursiveLevel < maxRecursion)
        {
            RaycastHit hit;
            if (Physics.Raycast(ray, out hit, maxDist))
            {
                Vector3 viewVector = ray.direction;
                Vector3 pos = hit.point + hit.normal * 0.0001f;
                Vector3 normal = hit.normal;
                RayTracerObject rto = hit.collider.gameObject.GetComponent<RayTracerObject>();
                // Does the object we hit have that script?
                if (rto == null)
                {
                    var GO = hit.collider.gameObject;
                    Debug.Log("Raycast hit failure! On " + GO.name + " position " + GO.transform.position.ToString());
                    return color; // exit out
                }
                Material mat = hit.collider.GetComponent<Renderer>().material;
                if (mat.mainTexture)
                {
                    color += (mat.mainTexture as Texture2D).GetPixelBilinear(hit.textureCoord.x, hit.textureCoord.y);
                }
                else
                {
                    color += mat.color;
                }
                color *= TraceLight(rto, viewVector, pos, normal);
                if (rto.reflectiveCoeff > 0)
                {
                    float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                    Ray newRay = new Ray(pos, viewVector - reflet * normal);
                    color += rto.reflectiveCoeff * TraceRay(newRay, color, recursiveLevel + 1);
                }
                if (rto.transparentCoeff > 0)
                {
                    Ray newRay = new Ray(hit.point - hit.normal * 0.0001f, viewVector);
                    color += rto.transparentCoeff * TraceRay(newRay, color, recursiveLevel + 1);
                }
            }
        }
        return color;
    }

    Color TraceLight(RayTracerObject rto, Vector3 viewVector, Vector3 pos, Vector3 normal)
    {
        Color c = RenderSettings.ambientLight;
        foreach (Light light in lights)
        {
            if (light.enabled)
            {
                c += LightTrace(rto, light, viewVector, pos, normal);
            }
        }
        return c;
    }

    Color LightTrace(RayTracerObject rto, Light light, Vector3 viewVector, Vector3 pos, Vector3 normal)
    {
        float dot, distance, contribution;
        Vector3 direction;
        switch (light.type)
        {
            case LightType.Directional:
                contribution = 0;
                direction = -light.transform.forward;
                dot = Vector3.Dot(direction, normal);
                if (dot > 0)
                {
                    if (Physics.Raycast(pos, direction, maxDist))
                    {
                        return Color.black;
                    }
                    if (rto.lambertCoeff > 0)
                    {
                        contribution += dot * rto.lambertCoeff;
                    }
                    if (rto.reflectiveCoeff > 0)
                    {
                        if (rto.phongCoeff > 0)
                        {
                            float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                            Vector3 phongDir = viewVector - reflet * normal;
                            float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                            phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;
                            contribution += phongTerm;
                        }
                        if (rto.blinnPhongCoeff > 0)
                        {
                            Vector3 blinnDir = -light.transform.forward - viewVector;
                            float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                            if (temp != 0.0f)
                            {
                                blinnDir = (1.0f / temp) * blinnDir;
                                float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;
                                contribution += blinnTerm;
                            }
                        }
                    }
                }
                return light.color * light.intensity * contribution;
            case LightType.Point:
                contribution = 0;
                direction = (light.transform.position - pos).normalized;
                dot = Vector3.Dot(normal, direction);
                distance = Vector3.Distance(pos, light.transform.position);
                if ((distance < light.range) && (dot > 0))
                {
                    if (Physics.Raycast(pos, direction, distance))
                    {
                        return Color.black;
                    }
                    if (rto.lambertCoeff > 0)
                    {
                        contribution += dot * rto.lambertCoeff;
                    }
                    if (rto.reflectiveCoeff > 0)
                    {
                        if (rto.phongCoeff > 0)
                        {
                            float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                            Vector3 phongDir = viewVector - reflet * normal;
                            float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                            phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;
                            contribution += phongTerm;
                        }
                        if (rto.blinnPhongCoeff > 0)
                        {
                            Vector3 blinnDir = -light.transform.forward - viewVector;
                            float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                            if (temp != 0.0f)
                            {
                                blinnDir = (1.0f / temp) * blinnDir;
                                float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;
                                contribution += blinnTerm;
                            }
                        }
                    }
                }
                if (contribution == 0)
                {
                    return Color.black;
                }
                return light.color * light.intensity * contribution;
            case LightType.Spot:
                contribution = 0;
                direction = (light.transform.position - pos).normalized;
                dot = Vector3.Dot(normal, direction);
                distance = Vector3.Distance(pos, light.transform.position);
                if (distance < light.range && dot > 0)
                {
                    float dot2 = Vector3.Dot(-light.transform.forward, direction);
                    if (dot2 > (1 - light.spotAngle / 180))
                    {
                        if (Physics.Raycast(pos, direction, distance))
                        {
                            return Color.black;
                        }
                        if (rto.lambertCoeff > 0)
                        {
                            contribution += dot * rto.lambertCoeff;
                        }
                        if (rto.reflectiveCoeff > 0)
                        {
                            if (rto.phongCoeff > 0)
                            {
                                float reflet = 2.0f * Vector3.Dot(viewVector, normal);
                                Vector3 phongDir = viewVector - reflet * normal;
                                float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f);
                                phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff;
                                contribution += phongTerm;
                            }
                            if (rto.blinnPhongCoeff > 0)
                            {
                                Vector3 blinnDir = -light.transform.forward - viewVector;
                                float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir));
                                if (temp != 0.0f)
                                {
                                    blinnDir = (1.0f / temp) * blinnDir;
                                    float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f);
                                    blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff;
                                    contribution += blinnTerm;
                                }
                            }
                        }
                    }
                }
                if (contribution == 0)
                {
                    return Color.black;
                }
                return light.color * light.intensity * contribution;
        }
        return Color.black;
    }

    float max(float x0, float x1)
    {
        return x0 > x1 ? x0 : x1;
    }
}
And this is the code attached to the Objects in the scene
using UnityEngine;
using System.Collections;

public class RayTracerObject : MonoBehaviour
{
    public float lambertCoeff = 1f;
    public float reflectiveCoeff = 0f;
    public float phongCoeff = 1f;
    public float phongPower = 2f;
    public float blinnPhongCoeff = 1f;
    public float blinnPhongPower = 2f;
    public float transparentCoeff = 0f;
    public Color baseColor = Color.gray;

    void Awake()
    {
        if (!GetComponent<Renderer>().material.mainTexture)
        {
            GetComponent<Renderer>().material.color = baseColor;
        }
    }
}
How would I go about doing this? And what would the code be?

Though raytracing on the primary thread is a perfectly acceptable design, it's probably not what you want in Unity, as it blocks everything else.
Now you could arguably spawn a child thread to perform the raytracing and have the primary thread render the results. The problem, though, is that neither approach makes use of the GPU, which rather defeats the point of using Unity in the first place.
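For illustration, here is a minimal sketch of that child-thread approach (names and structure are my own, not from the question). The worker thread fills a plain colour buffer and the main thread uploads it when tracing finishes; note that UnityEngine APIs such as Physics.Raycast cannot be called off the main thread, so a threaded tracer also needs its own thread-safe scene representation:
using System.Threading;
using UnityEngine;

public class ThreadedTracer : MonoBehaviour
{
    Color32[] buffer;   // written by the worker thread
    volatile bool done; // set when the trace has finished
    Texture2D texture;

    void Start()
    {
        texture = new Texture2D(Screen.width, Screen.height);
        buffer = new Color32[texture.width * texture.height];
        int w = texture.width, h = texture.height;
        new Thread(() =>
        {
            for (int i = 0; i < w * h; i++)
                buffer[i] = TracePixel(i % w, i / w); // engine-free code only here
            done = true;
        }).Start();
    }

    void Update()
    {
        if (done)
        {
            done = false;
            texture.SetPixels32(buffer); // upload on the main thread
            texture.Apply();
        }
    }

    Color32 TracePixel(int x, int y)
    {
        // Placeholder: a real implementation would intersect rays against
        // a thread-safe copy of the scene, not the Unity physics world.
        return new Color32(0, 0, 0, 255);
    }
}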
How to do real time Raytracing in unity with C#
It all depends on what your scene consists of and how you intend to render it. You could arguably render something simple in real time at low resolution; however, rendering at a reasonable screen resolution and with reasonable levels of ray bouncing (i.e. the number of recursive light rays cast for reflective or transmissive materials) would be much more difficult.
Instead I would urge you to follow the changing trend in raytracing, where realtime raytracing is now being performed on the GPU using techniques known as General-Purpose GPU computing, or GPGPU. nVidia has some talks on this subject available on YouTube. Here is my sample Unity GPGPU galaxy simulation that might prove useful as background to GPGPU.
Sample GPGPU kernel merely to show you what GPGPU is about:
// File: Galaxy1Compute.compute

// Each #kernel tells which function to compile; you can have many kernels
#pragma kernel UpdateStars

#include "Galaxy.cginc"

// blackmagic
#define BLOCKSIZE 128

RWStructuredBuffer<Star> stars;

Texture2D HueTexture;
// refer to http://forum.unity3d.com/threads/163591-Compute-Shader-SamplerState-confusion
SamplerState samplerHueTexture;

// time elapsed since last frame
float deltaTime;

const float Softening = 3e4f;
#define Softening2 Softening * Softening

static float G = 6.67300e-11f;
static float DefaultMass = 1000000.0f;

// Do a pre-calculation assuming all the stars have the same mass
static float GMM = G * DefaultMass * DefaultMass;

[numthreads(BLOCKSIZE, 1, 1)]
void UpdateStars(uint3 id : SV_DispatchThreadID)
{
    uint i = id.x;
    uint numStars, stride;
    stars.GetDimensions(numStars, stride);

    float3 position = stars[i].position;
    float3 velocity = stars[i].velocity;
    float3 A = float3(0, 0, 0);

    [loop]
    for (uint j = 0; j < numStars; j++)
    {
        if (i != j)
        {
            float3 D = stars[j].position - stars[i].position;
            float r = length(D);
            float f = GMM / (r * r + Softening2);
            A += f * normalize(D);
        }
    }

    velocity += A * deltaTime;
    position += velocity * deltaTime;

    if (i < numStars)
    {
        stars[i].velocity = velocity;
        stars[i].position = position;
        stars[i].accelMagnitude = length(A);
    }
}
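For context, the C# side that drives a kernel like this is roughly as follows (a hedged sketch: the Star layout and star count are assumptions, keeping only the three fields the kernel above actually touches):
using UnityEngine;

public class GalaxyDriver : MonoBehaviour
{
    public ComputeShader galaxyCompute; // the .compute file above
    ComputeBuffer starsBuffer;
    int kernel;

    const int BlockSize = 128; // must match BLOCKSIZE in the kernel
    const int NumStars = 8192; // any multiple of BlockSize

    struct Star // assumed layout: 3 + 3 + 1 = 7 floats
    {
        public Vector3 position;
        public Vector3 velocity;
        public float accelMagnitude;
    }

    void Start()
    {
        kernel = galaxyCompute.FindKernel("UpdateStars");
        starsBuffer = new ComputeBuffer(NumStars, sizeof(float) * 7);
        var stars = new Star[NumStars];
        // ...fill in initial positions and velocities here...
        starsBuffer.SetData(stars);
        galaxyCompute.SetBuffer(kernel, "stars", starsBuffer);
    }

    void Update()
    {
        galaxyCompute.SetFloat("deltaTime", Time.deltaTime);
        galaxyCompute.Dispatch(kernel, NumStars / BlockSize, 1, 1);
    }

    void OnDestroy()
    {
        starsBuffer.Release();
    }
}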
Additionally there are some fine books on the subject. Real-Time Volume Graphics, though it covers volumes, does cover casting rays - the essence of ray-tracing. The hardest paradigm shift is writing for the GPGPU; once you understand it, writing a GPGPU raytracer is an easy step from GPGPU volume shaders.
A marvellous tome to accompany any raytracer author is Matt Pharr's Physically Based Rendering book (there is a 2nd edition, but I have not read that).
More
GPU Ray Tracing in Unity – Part 1

Nvidia announced NVIDIA RTX™, a ray-tracing technology that brings real-time, cinematic-quality rendering to content creators and game developers.
It consists of a ray-tracing engine running on NVIDIA Volta architecture GPUs. It’s designed to support ray tracing through a variety of interfaces.
This brings game developers the ability to use ray tracing in their work and get movie-quality output.
https://nvidianews.nvidia.com/news/nvidia-rtx-technology-realizes-dream-of-real-time-cinematic-rendering
Unity is expected to support this new DirectX Raytracing API in a future update. Game developers will then be able to enjoy photorealistic-quality output in their Unity rendering pipeline.

So after all the hype around RTX cards, we need to answer a question: what do they actually do? Basically, an RTX card is a hardware-accelerated raycaster, well optimized for that job.
But nobody said you can't do hardware-accelerated raycasting on, say, any other graphics card. In Unity, you have access to hardware acceleration in the form of shaders: you can write your own raycaster with the power of compute shaders. It will be much slower than the heavily optimized RTX cards, but it gives you an advantage in some areas.
So if it is slower than RTX, why would you do it? Well, in general, you can enhance your rendering with this method - for example softening shadows, attempting global illumination, all sorts of stuff. But to answer your question: you won't be able to do full-blown raytracing without an RTX card.
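As a rough illustration of that compute-shader route, the usual dispatch-and-blit skeleton looks like this (a sketch only; it assumes a compute shader asset with a CSMain kernel that writes to a RWTexture2D<float4> named Result using 8x8 thread groups):
using UnityEngine;

public class ComputeRaycaster : MonoBehaviour // attach to the camera
{
    public ComputeShader raycastShader; // assumed kernel "CSMain" writing to "Result"
    RenderTexture result;

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (result == null)
        {
            result = new RenderTexture(Screen.width, Screen.height, 0);
            result.enableRandomWrite = true; // required for RWTexture2D access
            result.Create();
        }
        int kernel = raycastShader.FindKernel("CSMain");
        raycastShader.SetTexture(kernel, "Result", result);
        raycastShader.Dispatch(kernel,
            Mathf.CeilToInt(Screen.width / 8f),
            Mathf.CeilToInt(Screen.height / 8f), 1);
        Graphics.Blit(result, destination); // present the traced image
    }
}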

Related

Zooming out the camera so that it can see all objects

I have a list of objects - the blue stickmen in the video. I need to make the camera move away by itself so that all the objects (blue stickmen) always fit into its view. Take into account that there will be more and more objects each time, so the camera should be dynamic and adapt itself to all the objects.
https://youtube.com/shorts/x3uSO2L22Kc?feature=share
Practical solution for any camera angle and object positions
The idea here is fairly simple.
At each step we check whether each object is inside the camera view, or whether the camera is too far away, and then simply adjust the camera position towards a better one.
Step by step, the camera will follow the target objects dynamically, and once stabilized, all target objects will be captured by the camera.
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class CameraAutoFitSmooth : MonoBehaviour
{
    public int maxNumberOfObjs = 100;
    public GameObject objPrefab;
    public float maxInitRange = 10f;
    public float minCameraHeight = 1f;
    public float maxCameraMoveSpeed = 9f;
    public float marginalPos = 0.1f;

    [HideInInspector]
    public List<Transform> objs = new List<Transform>();

    Camera cam;

    void Start()
    {
        cam = Camera.main;
    }

    void Update()
    {
        if (Input.GetKeyDown(KeyCode.Space))
            RandomObjs();
    }

    // Randomly regenerate objects
    void RandomObjs()
    {
        int nowNumberOfObjs = Random.Range(0, maxNumberOfObjs);
        for (int i = objs.Count - 1; i > nowNumberOfObjs; i--)
        {
            Destroy(objs[i].gameObject);
            objs.RemoveAt(i);
        }
        for (int i = objs.Count; i <= nowNumberOfObjs; i++)
            objs.Add(Instantiate(objPrefab).transform);
        foreach (var obj in objs)
            obj.position = Random.insideUnitSphere * maxInitRange;
    }

    void LateUpdate()
    {
        SetCameraFitPosition(Time.deltaTime);
    }

    void SetCameraFitPosition(float deltaTime)
    {
        Vector3 targetCamPos = cam.transform.position;
        if (objs.Count == 1)
        {
            targetCamPos = objs[0].position - minCameraHeight * cam.transform.forward;
        }
        else if (objs.Count > 1)
        {
            float minInsideDiff = 1f, maxOutsideDiff = 0f;
            Vector3 center = Vector3.zero;
            foreach (var obj in objs)
            {
                Vector3 screenPos = GetScreenPos(obj.position);
                if (IsInsideView(screenPos))
                    minInsideDiff = Mathf.Min(minInsideDiff, CalculateInsideDiff(screenPos.x), CalculateInsideDiff(screenPos.y), CalculateInsideDiff(screenPos.z));
                else
                    maxOutsideDiff = Mathf.Max(maxOutsideDiff, CalculateOutsideDiff(screenPos.x), CalculateOutsideDiff(screenPos.y), CalculateOutsideDiff(screenPos.z));
                center += obj.position;
            }
            center /= objs.Count;
            float nowHeight = Vector3.Project(cam.transform.position - center, cam.transform.forward).magnitude;
            float maxDiff = maxOutsideDiff > 0f ? maxOutsideDiff : -minInsideDiff;
            float finalHeight = Mathf.Max(nowHeight + maxDiff * maxCameraMoveSpeed, minCameraHeight);
            targetCamPos = center - finalHeight * cam.transform.forward;
        }
        cam.transform.position = Vector3.MoveTowards(cam.transform.position, targetCamPos, maxCameraMoveSpeed * deltaTime);
    }

    Vector3 GetScreenPos(Vector3 pos)
    {
        return cam.WorldToViewportPoint(pos);
    }

    float CalculateOutsideDiff(float pos)
    {
        float diff = 0f;
        if (pos > 1f + marginalPos)
            diff = pos - 1f;
        else if (pos < -marginalPos)
            diff = -pos;
        return diff;
    }

    float CalculateInsideDiff(float pos)
    {
        float diff = 0f;
        if (pos < 1f - marginalPos && pos > marginalPos)
            diff = Mathf.Min(1f - pos, pos);
        return diff;
    }

    bool IsInsideView(Vector3 screenPoint)
    {
        return screenPoint.z > 0f && screenPoint.x > 0f && screenPoint.x < 1 && screenPoint.y > 0f && screenPoint.y < 1;
    }
}
If you need more info feel free to contact me :) Cheers!
The following script will position a perspective camera in a top down view, so that all tracked GameObjects (objs) are visible.
It is assumed that the objects are points on the zero XZ plane, so their actual dimensions are not taken into account. There must be at least one tracked object. The objects may not be spaced in such a way that the camera's height would have to exceed the maximum floating-point value.
public GameObject[] objs; // objects that must be fitted
private Camera cam;
float recXmin, recXmax, recYmin, recYmax;
Vector3 center;

void Start()
{
    cam = Camera.main;
    cam.transform.rotation = Quaternion.Euler(90, 0, 0);
}

void LateUpdate()
{
    recXmin = objs[0].transform.position.x;
    recXmax = objs[0].transform.position.x;
    recYmin = objs[0].transform.position.z;
    recYmax = objs[0].transform.position.z;
    center = Vector3.zero;
    foreach (GameObject obj in objs)
    {
        if (obj.transform.position.x < recXmin)
        {
            recXmin = obj.transform.position.x;
        }
        if (obj.transform.position.x > recXmax)
        {
            recXmax = obj.transform.position.x;
        }
        if (obj.transform.position.z < recYmin)
        {
            recYmin = obj.transform.position.z;
        }
        if (obj.transform.position.z > recYmax)
        {
            recYmax = obj.transform.position.z;
        }
    }
    float horizontalHeight = (recYmax - recYmin) / 2 / Mathf.Tan(Mathf.Deg2Rad * cam.fieldOfView / 2);
    float verticalHeight = (recXmax - recXmin) / 2 / Mathf.Tan(Mathf.Deg2Rad * Camera.VerticalToHorizontalFieldOfView(cam.fieldOfView, cam.aspect) / 2);
    float finalHeight = horizontalHeight > verticalHeight ? horizontalHeight : verticalHeight;
    center = new Vector3(recXmin + (recXmax - recXmin) / 2, finalHeight, recYmin + (recYmax - recYmin) / 2);
    cam.transform.position = center;
}

void OnDrawGizmos()
{
    Gizmos.color = Color.red;
    Gizmos.DrawLine(new Vector3(recXmin, 0, recYmin), new Vector3(recXmin, 0, recYmax));
    Gizmos.DrawLine(new Vector3(recXmax, 0, recYmin), new Vector3(recXmax, 0, recYmax));
    Gizmos.color = Color.green;
    Gizmos.DrawLine(new Vector3(recXmin, 0, recYmin), new Vector3(recXmax, 0, recYmin));
    Gizmos.DrawLine(new Vector3(recXmin, 0, recYmax), new Vector3(recXmax, 0, recYmax));
    Gizmos.color = Color.blue;
    Gizmos.DrawSphere(center, 0.5f);
}
The script determines the "bounding square" formed by all tracked objects.
The bounding square is assumed to be the base of a pyramid with the camera at its peak.
Using trigonometry, the height of the camera can be calculated from the known length of the pyramid's base side and the camera's field of view (see the relation below).
The calculation is made twice, once for the camera's horizontal and once for its vertical field of view.
The greater of these two values is then selected.
Lastly, the camera is positioned over the middle of the pyramid's base at the determined height, so that it ends up at the pyramid's peak.
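In equation form, the relation used for each axis is plain right-triangle trigonometry (my paraphrase of the steps above):
cameraHeight = (baseSideLength / 2) / tan(fieldOfView / 2)
with the field of view in radians, which is why the code multiplies by Mathf.Deg2Rad.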

The difference in the speed of moving objects through the CPU and GPU shader in Unity

I have been testing moving a lot of objects in Unity through normal C# code and through HLSL shaders. However, there is no difference in speed; the FPS remains the same. Different Perlin noise is used to change the position: the C# code uses the standard Mathf.PerlinNoise, while the HLSL uses a custom noise function.
Scenario 1 - Update via C# code only
Object spawn:
[SerializeField]
private GameObject prefab;

private void Start()
{
    for (int i = 0; i < 50; i++)
        for (int j = 0; j < 50; j++)
        {
            GameObject createdParticle;
            createdParticle = Instantiate(prefab);
            createdParticle.transform.position = new Vector3(i * 1f, Random.Range(-1f, 1f), j * 1f);
        }
}
Code to move an object via C#. This script is added to each created object:
private Vector3 position = new Vector3();

private void Start()
{
    position = new Vector3(transform.position.x, Mathf.PerlinNoise(Time.time, Time.time), transform.position.z);
}

private void Update()
{
    position.y = Mathf.PerlinNoise(transform.position.x / 20f + Time.time, transform.position.z / 20f + Time.time) * 5f;
    transform.position = position;
}
Scenario 2 - via Compute Kernel (GPGPU)
Part 1: C# client code
Object spawn, running the calculation on the shader and assigning the resulting value to the objects:
public struct Particle
{
    public Vector3 position;
    public Vector4 color; // added for consistency: the HLSL struct below also carries a
                          // float4 color, and the 7-float buffer stride assumes it
}

[SerializeField]
private GameObject prefab;
[SerializeField]
private ComputeShader computeShader;

private List<GameObject> particlesList = new List<GameObject>();
private Particle[] particlesDataArray;

private void Start()
{
    CreateParticles();
}

private void Update()
{
    UpdateParticlePosition();
}

private void CreateParticles()
{
    List<Particle> particlesDataList = new List<Particle>();
    for (int i = 0; i < 50; i++)
        for (int j = 0; j < 50; j++)
        {
            GameObject createdParticle;
            createdParticle = Instantiate(prefab);
            createdParticle.transform.position = new Vector3(i * 1f, Random.Range(-1f, 1f), j * 1f);
            particlesList.Add(createdParticle);
            Particle particle = new Particle();
            particle.position = createdParticle.transform.position;
            particlesDataList.Add(particle);
        }
    particlesDataArray = particlesDataList.ToArray();
    particlesDataList.Clear();
    computeBuffer = new ComputeBuffer(particlesDataArray.Length, sizeof(float) * 7); // 7 floats: float3 position + float4 color
    computeBuffer.SetData(particlesDataArray);
    computeShader.SetBuffer(0, "particles", computeBuffer);
}

private ComputeBuffer computeBuffer;

private void UpdateParticlePosition()
{
    computeShader.SetFloat("time", Time.time);
    computeShader.Dispatch(computeShader.FindKernel("CSMain"), particlesDataArray.Length / 10, 1, 1);
    computeBuffer.GetData(particlesDataArray);
    for (int i = 0; i < particlesDataArray.Length; i++)
    {
        Vector3 pos = particlesList[i].transform.position;
        pos.y = particlesDataArray[i].position.y;
        particlesList[i].transform.position = pos;
    }
}
Part 2: Compute kernel (GPGPU)
#pragma kernel CSMain

struct Particle {
    float3 position;
    float4 color;
};

RWStructuredBuffer<Particle> particles;
float time;

float mod(float x, float y)
{
    return x - y * floor(x / y);
}

float permute(float x) { return floor(mod(((x * 34.0) + 1.0) * x, 289.0)); }
float3 permute(float3 x) { return mod(((x * 34.0) + 1.0) * x, 289.0); }
float4 permute(float4 x) { return mod(((x * 34.0) + 1.0) * x, 289.0); }
float taylorInvSqrt(float r) { return 1.79284291400159 - 0.85373472095314 * r; }
float4 taylorInvSqrt(float4 r) { return float4(taylorInvSqrt(r.x), taylorInvSqrt(r.y), taylorInvSqrt(r.z), taylorInvSqrt(r.w)); }

float3 rand3(float3 c) {
    float j = 4096.0 * sin(dot(c, float3(17.0, 59.4, 15.0)));
    float3 r;
    r.z = frac(512.0 * j);
    j *= .125;
    r.x = frac(512.0 * j);
    j *= .125;
    r.y = frac(512.0 * j);
    return r - 0.5;
}

float _snoise(float3 p) {
    const float F3 = 0.3333333;
    const float G3 = 0.1666667;
    float3 s = floor(p + dot(p, float3(F3, F3, F3)));
    float3 x = p - s + dot(s, float3(G3, G3, G3));
    float3 e = step(float3(0.0, 0.0, 0.0), x - x.yzx);
    float3 i1 = e * (1.0 - e.zxy);
    float3 i2 = 1.0 - e.zxy * (1.0 - e);
    float3 x1 = x - i1 + G3;
    float3 x2 = x - i2 + 2.0 * G3;
    float3 x3 = x - 1.0 + 3.0 * G3;
    float4 w, d;
    w.x = dot(x, x);
    w.y = dot(x1, x1);
    w.z = dot(x2, x2);
    w.w = dot(x3, x3);
    w = max(0.6 - w, 0.0);
    d.x = dot(rand3(s), x);
    d.y = dot(rand3(s + i1), x1);
    d.z = dot(rand3(s + i2), x2);
    d.w = dot(rand3(s + 1.0), x3);
    w *= w;
    w *= w;
    d *= w;
    return dot(d, float4(52.0, 52.0, 52.0, 52.0));
}

[numthreads(10, 1, 1)]
void CSMain(uint3 id : SV_DispatchThreadID)
{
    Particle particle = particles[id.x];
    float modifyTime = time / 5.0;
    float positionY = _snoise(float3(particle.position.x / 20.0 + modifyTime, 0.0, particle.position.z / 20.0 + modifyTime)) * 5.0;
    particle.position = float3(particle.position.x, positionY, particle.position.z);
    particles[id.x] = particle;
}
What am I doing wrong, why is there no increase in calculation speed? :)
Thanks in advance!
TL;DR: your GPGPU (compute shader) scenario is unoptimized thus skewing your results. Consider binding a material to the computeBuffer and rendering via Graphics.DrawProcedural. That way everything stays on the GPU.
OP:
What am I doing wrong, why is there no increase in calculation speed?
Essentially, there are two parts to your problem.
(1) Reading from the GPU is slow
With most things GPU-related, you generally want to avoid reading from the GPU since it will block the CPU. This is true also for GPGPU scenarios.
If I were to hazard a guess it would be the GPGPU (compute shader) call computeBuffer.GetData() shown below:
private void Update()
{
    UpdateParticlePosition();
}

private void UpdateParticlePosition()
{
    ...
    computeBuffer.GetData(particlesDataArray); // <----- OUCH!
Unity (my emphasis):
ComputeBuffer.GetData
Read data values from the buffer into an array...
Note that this function reads the data back from the GPU, which can be slow...If any GPU work has been submitted that writes to this buffer, Unity waits for the tasks to complete before it retrieves the requested data. Tell me more...
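If the data really is needed on the CPU, Unity 2018.1+ offers AsyncGPUReadback, which queues the copy and delivers it via a callback a few frames later instead of stalling the main thread right now. A minimal sketch, reusing the question's computeBuffer and Particle type:
using UnityEngine.Rendering;

// In place of computeBuffer.GetData(particlesDataArray):
AsyncGPUReadback.Request(computeBuffer, request =>
{
    if (request.hasError)
        return;
    var data = request.GetData<Particle>(); // NativeArray<Particle>
    // Use the data here; note it is a few frames out of date.
});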
(2) Explicit GPU reading is not required in your scenario
I can see you are creating 2,500 "particles" where each particle is attached to a GameObject. If the intent is just to draw a simple quad, then it's more efficient to create an array of structs containing a Vector3 position and then perform a batch render call to draw all the particles in one go.
Proof: see the video below of an n-body simulation running at 60+ FPS on a 2014-era NVidia card.
e.g. for my GPGPU n-Body Galaxy Simulation I do just that. Pay attention to the StarMaterial.SetBuffer("stars", _starsBuffer) during actual rendering. That tells the GPU to use the buffer that already exists on the GPU - the very same buffer that the compute shader used to move the star positions. There is no CPU reading from the GPU here.
public class Galaxy1Controller : MonoBehaviour
{
    public Texture2D HueTexture;
    public int NumStars = 10000; // That's right! 10,000 stars!
    public ComputeShader StarCompute;
    public Material StarMaterial;

    private ComputeBuffer _quadPoints;
    private Star[] _stars;
    private ComputeBuffer _starsBuffer;
    ...

    private void Start()
    {
        _updateParticlesKernel = StarCompute.FindKernel("UpdateStars");
        _starsBuffer = new ComputeBuffer(NumStars, Constants.StarsStride);
        _stars = new Star[NumStars];
        // Create initial positions for stars here (not shown)
        _starsBuffer.SetData(_stars);
        _quadPoints = new ComputeBuffer(6, QuadStride);
        _quadPoints.SetData(...); // star quad
    }

    private void Update()
    {
        // bind resources to compute shader
        StarCompute.SetBuffer(_updateParticlesKernel, "stars", _starsBuffer);
        StarCompute.SetFloat("deltaTime", Time.deltaTime * _manager.MasterSpeed);
        StarCompute.SetTexture(_updateParticlesKernel, "hueTexture", HueTexture);
        // dispatch, launch threads on GPU
        var numberOfGroups = Mathf.CeilToInt((float)NumStars / GroupSize);
        StarCompute.Dispatch(_updateParticlesKernel, numberOfGroups, 1, 1);
        // "Look Ma, no reading from the GPU!"
    }

    private void OnRenderObject()
    {
        // bind resources to material
        StarMaterial.SetBuffer("stars", _starsBuffer);
        StarMaterial.SetBuffer("quadPoints", _quadPoints);
        // set the pass
        StarMaterial.SetPass(0);
        // draw
        Graphics.DrawProcedural(MeshTopology.Triangles, 6, NumStars);
    }
}
n-Body galaxy simulation of 10,000 stars:
I think everyone can agree that Microsoft's GPGPU documentation is pretty sparse so your best bet is to check out examples scattered around the interwebs. One that comes to mind is the excellent "GPU Ray Tracing in Unity" series over at Three Eyed Games. See the link below.
See also:
MickyD, "n-Body Galaxy Simulation using Compute Shaders on GPGPU via Unity 3D", 2014
Kuri, D, "GPU Ray Tracing in Unity – Part 1", 2018
ComputeBuffer.GetData takes a very long time: the CPU copies the data back from the GPU, which stops the main thread.
Then you loop over all the transforms to change their positions; this is certainly faster than thousands of MonoBehaviours, but also very slow.
There are two ways to optimize your code.
CPU
C# Job System + Burst
Detailed tutorial: https://github.com/stella3d/job-system-cookbook
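For the CPU route, a minimal Burst job sketch (my illustration; it assumes the Jobs, Burst and Mathematics packages, and uses Unity.Mathematics.noise since Mathf.PerlinNoise cannot be called from inside a Burst-compiled job):
using Unity.Burst;
using Unity.Mathematics;
using UnityEngine;
using UnityEngine.Jobs;

[BurstCompile]
struct MoveParticlesJob : IJobParallelForTransform
{
    public float time;

    public void Execute(int index, TransformAccess transform)
    {
        Vector3 p = transform.position;
        // noise.cnoise stands in for the Mathf.PerlinNoise used in the question.
        p.y = noise.cnoise(new float2(p.x / 20f + time, p.z / 20f + time)) * 5f;
        transform.position = p;
    }
}

// Usage (in a MonoBehaviour): build a TransformAccessArray from the particle
// transforms once, then each frame:
//   var handle = new MoveParticlesJob { time = Time.time }.Schedule(transformAccessArray);
//   handle.Complete(); // or complete later in LateUpdate for more overlap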
GPU
Use the structured buffer calculated in the compute shader without copying it back to the CPU. Here is a detailed tutorial on how to do it:
https://catlikecoding.com/unity/tutorials/basics/compute-shaders/

How to rotate around an object without using unity's built-in functions?

I want to rotate a cube around a 1x1 pipe with the arrow keys (left and right).
The problem is I cannot use built-in functions that set a transform's position and rotation directly (such as transform.LookAt, transform.Rotate or transform.RotateAround), because I need the vector values of the rotation's Euler angles and the position for multiple things before I modify the value of the transform I want to rotate.
I tried different techniques but no luck so far.
I tried using sin-cos for the rotation but could not figure out how to make it work for both rotation and position.
_timer += Time.deltaTime * _larvaSpeed;
float x = -Mathf.Cos(_timer) * distanceBetweenCenter;
float y = Mathf.Sin(_timer) * distanceBetweenCenter;
Here is what I want to achieve: by pressing right or left, move and rotate the object around the pipe.
The result I want (if I pressed the right arrow key a little bit):
I would appreciate any help. Thank you!
Here is a solution using circle mathematics. I strongly recommend not using it; it is just to help understand circular movement via the circle equation, as #FaTaLL asked in the comments.
Circle equation:
(x1 - x2)^2 + (y1 - y2)^2 = r^2
x1, y1 is the cube position
x2, y2 is the pipe position
r is the distance between the cube and the pipe
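Equivalently, the circle can be parametrized directly with an angle - the form the sin-cos attempt in the question was reaching for:
x1 = x2 + r * cos(angle)
y1 = y2 + r * sin(angle)
Stepping the angle over time moves the cube along the circle while r stays fixed.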
using UnityEngine;

public class Rotating : MonoBehaviour
{
    public GameObject pipe;
    public float Delta;

    Vector3 nextpos;
    bool compareY;
    bool next;
    int switchx;
    float storeVarAxis;
    float x, y, r;

    private void Start()
    {
        next = true;
        switchx = 1;
        compareY = true;
        x = transform.position.x - pipe.transform.position.x;
        y = transform.position.y - pipe.transform.position.y;
        storeVarAxis = y;
        r = Mathf.Sqrt(x * x + y * y);
    }

    private void Update()
    {
        if (next)
        {
            if (compareY == true)
            {
                y -= Delta * Time.deltaTime;
                if (y <= -storeVarAxis)
                {
                    y = -storeVarAxis;
                    compareY = false;
                    switchx = -1;
                }
            }
            else
            {
                y += Delta * Time.deltaTime;
                if (y >= storeVarAxis)
                {
                    y = storeVarAxis;
                    compareY = true;
                    switchx = 1;
                }
            }
            float v = r * r - y * y;
            x = Mathf.Sqrt(Mathf.Abs(v));
            nextpos = new Vector3(pipe.transform.position.x + x * switchx, pipe.transform.position.y + y, transform.position.z);
            next = false;
        }
        transform.position = Vector3.MoveTowards(transform.position, nextpos, 1f * Time.deltaTime);
        if (Vector3.Distance(transform.position, nextpos) < .05) transform.position = nextpos;
        if (transform.position.x.Equals(nextpos.x) && transform.position.y.Equals(nextpos.y)) next = true;
    }
}
Well, the recommended way is to use this simple script:
using UnityEngine;

public class Rotating : MonoBehaviour
{
    public float speed;
    public GameObject pipe;

    float r, angle;
    Vector3 startpos;

    private void Start()
    {
        r = Mathf.Abs(transform.position.y - pipe.transform.position.y);
        angle = 0;
        transform.position = pipe.transform.position;
        startpos = transform.position;
    }

    void Update()
    {
        angle = angle + speed * Time.deltaTime;
        // Note: Quaternion.EulerAngles is deprecated and takes radians;
        // on current Unity versions use Quaternion.Euler (degrees) instead.
        transform.rotation = Quaternion.EulerAngles(0, 0, angle);
        transform.position = startpos + (transform.rotation * new Vector3(r, 0, 0));
    }
}
I think Quaternion * Vector3 is what you are looking for. Luckily the box's rotation in its own local coordinates is the same rotation you need to apply to the box's position.
public float speed;  // how fast to rotate
public float radius; // radius of the cylinder
public float angle;  // angle around it

void Update()
{
    if (Input.GetKey(KeyCode.LeftArrow))
    {
        angle = angle + speed * Time.deltaTime;
    }
    if (Input.GetKey(KeyCode.RightArrow))
    {
        angle = angle - speed * Time.deltaTime;
    }
    // figure out the rotation (from euler angles i guess??)
    // (Quaternion.EulerAngles is deprecated and takes radians; Quaternion.Euler takes degrees)
    var quat = Quaternion.EulerAngles(new Vector3(0, angle, 0));
    // ok uh what is the box position?? lets just multiply
    var unrotated_position = new Vector3(radius, 0, 0);
    var rotated_position = quat * unrotated_position;
    this.transform.position = rotated_position;
    // oh yea and also rotate the box in its own local coordinates
    this.transform.rotation = quat;
}

How do I make a pool cue rotate around the cue ball?

I want the pool cue to rotate around the cue ball as the player drags the mouse. I've played some pool games on the internet and they all seem to work this way. This will eventually be a browser game.
This game isn't mine, but it has the basic mechanic that I want (I don't care about the lines that are displayed, I just want the rotate and hit mechanic) https://www.crazygames.com/game/8-ball-billiards-classic
I have tried some orbiting scripts so far; none of them work.
I've tried scripts that make objects orbit based on time, hoping to make them orbit with the mouse drag instead. I also can't get the cue to constantly face the cue ball.
This code has gotten me the closest.
// Hypothetical wrapper: the class declaration was omitted in the original post.
public class CueCircle : MonoBehaviour
{
    public int vertexCount = 40;
    public float lineWidth = 0.2f;
    public float radius;
    public bool circleFillscreen;

    // circle variables
    static float timeCounter = 0;
    float width;
    float height;

    private LineRenderer lineRenderer;

    private void Awake()
    {
        lineRenderer = GetComponent<LineRenderer>();
        SetupCircle();
    }

    void Update()
    {
        timeCounter += Time.deltaTime;
        float x = Mathf.Cos(timeCounter);
        float y = 0;
        float z = Mathf.Sin(timeCounter);
    }

    private void SetupCircle()
    {
        lineRenderer.widthMultiplier = lineWidth;
        if (circleFillscreen)
        {
            radius = Vector3.Distance(Camera.main.ScreenToWorldPoint(new Vector3(0f, Camera.main.pixelRect.yMax, 0f)),
                Camera.main.ScreenToWorldPoint(new Vector3(0f, Camera.main.pixelRect.yMin, 0f))) * 0.5f - lineWidth;
        }
        float deltaTheta = (2f * Mathf.PI) / vertexCount;
        float theta = 0F;
        lineRenderer.positionCount = vertexCount; // was "-vertexCount", which leaves the loop below with nothing to draw
        for (int i = 0; i < lineRenderer.positionCount; i++)
        {
            Vector3 pos = new Vector3(radius * Mathf.Cos(theta), radius * Mathf.Sin(theta), 0f);
            lineRenderer.SetPosition(i, pos);
            theta += deltaTheta;
        }
    }

#if UNITY_EDITOR
    private void OnDrawGizmos()
    {
        float deltaTheta = (2f * Mathf.PI) / vertexCount;
        float theta = 0f;
        Vector3 oldPos = Vector3.zero;
        for (int i = 0; i < vertexCount + 1; i++)
        {
            Vector3 pos = new Vector3(radius * Mathf.Cos(theta), 0F, radius * Mathf.Sin(theta));
            Gizmos.DrawLine(oldPos, transform.position + pos);
            oldPos = transform.position + pos;
            theta += deltaTheta;
        }
    }
#endif
}
Not really getting any error messages, code "works" but doesn't work.
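For what it's worth, a minimal sketch of the drag-to-orbit mechanic described above (purely illustrative; every name here is hypothetical, not from the linked game):
using UnityEngine;

public class CueOrbit : MonoBehaviour // hypothetical helper, attach to the cue
{
    public Transform cueBall;        // the ball to orbit
    public float distance = 1.5f;    // how far the cue sits from the ball
    public float sensitivity = 120f; // degrees per second of horizontal drag

    float angle; // current orbit angle in degrees

    void Update()
    {
        // Drag horizontally to swing the cue around the ball.
        if (Input.GetMouseButton(0))
            angle += Input.GetAxis("Mouse X") * sensitivity * Time.deltaTime;

        // Place the cue on a circle around the ball...
        Vector3 offset = Quaternion.Euler(0f, angle, 0f) * Vector3.back * distance;
        transform.position = cueBall.position + offset;

        // ...and keep it constantly facing the ball.
        transform.rotation = Quaternion.LookRotation(cueBall.position - transform.position);
    }
}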

Pinch-To-Zoom with Unity 5 UI

I'm trying to re-implement a pinch-to-zoom system in a Unity UI-based app. About six months ago I was able to hack one together by making the UI canvas a child of a regular GameObject, and manipulating that object's transform, but since updating to Unity 5.5+ I find this doesn't work. The closest I can get allows the pinch gesture to change the canvas' scaleFactor, which a) can make images, panels, etc resize improperly depending on their alignments, and b) won't allow me to pan once zoomed.
What I have so far is this:
public class PinchToZoomScaler : MonoBehaviour
{
    public Canvas canvas;          // The canvas
    public float zoomSpeed = 0.5f; // The rate of change of the canvas scale factor
    public float _resetDuration = 3.0f;

    float _durationTimer = 0.0f;
    float _startScale = 0.0f;

    void Start()
    {
        _startScale = canvas.scaleFactor;
    }

    void Update()
    {
        // If there are two touches on the device...
        if (Input.touchCount == 2)
        {
            // Store both touches.
            Touch touchZero = Input.GetTouch(0);
            Touch touchOne = Input.GetTouch(1);
            // Find the position in the previous frame of each touch.
            Vector2 touchZeroPrevPos = touchZero.position - touchZero.deltaPosition;
            Vector2 touchOnePrevPos = touchOne.position - touchOne.deltaPosition;
            // Find the magnitude of the vector (the distance) between the touches in each frame.
            float prevTouchDeltaMag = (touchZeroPrevPos - touchOnePrevPos).magnitude;
            float touchDeltaMag = (touchZero.position - touchOne.position).magnitude;
            // Find the difference in the distances between each frame.
            float deltaMagnitudeDiff = prevTouchDeltaMag - touchDeltaMag;
            // ... change the canvas size based on the change in distance between the touches.
            canvas.scaleFactor -= deltaMagnitudeDiff * zoomSpeed;
            // Clamp the canvas scale between its starting value and three times that.
            canvas.scaleFactor = Mathf.Max(canvas.scaleFactor, _startScale);
            canvas.scaleFactor = Mathf.Min(canvas.scaleFactor, _startScale * 3.0f);
            _durationTimer = 0.0f;
        }
        else
        {
            _durationTimer += Time.deltaTime;
            if (_durationTimer >= _resetDuration)
            {
                canvas.scaleFactor = _startScale;
            }
        }
    }
}
As I said, this works to a degree, but it doesn't give me nice uniform zooming, nor does it allow me to pan the canvas. Thanks in advance for any help.
Attach this script to the canvas object that you want to zoom in and out with a pinch:
using UnityEngine;
using UnityEngine.EventSystems;

public class ObjectScalling : MonoBehaviour, IPointerDownHandler, IPointerUpHandler
{
    private bool _isDragging;
    private float _currentScale;
    public float minScale, maxScale;
    private float _temp = 0;
    private float _scalingRate = 2;

    private void Start()
    {
        _currentScale = transform.localScale.x;
    }

    public void OnPointerDown(PointerEventData eventData)
    {
        if (Input.touchCount == 1)
        {
            _isDragging = true;
        }
    }

    public void OnPointerUp(PointerEventData eventData)
    {
        _isDragging = false;
    }

    private void Update()
    {
        if (_isDragging)
            if (Input.touchCount == 2)
            {
                transform.localScale = new Vector2(_currentScale, _currentScale);
                float distance = Vector3.Distance(Input.GetTouch(0).position, Input.GetTouch(1).position);
                if (_temp > distance)
                {
                    if (_currentScale < minScale)
                        return;
                    _currentScale -= (Time.deltaTime) * _scalingRate;
                }
                else if (_temp < distance)
                {
                    if (_currentScale > maxScale)
                        return;
                    _currentScale += (Time.deltaTime) * _scalingRate;
                }
                _temp = distance;
            }
    }
}
Reminder: this script only works on canvas objects.
You can use this function (just pass it a negative deltaMagnitudeDiff). It is also good to multiply deltaMagnitudeDiff by a ratio like 0.05:
// minScale, maxScale, rectTransform and pan are fields defined elsewhere in the answerer's class.
float currentScale = 1f;

void Zoom(float increment)
{
    currentScale += increment;
    if (currentScale >= maxScale)
    {
        currentScale = maxScale;
    }
    else if (currentScale <= minScale)
    {
        currentScale = minScale;
    }
    rectTransform.localScale = new Vector3(currentScale, currentScale, 1);
    pan.ValidatePosition();
}
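For example, wired into the question's pinch code, it might be called like this (my sketch):
// deltaMagnitudeDiff as computed in the question's Update():
Zoom(-deltaMagnitudeDiff * 0.05f); // negated, and damped by a small ratio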
For panning, you can use something like this:
public class Pan : MonoBehaviour
{
    public float Speed;
    Vector3 startDragPosition;

    public void BeginDrag()
    {
        startDragPosition = Input.mousePosition;
    }

    public void Drag()
    {
        transform.localPosition += (Input.mousePosition - startDragPosition) * Speed;
        startDragPosition = Input.mousePosition;
        ValidatePosition();
    }

    public void ValidatePosition()
    {
        var temp = transform.localPosition;
        var width = ((RectTransform)transform).sizeDelta.x;
        var height = ((RectTransform)transform).sizeDelta.y;
        var MaxX = 0.5f * width * Mathf.Max(0, transform.localScale.x - 1);
        var MaxY = 0.5f * height * Mathf.Max(0, transform.localScale.y - 1);
        var offsetX = transform.localScale.x * width * (((RectTransform)transform).pivot.x - 0.5f);
        var offsetY = transform.localScale.y * height * (((RectTransform)transform).pivot.y - 0.5f); // height (the original post used width here, presumably a typo)
        if (temp.x < -MaxX + offsetX)
            temp.x = -MaxX + offsetX;
        else if (temp.x > MaxX + offsetX)
            temp.x = MaxX + offsetX;
        if (temp.y < -MaxY + offsetY)
            temp.y = -MaxY + offsetY;
        else if (temp.y > MaxY + offsetY)
            temp.y = MaxY + offsetY;
        transform.localPosition = temp;
    }
} // closing brace for the class (missing in the original post)
Just call the functions (BeginDrag & Drag) from the Event Trigger component.
What I did to scale an object using pinch is this; it works on any touch screen when the object is in the middle of the screen:
if (Input.touchCount == 2)
{
    // The distance between the 2 touches is checked and subsequently used to scale the
    // object by moving the 2 fingers further from, or closer to, each other.
    Touch touch0 = Input.GetTouch(0);
    Touch touch1 = Input.GetTouch(1);
    if (isScaling) // this will only be done if scaling is true
    {
        float currentTouchDistance = getTouchDistance();
        float deltaTouchDistance = currentTouchDistance - touchDistanceOrigin;
        float scalePercentage = (deltaTouchDistance / 1200f) + 1f;
        Vector3 scaleTemp = transform.localScale;
        scaleTemp.x = scalePercentage * originalScale.x;
        scaleTemp.y = scalePercentage * originalScale.y;
        scaleTemp.z = scalePercentage * originalScale.z;
        // To make the object snap to 100%, a check is done to see if the object scale is
        // close to 100%; if it is, the scale is put back to 100% so it snaps to the normal
        // scale. This is a quality-of-life feature, so it's easy to get the original size.
        if (scaleTemp.x * 100 < 102 && scaleTemp.x * 100 > 98)
        {
            scaleTemp.x = 1;
            scaleTemp.y = 1;
            scaleTemp.z = 1;
        }
        // Here we apply the calculation done above to actually make the object bigger/smaller.
        transform.localScale = scaleTemp;
    }
    else
    {
        // If 2 fingers are touching the screen but isScaling is not true, check whether
        // the middle of the screen is looking at the object and, if it is, set isScaling to true.
        Ray ray;
        RaycastHit hitTouch;
        ray = cam.ViewportPointToRay(new Vector3(0.5f, 0.5f, 0));
        if (Physics.Raycast(ray, out hitTouch, 100f))
        {
            if (hitTouch.transform == transform)
            {
                isScaling = true;
                // Make sure that the distance between the fingers on initial contact
                // is used as the original distance.
                touchDistanceOrigin = getTouchDistance();
                originalScale = transform.localScale;
            }
        }
    }
}
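The snippet above relies on a few members that are not shown in the original post. A plausible reconstruction, inferred from how they are used (an assumption, not the author's code):
// Assumed context for the snippet above:
bool isScaling;
float touchDistanceOrigin;
Vector3 originalScale;
Camera cam; // e.g. assigned with cam = Camera.main in Start()

float getTouchDistance()
{
    // Distance in screen pixels between the two active touches.
    return Vector2.Distance(Input.GetTouch(0).position, Input.GetTouch(1).position);
}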
