My game has a baseline memory usage of around 315 MB. However, calling the following functions leads to a sharp rise in memory usage, leveling out at around 480 MB with spikes of 580 MB and more, accompanied by memory warnings and even crashes.
What happens: first, the TakeScreenshot coroutine is called three times in a row, which is the maximum number of screenshots per session. Second, SendEmailTask is called, showing all three pictures for the user to choose from. Choosing picture "#1" triggers SendImage1.
Maybe someone can point me to where and how I can get some of that memory back; that would be really great!
All relevant code should be here:
public class Picture : MonoBehaviour {

    private int ssCount = 0;

    private Sprite cachedImage1sprite;
    private Sprite cachedImage2sprite;
    private Sprite cachedImage3sprite;

    private Texture2D cachedImage1;
    private Texture2D cachedImage2;
    private Texture2D cachedImage3;

    private Texture2D JPGtex1;
    private Texture2D JPGtex2;
    private Texture2D JPGtex3;
    private Texture2D tex;

    void Awake () {
    }

    // Use this for initialization
    void Start () {
        JPGtex1 = new Texture2D (2, 2, TextureFormat.RGB24, false);
        JPGtex2 = new Texture2D (2, 2, TextureFormat.RGB24, false);
        JPGtex3 = new Texture2D (2, 2, TextureFormat.RGB24, false);

        // Create a texture the size of the screen, RGB24 format
        int width = Screen.width;
        int height = Screen.height;
        tex = new Texture2D (width, height, TextureFormat.RGB24, false);
    }

    // Update is called once per frame
    void Update () {
        if (ssCount == 0) {
            SendEmail.interactable = false;
            TakePhoto.interactable = true;
        } else if (ssCount == 1) {
            SendEmail.interactable = true;
        } else if (ssCount == 3) {
            TakePhoto.interactable = false;
        }
        //Debug.Log (ssCount);
    }

    void SendEmailTask () {
        if (ssCount == 3) {
            cachedImage1 = SA.IOSNative.Storage.AppCache.GetTexture ("IMAGE_1");
            cachedImage2 = SA.IOSNative.Storage.AppCache.GetTexture ("IMAGE_2");
            cachedImage3 = SA.IOSNative.Storage.AppCache.GetTexture ("IMAGE_3");

            ImagePicker.SetActive (true);

            // Image1
            Rect rec1 = new Rect (0, 0, cachedImage1.width, cachedImage1.height);
            cachedImage1sprite = Sprite.Create (cachedImage1, rec1, new Vector2 (0, 0), 1);
            Image1.image.sprite = cachedImage1sprite;

            // Image2
            Rect rec2 = new Rect (0, 0, cachedImage2.width, cachedImage2.height);
            cachedImage2sprite = Sprite.Create (cachedImage2, rec2, new Vector2 (0, 0), 1);
            Image2.image.sprite = cachedImage2sprite;

            // Image3
            Rect rec3 = new Rect (0, 0, cachedImage3.width, cachedImage3.height);
            cachedImage3sprite = Sprite.Create (cachedImage3, rec3, new Vector2 (0, 0), 1);
            Image3.image.sprite = cachedImage3sprite;

            SA.IOSNative.Storage.AppCache.Remove ("IMAGE_1");
            SA.IOSNative.Storage.AppCache.Remove ("IMAGE_2");
            SA.IOSNative.Storage.AppCache.Remove ("IMAGE_3");
        }
    }

    IEnumerator TakeScreenshot () {
        // Wait till the last possible moment before screen rendering to hide the UI
        yield return null;
        GameObject.Find ("Buttons").GetComponent<Canvas> ().enabled = false;
        FlashImage ();

        // Wait for screen rendering to complete
        yield return new WaitForEndOfFrame ();

        // Read the screen contents into the texture created in Start
        int width = Screen.width;
        int height = Screen.height;
        tex.ReadPixels (new Rect (0, 0, width, height), 0, 0);
        tex.Apply ();

        //byte[] screenshot = tex.EncodeToPNG();
        print ("Size is " + tex.width + " by " + tex.height);

        if (ssCount == 0) {
            SA.IOSNative.Storage.AppCache.Save ("IMAGE_1", tex);
            ssCount++;
        } else if (ssCount == 1) {
            SA.IOSNative.Storage.AppCache.Save ("IMAGE_2", tex);
            ssCount++;
        } else if (ssCount == 2) {
            SA.IOSNative.Storage.AppCache.Save ("IMAGE_3", tex);
            ssCount++;
        }

        IOSCamera.Instance.SaveTextureToCameraRoll (tex); // Save to camera roll

        // Show UI after we're done
        GameObject.Find ("Buttons").GetComponent<Canvas> ().enabled = true;
    }

    public void SendImage1 () {
        byte[] screenshot1 = cachedImage1.EncodeToJPG ();
        if (Facebook == false) {
            JPGtex1.LoadImage (screenshot1);
            TextureScale.Bilinear (JPGtex1, 1200, 900);
            IOSSocialManager.Instance.SendMail (SubjectText, EmailText, "", JPGtex1);
        } else {
            StartCoroutine (UploadToPage (screenshot1));
        }
        backToGame ();
    }

    public void backToGame () {
        Destroy (cachedImage1sprite);
        Destroy (cachedImage2sprite);
        Destroy (cachedImage3sprite);

        SA.IOSNative.Storage.AppCache.Remove ("IMAGE_1");
        SA.IOSNative.Storage.AppCache.Remove ("IMAGE_2");
        SA.IOSNative.Storage.AppCache.Remove ("IMAGE_3");

        Destroy (cachedImage1);
        Destroy (cachedImage2);
        Destroy (cachedImage3);
        cachedImage1 = null;
        cachedImage2 = null;
        cachedImage3 = null;

        Image3Obj.SetActive (true);
        ImagePicker.SetActive (false);
    }
}
EDIT
Detailed memory profiler after going through the routine twice:
Xcode memory profiler after going through the routine twice:
From your profiler report:
You're using a big chunk of memory on Mesh and Texture2D assets; that suggests to me you're possibly drawing over them or hiding them from the user rather than actually removing them from memory. 50+ MB in Texture2D assets is a little odd considering you've also got 120+ MB of Meshes loaded. I'm assuming from that that it's a 3D game with a 2D UI? If so, 50 MB is quite a lot to be spending on Texture2D assets.
"Objects" are what you think they are: objects. Instantiated classes, or GameObjects with attached components. So if you made a Player GameObject with a "playerStats" component holding a couple of variables for health, speed, stamina and so on, that would count as 2 objects.
80 MB isn't too worrying for Objects. Your Texture2D and Mesh usage is what strikes me as pretty high for a game targeting iOS. Make sure you're using mobile-friendly models and textures that aren't too high in resolution.
I had a similar problem with audio files on iOS; I don't know if this could be your case.
I loaded big audio files of 20 MB+ into memory to process them and then released the memory, but the memory usage kept going up in the Xcode profiler. This is caused by memory fragmentation; you can read more about it here: https://stackoverflow.com/a/3770593/4024219
My solution was to load the files in little chunks and reuse my arrays instead of creating and destroying new ones.
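For illustration, a minimal sketch of that chunk-and-reuse pattern (the class name, path handling and chunk size here are made up, not from my actual project):

using System.IO;
using UnityEngine;

public class ChunkedLoader : MonoBehaviour
{
    // One buffer reused for every read, so the heap never sees
    // repeated multi-megabyte allocations that fragment memory.
    private readonly byte[] chunk = new byte[256 * 1024];

    public void Process(string path)
    {
        using (var stream = File.OpenRead(path))
        {
            int read;
            while ((read = stream.Read(chunk, 0, chunk.Length)) > 0)
            {
                // process 'read' bytes of 'chunk' here
            }
        }
    }
}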
Related
I'm running the C# code below, which computes optical flow maps and saves them as PNGs during gameplay, in Unity with a VR headset that enforces an upper limit of 90 FPS. Without this code the project runs smoothly at 90 FPS. To run this code on top of the same project while staying consistently above 80 FPS, I had to use WaitForSeconds(0.2f) in the coroutine, but ideally I would compute and save the optical flow map for every frame of the game, or at least with a lower delay of about 0.01 seconds. I'm already using AsyncGPUReadback and WriteAsync.
Main Question: How can I speed up this code further?
Side Question: Is there any way I can dump the calculated optical flow maps as consecutive rows of a single CSV file, rather than creating a separate PNG for each map? Or would that be even slower?
using System.Collections;
using UnityEngine;
using System.IO;
using UnityEngine.Rendering;

namespace OpticalFlowAlternative
{
    public class OpticalFlow : MonoBehaviour {

        protected enum Pass {
            Flow = 0,
            DownSample = 1,
            BlurH = 2,
            BlurV = 3,
            Visualize = 4
        };

        public RenderTexture Flow { get { return resultBuffer; } }

        [SerializeField] protected Material flowMaterial;
        protected RenderTexture prevFrame, flowBuffer, resultBuffer, renderTexture, rt;

        public string customOutputFolderPath = "";
        private string filepathforflow;
        private int imageCount = 0;
        int targetTextureWidth, targetTextureHeight;

        private EyeTrackingV2 eyeTracking;

        protected void Start () {
            eyeTracking = GameObject.Find("XR Rig").GetComponent<EyeTrackingV2>();

            targetTextureWidth = Screen.width / 16;
            targetTextureHeight = Screen.height / 16;
            flowMaterial.SetFloat("_Ratio", 1f * Screen.height / Screen.width);

            renderTexture = new RenderTexture(targetTextureWidth, targetTextureHeight, 0);
            rt = new RenderTexture(Screen.width, Screen.height, 0);

            StartCoroutine("StartCapture");
        }

        protected void LateUpdate()
        {
            eyeTracking.flowCount = imageCount;
        }

        protected void OnDestroy ()
        {
            if (prevFrame != null)
            {
                prevFrame.Release();
                prevFrame = null;

                flowBuffer.Release();
                flowBuffer = null;

                rt.Release();
                rt = null;

                renderTexture.Release();
                renderTexture = null;
            }
        }

        IEnumerator StartCapture()
        {
            while (true)
            {
                yield return new WaitForSeconds(0.2f);

                ScreenCapture.CaptureScreenshotIntoRenderTexture(rt);
                // compensating for image flip
                Graphics.Blit(rt, renderTexture, new Vector2(1, -1), new Vector2(0, 1));

                if (prevFrame == null)
                {
                    Setup(targetTextureWidth, targetTextureHeight);
                    Graphics.Blit(renderTexture, prevFrame);
                }

                flowMaterial.SetTexture("_PrevTex", prevFrame);

                // calculating motion flow frame here
                Graphics.Blit(renderTexture, flowBuffer, flowMaterial, (int)Pass.Flow);
                Graphics.Blit(renderTexture, prevFrame);

                AsyncGPUReadback.Request(flowBuffer, 0, TextureFormat.ARGB32, OnCompleteReadback);
            }
        }

        void OnCompleteReadback(AsyncGPUReadbackRequest request)
        {
            if (request.hasError)
                return;

            var tex = new Texture2D(targetTextureWidth, targetTextureHeight, TextureFormat.ARGB32, false);
            tex.LoadRawTextureData(request.GetData<uint>());
            tex.Apply();

            WriteTextureAsync(tex);
        }
        async void WriteTextureAsync(Texture2D tex)
        {
            imageCount++;
            filepathforflow = customOutputFolderPath + imageCount + ".png";

            var bytes = tex.EncodeToPNG();
            // the readback texture is no longer needed once encoded;
            // destroying it (and disposing the stream) avoids a per-frame leak
            Destroy(tex);

            using (var stream = new FileStream(filepathforflow, FileMode.OpenOrCreate))
            {
                await stream.WriteAsync(bytes, 0, bytes.Length);
            }
        }
        protected void Setup(int width, int height)
        {
            prevFrame = new RenderTexture(width, height, 0);
            prevFrame.format = RenderTextureFormat.ARGBFloat;
            prevFrame.wrapMode = TextureWrapMode.Repeat;
            prevFrame.Create();

            flowBuffer = new RenderTexture(width, height, 0);
            flowBuffer.format = RenderTextureFormat.ARGBFloat;
            flowBuffer.wrapMode = TextureWrapMode.Repeat;
            flowBuffer.Create();
        }
    }
}
First thought here is to use CommandBuffers; with them you can perform a no-copy readback of the screen, apply your calculations, and store the results in separate buffers (textures).
Then you can request readbacks of parts of the texture, or of multiple textures, spread over frames, without blocking access to the texture currently being computed. When a readback completes, the best approach is to encode it to PNG/JPG on a separate thread, without blocking the main thread.
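A rough sketch of that readback setup, assuming the built-in render pipeline and a Unity version that has CommandBuffer.RequestAsyncReadback (the component and camera event are illustrative, not a drop-in for the code above):

using UnityEngine;
using UnityEngine.Rendering;

public class FlowReadback : MonoBehaviour
{
    public RenderTexture flowBuffer; // written by the flow pass elsewhere
    private CommandBuffer cb;

    void Start()
    {
        cb = new CommandBuffer { name = "Flow readback" };
        // The readback is queued on the GPU timeline; the callback fires
        // a few frames later without stalling either CPU or GPU.
        cb.RequestAsyncReadback(flowBuffer, OnReadback);
        GetComponent<Camera>().AddCommandBuffer(CameraEvent.AfterEverything, cb);
    }

    void OnReadback(AsyncGPUReadbackRequest req)
    {
        if (req.hasError) return;
        byte[] data = req.GetData<byte>().ToArray(); // copy, so a worker thread can own it
        // hand 'data' to a worker thread for encoding/saving
    }
}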
As an alternative to async readbacks, if you are on DX11/desktop, it's also possible to have a D3D buffer configured for fast CPU readback and map it every frame, if you want to avoid the few frames of latency that async readback introduces.
Creating a texture from the buffer is another waste of performance here: since the readback already gives you pixel values, you can use a general-purpose PNG encoder and save multi-threaded (texture creation is only allowed on the main thread).
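For example, newer Unity versions have ImageConversion.EncodeArrayToPNG, which works on a plain byte array instead of a Texture2D, so both encoding and writing can be pushed off the main thread. A sketch, assuming RGBA32 readback data (PngWriter is a made-up helper):

using System.IO;
using System.Threading.Tasks;
using UnityEngine;
using UnityEngine.Experimental.Rendering;

public static class PngWriter
{
    // Encode raw RGBA32 readback bytes and write the file on a worker
    // thread; no Texture2D is created and the main thread is not blocked.
    public static void SaveAsync(byte[] raw, int width, int height, string path)
    {
        Task.Run(() =>
        {
            byte[] png = ImageConversion.EncodeArrayToPNG(
                raw, GraphicsFormat.R8G8B8A8_UNorm, (uint)width, (uint)height);
            File.WriteAllBytes(path, png);
        });
    }
}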
If the latency is fine for you but you want an exact frame-number-to-image mapping, it's also possible to encode the frame number into the target image, so you always have it available before saving the PNG.
About the side question: CSV could be faster than the default PNG encoding, because PNG uses zip-like compression internally, while CSV is just a bunch of numbers compiled into strings.
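One possible shape for the single-file CSV idea, assuming the flow vector lives in the R and G channels (CsvDump and its row layout are made up for illustration):

using System.Globalization;
using System.IO;
using UnityEngine;

public static class CsvDump
{
    // Append one flow map as a single CSV row: frame number first,
    // then the R and G channels (the flow vector) of every pixel.
    public static void AppendRow(string path, int frame, Color[] pixels)
    {
        using (var writer = new StreamWriter(path, append: true))
        {
            writer.Write(frame);
            foreach (var p in pixels)
            {
                writer.Write(',');
                writer.Write(p.r.ToString(CultureInfo.InvariantCulture));
                writer.Write(',');
                writer.Write(p.g.ToString(CultureInfo.InvariantCulture));
            }
            writer.WriteLine();
        }
    }
}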
As the title suggests, I have a problem with the error occurring at the line
targetTexture.ReadPixels(new Rect(0, 0, cameraResolution.width, cameraResolution.height), 0, 0);
Error:
ReadPixels was called to read pixels from system frame buffer, while not inside drawing frame. UnityEngine.Texture2D:ReadPixels(Rect, Int32, Int32)
As I have understood from other posts, one way to solve this issue is to make an IEnumerator method that does yield return new WaitForSeconds or something similar, and call it with StartCoroutine(methodName), so that the frame gets time to render and there are actually pixels to read.
What I don't get is where in the following code this method would make the most sense. Which part does not get to load in time?
PhotoCapture photoCaptureObject = null;
Texture2D targetTexture = null;
public string path = "";
CameraParameters cameraParameters = new CameraParameters();

private void Awake()
{
    var cameraResolution = PhotoCapture.SupportedResolutions.OrderByDescending((res) => res.width * res.height).First();
    targetTexture = new Texture2D(cameraResolution.width, cameraResolution.height);

    // Create a PhotoCapture object
    PhotoCapture.CreateAsync(false, captureObject =>
    {
        photoCaptureObject = captureObject;

        cameraParameters.hologramOpacity = 0.0f;
        cameraParameters.cameraResolutionWidth = cameraResolution.width;
        cameraParameters.cameraResolutionHeight = cameraResolution.height;
        cameraParameters.pixelFormat = CapturePixelFormat.BGRA32;
    });
}

private void Update()
{
    // if not initialized yet don't take input
    if (photoCaptureObject == null) return;

    if (Input.GetKey("k") || Input.GetKey("k"))
    {
        Debug.Log("k was pressed");

        VuforiaBehaviour.Instance.gameObject.SetActive(false);

        // Activate the camera
        photoCaptureObject.StartPhotoModeAsync(cameraParameters, result =>
        {
            if (result.success)
            {
                // Take a picture
                photoCaptureObject.TakePhotoAsync(OnCapturedPhotoToMemory);
            }
            else
            {
                Debug.LogError("Couldn't start photo mode!", this);
            }
        });
    }
}

private static string FileName(int width, int height)
{
    return $"screen_{width}x{height}_{DateTime.Now:yyyy-MM-dd_HH-mm-ss}.png";
}

private void OnCapturedPhotoToMemory(PhotoCapture.PhotoCaptureResult result, PhotoCaptureFrame photoCaptureFrame)
{
    // Copy the raw image data into the target texture
    photoCaptureFrame.UploadImageDataToTexture(targetTexture);

    Resolution cameraResolution = PhotoCapture.SupportedResolutions.OrderByDescending((res) => res.width * res.height).First();
    targetTexture.ReadPixels(new Rect(0, 0, cameraResolution.width, cameraResolution.height), 0, 0);
    targetTexture.Apply();

    byte[] bytes = targetTexture.EncodeToPNG();
    string filename = FileName(Convert.ToInt32(targetTexture.width), Convert.ToInt32(targetTexture.height));

    // save to folder under assets
    File.WriteAllBytes(Application.streamingAssetsPath + "/Snapshots/" + filename, bytes);
    Debug.Log("The picture was uploaded");

    // Deactivate the camera
    photoCaptureObject.StopPhotoModeAsync(OnStoppedPhotoMode);
}

private void OnStoppedPhotoMode(PhotoCapture.PhotoCaptureResult result)
{
    // Shutdown the photo capture resource
    VuforiaBehaviour.Instance.gameObject.SetActive(true);
    photoCaptureObject.Dispose();
    photoCaptureObject = null;
}
Sorry if this counts as a duplicate of this one, for example.
Edit
And this one might be useful when I get to that point.
Is it so that I don't need these three lines at all?
Resolution cameraResolution = PhotoCapture.SupportedResolutions.OrderByDescending((res) => res.width * res.height).First();
targetTexture.ReadPixels(new Rect(0, 0, cameraResolution.width, cameraResolution.height), 0, 0);
targetTexture.Apply();
As written in the comments: with these three lines, the saved photo has a black background plus the AR GUI. Without the second line above, I get a photo with the AR GUI, but the background is a live stream of my computer's webcam. And I really don't want to see the computer webcam, but what the HoloLens sees.
Your three lines
Resolution cameraResolution = PhotoCapture.SupportedResolutions.OrderByDescending((res) => res.width * res.height).First();
targetTexture.ReadPixels(new Rect(0, 0, cameraResolution.width, cameraResolution.height), 0, 0);
targetTexture.Apply();
don't make much sense to me. Texture2D.ReadPixels is for creating a screenshot, so you would overwrite the texture you just received from PhotoCapture with a screenshot? (Also with incorrect dimensions, since the camera resolution is very probably not equal to the screen resolution.)
That's also the reason for
As written in the comments the difference between using these three lines and not is that the photo saved has a black background + the AR-GUI.
After doing
photoCaptureFrame.UploadImageDataToTexture(targetTexture);
you already have the Texture2D received from the PhotoCapture in the targetTexture.
I think you probably confused it with Texture2D.GetPixels, which is used to get the pixel data of a given Texture2D.
I would like to crop the captured photo from the center in the end, and am thinking that maybe that is possible with this code row? Beginning the new Rect at other pixels than (0, 0)?
What you actually want is to crop the received Texture2D from the center, as you mentioned in the comments. You can do that using GetPixels(int x, int y, int blockWidth, int blockHeight, int miplevel), which is used to cut out a certain area of a given Texture2D:
public static Texture2D CropAroundCenter(Texture2D input, Vector2Int newSize)
{
    if (input.width < newSize.x || input.height < newSize.y)
    {
        Debug.LogError("You can't cut out an area of an image which is bigger than the image itself!");
        return null;
    }

    // get the pixel coordinate of the center of the input texture
    var center = new Vector2Int(input.width / 2, input.height / 2);

    // Get pixels around the center.
    // GetPixels starts with (0,0) in the bottom left corner,
    // so as the name says, center.x, center.y would get the pixel in the center;
    // we want to start getting pixels from center minus half of newSize,
    // then read newSize pixels in both dimensions from there.
    var pixels = input.GetPixels(center.x - newSize.x / 2, center.y - newSize.y / 2, newSize.x, newSize.y, 0);

    // Create a new texture with newSize
    var output = new Texture2D(newSize.x, newSize.y);
    output.SetPixels(pixels);
    output.Apply();

    return output;
}
For (hopefully) better understanding, here is an illustration of what that GetPixels overload does with the given values:
and then use it in
private void OnCapturedPhotoToMemory(PhotoCapture.PhotoCaptureResult result, PhotoCaptureFrame photoCaptureFrame)
{
    // Copy the raw image data into the target texture
    photoCaptureFrame.UploadImageDataToTexture(targetTexture);

    // for example take only half of the texture's width and height
    targetTexture = CropAroundCenter(targetTexture, new Vector2Int(targetTexture.width / 2, targetTexture.height / 2));

    byte[] bytes = targetTexture.EncodeToPNG();
    string filename = FileName(Convert.ToInt32(targetTexture.width), Convert.ToInt32(targetTexture.height));

    // save to folder under assets
    File.WriteAllBytes(Application.streamingAssetsPath + "/Snapshots/" + filename, bytes);
    Debug.Log("The picture was uploaded");

    // Deactivate the camera
    photoCaptureObject.StopPhotoModeAsync(OnStoppedPhotoMode);
}
Or you could make it an extension method in a separate static class, like
public static class Texture2DExtensions
{
    public static void CropAroundCenter(this Texture2D input, Vector2Int newSize)
    {
        if (input.width < newSize.x || input.height < newSize.y)
        {
            Debug.LogError("You can't cut out an area of an image which is bigger than the image itself!");
            return;
        }

        // get the pixel coordinate of the center of the input texture
        var center = new Vector2Int(input.width / 2, input.height / 2);

        // Get pixels around the center.
        // GetPixels starts with (0,0) in the bottom left corner,
        // so as the name says, center.x, center.y would get the pixel in the center;
        // we want to start getting pixels from center minus half of newSize,
        // then read newSize pixels in both dimensions from there.
        var pixels = input.GetPixels(center.x - newSize.x / 2, center.y - newSize.y / 2, newSize.x, newSize.y, 0);

        // Resize the texture (creating a new one didn't work)
        input.Resize(newSize.x, newSize.y);
        input.SetPixels(pixels);
        input.Apply(true);
    }
}
and use it instead like
targetTexture.CropAroundCenter(new Vector2Int(targetTexture.width / 2, targetTexture.height / 2));
Note:
UploadImageDataToTexture: You may only use this method if you specified the BGRA32 format in your CameraParameters.
Luckily you use that anyway ;)
Keep in mind that this operation will happen on the main thread and therefore be slow.
However, the only alternative would be CopyRawImageDataIntoBuffer and generating the texture on another thread or externally, so I'd say it is ok to stay with UploadImageDataToTexture ;)
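For reference, a minimal sketch of how that alternative would start (only the buffer copy is shown; the worker-thread handoff is left out):

// inside OnCapturedPhotoToMemory, instead of UploadImageDataToTexture:
List<byte> rawImage = new List<byte>(); // needs System.Collections.Generic
photoCaptureFrame.CopyRawImageDataIntoBuffer(rawImage);
// rawImage now holds the BGRA32 bytes and could be handed to another thread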
and
The captured image will also appear flipped on the HoloLens. You can reorient the image by using a custom shader.
By "flipped" they actually mean that the Y axis of the texture is upside down; the X axis is correct.
For flipping the Texture vertically you can use a second extension method:
public static class Texture2DExtensions
{
    public static void CropAroundCenter() { .... }

    public static void FlipVertically(this Texture2D texture)
    {
        var pixels = texture.GetPixels();
        var flippedPixels = new Color[pixels.Length];

        // These for loops are for running through each individual pixel and
        // writing them with inverted Y coordinates into flippedPixels
        for (var x = 0; x < texture.width; x++)
        {
            for (var y = 0; y < texture.height; y++)
            {
                var pixelIndex = x + y * texture.width;
                var flippedIndex = x + (texture.height - 1 - y) * texture.width;
                flippedPixels[flippedIndex] = pixels[pixelIndex];
            }
        }

        texture.SetPixels(flippedPixels);
        texture.Apply();
    }
}
and use it like
targetTexture.FlipVertically();
Result: (For this example I used FlipVertically and cropped to half size every second on a given texture, but it should work the same for a taken picture.)
Image source: http://developer.vuforia.com/sites/default/files/sample-apps/targets/imagetargets_targets.pdf
Update
To your button problem:
Don't use
if (Input.GetKey("k") || Input.GetKey("k"))
First of all, you are checking the exact same condition twice. And GetKey fires every frame while the key stays pressed. Instead, rather use
if (Input.GetKeyDown("k"))
which fires only a single time. I guess there was an issue between Vuforia and PhotoCapture, since your original version fired so often; maybe you had some concurrent PhotoCapture processes...
The reason I wish to do so is that Unity has a nice DXT5 format that reduces the file size by a lot. But to get that, I need a sprite whose size is a multiple of 4, both in height and width.
So I thought I'd create a new texture with the desired size, load its pixels with the original's pixels, and make a sprite out of it that I save as an asset.
The issue is that while saving the texture works (I get the same texture with the proper sizes), saving the sprite doesn't. It spits out something, but that isn't even close to what I need.
Here is the code:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

public class ResizeSprites
{
    public void Resize(Sprite sprite)
    {
        int _hei, _wid;

        // getting the closest higher values that are a multiple of 4.
        for (_hei = sprite.texture.height; _hei % 4 != 0; _hei++) ;
        for (_wid = sprite.texture.width; _wid % 4 != 0; _wid++) ;

        // creating the new texture.
        Texture2D tex = new Texture2D(_wid, _hei, TextureFormat.RGBA32, false);
        //tex.alphaIsTransparency = true;
        //tex.EncodeToPNG();

        // giving the new texture the "improper" ratio sprite texture's pixel info
        // pixel by pixel.
        for (int wid = 0; wid < sprite.texture.width; wid++)
        {
            for (int hei = 0; hei < sprite.texture.height; hei++)
            {
                tex.SetPixel(wid, hei, sprite.texture.GetPixel(wid, hei));
            }
        }

        // saving the asset. the save works; was used for both meshes as well as textures.
        Sprite n_spr = Sprite.Create(tex,
            new Rect(0, 0, tex.width, tex.height),
            new Vector2(0.5f, 0.5f), 100.0f);

        AssetSaver.CreateAsset(n_spr, sprite.name + "_dtx5");
    }
}
And here are my results:
The first one is the original sprite, and the second is what I was given.
Edit: Even if I don't save my creation and just instantiate it as a GameObject, the result is still the same ugly one.
You really don't need all this code. Texture2D has a Resize function, so just pull the Texture2D from the Sprite, then call Resize to re-size it. That's it.
Something like this:
public void Resize(Sprite sprite)
{
    Texture2D tex = sprite.texture;
    tex.Resize(100, 100, TextureFormat.RGBA32, false);

    Sprite n_spr = Sprite.Create(tex,
        new Rect(0, 0, tex.width, tex.height),
        new Vector2(0.5f, 0.5f), 100.0f);

    AssetSaver.CreateAsset(n_spr, sprite.name + "_dtx5");
}
As for your original problem, that's because you did not call the Apply function: each time you modify the pixels, you are supposed to call Apply. Finally, always use GetPixels32, not GetPixel or GetPixels; GetPixels32 is extremely fast compared to the rest.
public void Resize(Sprite sprite)
{
    int _hei, _wid;

    // getting the closest higher values that are a multiple of 4.
    for (_hei = sprite.texture.height; _hei % 4 != 0; _hei++) ;
    for (_wid = sprite.texture.width; _wid % 4 != 0; _wid++) ;

    // creating the new texture.
    Texture2D tex = new Texture2D(_wid, _hei, TextureFormat.RGBA32, false);
    //tex.alphaIsTransparency = true;
    //tex.EncodeToPNG();

    // copy the old pixels into the (larger) new texture as one block;
    // the block overload is needed because the two sizes can differ
    Color32[] color = sprite.texture.GetPixels32();
    tex.SetPixels32(0, 0, sprite.texture.width, sprite.texture.height, color);
    tex.Apply();

    // saving the asset. the save works; was used for both meshes as well as textures.
    Sprite n_spr = Sprite.Create(tex,
        new Rect(0, 0, tex.width, tex.height),
        new Vector2(0.5f, 0.5f), 100.0f);

    AssetSaver.CreateAsset(n_spr, sprite.name + "_dtx5");
}
I am using a WebCamTexture, which I start in my Start method, and then I run another method. I get the pixels using GetPixels(); however, they all come up as (0, 0, 0). Is there any fix for this, or a way I can wait? (Unity seems to crash using while loops and WaitForSeconds.) Here is my current Start method:
void Start () {
    rawImage = gameObject.GetComponent<RawImage> ();
    rawImageRect = rawImage.GetComponent<RectTransform> ();

    webcamTexture = new WebCamTexture ();
    rawImage.texture = webcamTexture;
    rawImage.material.mainTexture = webcamTexture;
    webcamTexture.Play ();

    Method ();

    loadingTextObject.SetActive (false);
    gameObject.SetActive (true);
}

void Method () {
    print (webcamTexture.GetPixels ()[0]);
}
And this prints a (0, 0, 0) color every time.
Do your webcam stuff in a coroutine, then wait for 2 seconds with yield return new WaitForSeconds(2); before calling webcamTexture.GetPixels:
void Start () {
    rawImage = gameObject.GetComponent<RawImage> ();
    rawImageRect = rawImage.GetComponent<RectTransform> ();

    StartCoroutine (startWebCam ());

    loadingTextObject.SetActive (false);
    gameObject.SetActive (true);
}

private IEnumerator startWebCam ()
{
    webcamTexture = new WebCamTexture ();
    rawImage.texture = webcamTexture;
    rawImage.material.mainTexture = webcamTexture;
    webcamTexture.Play ();

    // Wait for 2 seconds
    yield return new WaitForSeconds (2);

    // Now call GetPixels
    Method ();
}

void Method () {
    print (webcamTexture.GetPixels ()[0]);
}
Or, like Joe said in the comment section, waiting a fixed number of seconds is not reliable. You can just wait for the width to report something real before reading it. Just replace the
yield return new WaitForSeconds(2);
with
while (webcamTexture.width < 100)
{
    yield return null;
}
If you don't want to wait until a size is reported, and the target platform is iOS (iPhone/iPad), it's also possible to write a plugin that gets the size directly from the camera code in Unity, by extending CameraCapture.mm:
- (void)captureOutput:(AVCaptureOutput*)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection*)connection
{
    intptr_t tex = (intptr_t)CMVideoSampling_SampleBuffer(&self->_cmVideoSampling, sampleBuffer, &self->_width, &self->_height);

    _staticWebCamTexWidth = self->_width;
    _staticWebCamTexHeight = self->_height;

    UnityDidCaptureVideoFrame(tex, self->_userData);

    if (self.firstFrameCallback != nil) {
        [self fireFirstFrame];
    }
}
self->_width / _height can then be returned by methods exposed as plugin methods, like:
[DllImport("__Internal")]
private static extern int GetWebCamTextureWidth ();
[DllImport("__Internal")]
private static extern int GetWebCamTextureHeight ();
and then in the Objective-C code
extern "C" int GetWebCamTextureWidth() { return _staticWebCamTexWidth; }
extern "C" int GetWebCamTextureHeight() { return _staticWebCamTexHeight; }
This is the solution I came up with for one of the iOS social games that use Unity. On Android we fell back on the previously described approach.
Two seconds might be quite a lot of time to spend waiting when you're grabbing the size of the requested image.
The WebCamTexture has a requested width/height that you can access, and it will hold the "real" width/height of the texture after GetPixels has been called once. We've used the asset called CameraCaptureKit (https://www.assetstore.unity3d.com/en/#!/content/56673) in two projects where we share photos with the users.
The code used there takes the WebCamTexture and grabs frames every frame until the texture returns a proper width and height.
In an Update function, TryDummySnapshot was called until the size was returned properly.
// ANDREAS added this: In some cases we apparently don't get correct width and height
// until we have tried to read pixels from the buffer.
void TryDummySnapshot () {
    if (!gotAspect) {
        if (webCamTexture.width > 16) {
            if (Application.platform == RuntimePlatform.IPhonePlayer) {
                if (verbose) Debug.Log ("Already got width height of WebCamTexture.");
            } else {
            }
            gotAspect = true;
        } else {
            if (verbose) Debug.Log ("Taking dummy snapshot");
            if (tmpImg == null) {}
            // reading pixels once forces the texture to report its real size
            Color32[] c = webCamTexture.GetPixels32 ();
        }
    }
}
I am trying to play video on a Direct3D 9 device, using:
nVLC - for fetching the RGB32 frames from file
SlimDX - for actually displaying the frames on the video device using textures.
Here is my code to receive the RGB32 frames:
_videoWrapper.SetCallback(delegate(Bitmap frame)
{
    if (_mainContentSurface == null || _dead)
        return;

    var bmpData = frame.LockBits(new Rectangle(0, 0, frame.Width, frame.Height), ImageLockMode.ReadOnly, frame.PixelFormat);
    var ptr = bmpData.Scan0;
    var size = bmpData.Stride * frame.Height;

    _mainContentSurface.Buffer = new byte[size];
    System.Runtime.InteropServices.Marshal.Copy(ptr, _mainContentSurface.Buffer, 0, size);

    _mainContentSurface.SetTexture(_mainContentSurface.Buffer, frame.Width, frame.Height);
    _secondaryContentSurface.SetTexture(_mainContentSurface.Buffer, frame.Width, frame.Height); // same buffer to second WINDOW

    _mainContentSurface.VideoFrameRate.Value = _videoWrapper.ActualFrameRate;
    frame.UnlockBits(bmpData);
});
And here is my actual usage of SetTexture, mapping the texture onto a quad:
public void SetTexture(byte[] image, int width, int height)
{
    if (Context9 != null && Context9.Device != null)
    {
        if (IsFormClosed)
            return;

        // rendering is separate from the "FRAME FETCH" thread, if that makes sense;
        // also note that we recreate the video texture if needed.
        _renderWindow.BeginInvoke(new Action(() =>
        {
            if (_image == null || _currentVideoTextureWidth != width || _currentVideoTextureHeight != height)
            {
                if (_image != null)
                    _image.Dispose();

                _image = new Texture(Context9.Device, width, height, 0, Usage.Dynamic, Format.A8R8G8B8, Pool.Default);

                _currentVideoTextureWidth = width;
                _currentVideoTextureHeight = height;

                if (_image == null)
                    throw new Exception("Video card does not support textures power of TWO or dynamic textures. Get a video card");
            }

            // upload data into texture.
            var data = _image.LockRectangle(0, LockFlags.None);
            data.Data.Write(image, 0, image.Length);
            _image.UnlockRectangle(0);
        }));
    }
}
and finally the actual rendering:
Context9.Device.SetStreamSource(0, _videoVertices, 0, Vertex.SizeBytes);
Context9.Device.VertexFormat = Vertex.Format;

// Set up our texture. Using textures introduces the texture stage states,
// which govern how textures get blended together (in the case of multiple
// textures) and lighting information.
Context9.Device.SetTexture(0, _image);

// The sampler states govern how smoothly the texture is displayed.
Context9.Device.SetSamplerState(0, SamplerState.MinFilter, TextureFilter.Linear);
Context9.Device.SetSamplerState(0, SamplerState.MagFilter, TextureFilter.Linear);
Context9.Device.SetSamplerState(0, SamplerState.MipFilter, TextureFilter.Linear);

// Now drawing 2 triangles, for a quad.
Context9.Device.DrawPrimitives(PrimitiveType.TriangleList, 0, 2);
Now, it works on my machine without problems, with every video file and in every position. But when I checked on WinXP, the picture was completely broken. Here are screen captures of both the non-working and the working version:
http://www.upload.ee/image/2941734/untitled.PNG
http://www.upload.ee/image/2941762/Untitled2.png
Note that the first picture shows both _mainContentSurface and _secondaryContentSurface. Does anyone have an idea what the problem could be?
You shouldn't need to recreate your texture every time; just create it once as dynamic:
this.Texture = new Texture(device, w, h, 1, Usage.Dynamic, Format.X8R8G8B8, Pool.Default);
The copy issue could come from the stride (the row length might be different, since it is padded). To get the row pitch of the texture:
public int GetRowPitch()
{
    if (rowpitch == -1)
    {
        DataRectangle dr = this.Texture.LockRectangle(0, LockFlags.Discard);
        this.rowpitch = dr.Pitch;
        this.Texture.UnlockRectangle(0);
    }
    return rowpitch;
}
If your texture row pitch is equal to your frame pitch, you can copy the way you do; otherwise you can do it this way:
public unsafe void WriteDataPitch(IntPtr ptr, int len)
{
    DataRectangle dr = this.Texture.LockRectangle(0, LockFlags.Discard);

    int pos = 0;
    int stride = this.Width * 4;
    byte* data = (byte*)ptr.ToPointer();

    // copy one row at a time, advancing by the texture's pitch rather
    // than the frame's stride, so padded rows line up correctly
    for (int i = 0; i < this.Height; i++)
    {
        dr.Data.WriteRange((IntPtr)data, this.Width * 4);
        pos += dr.Pitch;
        dr.Data.Position = pos;
        data += stride;
    }

    this.Texture.UnlockRectangle(0);
}
If you want an example of a fully working VLC player with SlimDX, let me know; I've got one around (I just need to wrap it up nicely).