I can't seem to get EncodeToPNG() to save a file at any dimensions other than 512 x 512, even though my texture is 1280 x 1024, which I'm pulling from the dimensions of my RenderTexture object 'tex'. What am I missing? Thank you!
// Saves texture as PNG file.
using UnityEngine;
using System.Collections;
using System.IO;
public class SaveTexture : MonoBehaviour {
    public RenderTexture tex;
    int tWidth, tHeight;

    int getTextureWidth(int texWidth)
    {
        return tex.width;
    }

    int getTextureHeight(int texHeight)
    {
        return tex.height;
    }

    public void Start()
    {
        tWidth = getTextureWidth(tex.width);
        tHeight = getTextureHeight(tex.height);
        Debug.Log("Texture Width: " + tWidth + ", Texture Height: " + tHeight);
    }

    Texture2D toTexture2D(RenderTexture rTex)
    {
        Texture2D tex2D = new Texture2D(tWidth, tHeight, TextureFormat.ARGB32, false);
        RenderTexture.active = rTex;
        tex2D.ReadPixels(new Rect(0, 0, tWidth, tHeight), 0, 0);
        tex2D.Apply();
        return tex2D;
    }

    // Save Texture as PNG
    public void SaveTexturePNG()
    {
        // toTexture2D is a method of this class, not an extension method on RenderTexture
        Texture2D myTexture = toTexture2D(tex);
        // Encode texture into PNG
        byte[] bytes = myTexture.EncodeToPNG();
        Object.Destroy(myTexture);
        // For testing purposes, also write to a file in the project folder
        File.WriteAllBytes(Application.dataPath + "/../AnimalTexture/AnimalTexture.png", bytes);
    }
}
I ran into this problem too. There is actually no problem: the file is already saved at the resolution you define. In the created PNG's Inspector (import settings) there is an option named "Advanced -> Non-Power of 2". It is set to "ToNearest" by default. Change it to "None" and it will be fixed.
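If you prefer to apply that import setting from an editor script instead of the Inspector, a minimal sketch could look like this (the asset path and menu item name are just examples; the PNG has to live inside the Assets folder to have a TextureImporter):

#if UNITY_EDITOR
using UnityEditor;

// Example only: sets "Non-Power of 2" to None for a saved PNG so it keeps
// its real size (e.g. 1280 x 1024) instead of being scaled to a power of two.
public static class NpotImportFixer
{
    [MenuItem("Tools/Set NPOT Scale To None")]
    static void SetNpotToNone()
    {
        string assetPath = "Assets/AnimalTexture.png"; // example path
        var importer = AssetImporter.GetAtPath(assetPath) as TextureImporter;
        if (importer == null) return;

        importer.npotScale = TextureImporterNPOTScale.None;
        importer.SaveAndReimport();
    }
}
#endif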
Related
Can someone please explain the difference between the 2 examples below:
Pre-build, in Unity, dragging a file (that is renamed from test.jpg to test.jpg.bytes) to a slot defined as a TextAsset (imageAsset) and then using this code:
private byte[] PrepareImageFile()
{
    int width = Screen.width;
    int height = Screen.height;
    var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
    tex.LoadImage(imageAsset.bytes);
    tex.Apply();
    byte[] bytes = tex.EncodeToPNG();
    Destroy(tex);
    return bytes;
}
Post-build, on an Android tablet, passing in a gallery image path (aPath) then using this code:
private byte[] PrepareTheFile(string aPath)
{
    byte[] data = File.ReadAllBytes(aPath);
    int width = Screen.width;
    int height = Screen.height;
    var tex = new Texture2D(width, height, TextureFormat.RGB24, false);
    tex.LoadImage(data);
    tex.Apply();
    byte[] bytes = tex.EncodeToPNG();
    Destroy(tex);
    return bytes;
}
The reason I know they are different is that when the image is sent to a facial recognition API (using bytes), #1 returns accurate results (9/10 identified), but #2 returns inaccurate results (only 1/10 identified correctly).
There are no errors, and the image must be reaching its destination for analysis, since 1 of the 10 people does get identified correctly.
public void GrabImage()
{
    NativeGallery.Permission permission = NativeGallery.GetImageFromGallery((path) =>
    {
        if (path != null)
        {
            texture = new Texture2D(300, 300, TextureFormat.RGB24, false);
            texture.LoadImage(File.ReadAllBytes(path));
            Debug.Log(_celebTextAttributes.text + "W:" + texture.width + " x H:" + texture.height);
            texture.Apply();
            _celebTextAttributes.SetText("Path: " + path);
            imagePath = path;
        }
    }, "Select an image from", "image/png");
    // Note: the callback above runs asynchronously, after the user has picked an image,
    // so 'texture' may still be unassigned when the next line executes
    _celebImage.GetComponent<Renderer>().material.mainTexture = texture;
}
Any help?
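One detail worth checking when comparing the two paths: LoadImage discards the size passed to the Texture2D constructor (Screen.width x Screen.height, or 300 x 300 above) and resizes the texture to the image's own resolution and format. A small sketch of logging what each path actually decodes before it is encoded and sent could be a helper like this inside the same MonoBehaviour (the helper name is made up for illustration):

// Illustrative helper: decode the bytes, log what was actually decoded, return the PNG.
private byte[] PrepareAndLog(byte[] data)
{
    // The 2 x 2 size is a placeholder; LoadImage resizes the texture to the image's
    // native resolution and picks the format from the image itself.
    var tex = new Texture2D(2, 2, TextureFormat.RGB24, false);
    tex.LoadImage(data);
    Debug.Log("Decoded " + tex.width + " x " + tex.height + ", format " + tex.format);
    byte[] bytes = tex.EncodeToPNG();
    Destroy(tex);
    return bytes;
}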
I'm having an issue with an image I'm trying to write to disk (the image should be 1280x1024, but it's being saved as 512x512), and I'm wondering if a Coroutine might solve the problem. If so, I'm completely new to Coroutines; could someone illustrate how the code I've written below would be converted into a Coroutine? Many thanks.
// Saves texture as PNG file.
using UnityEngine;
using System.Collections;
using System.IO;
public class SaveTexture : MonoBehaviour {
    public RenderTexture tex;
    int tWidth, tHeight;

    int getTextureWidth(int texWidth)
    {
        return tex.width;
    }

    int getTextureHeight(int texHeight)
    {
        return tex.height;
    }

    public void Start()
    {
        tWidth = getTextureWidth(tex.width);
        tHeight = getTextureHeight(tex.height);
        Debug.Log("RenderTexture Width: " + tWidth + ", RenderTexture Height: " + tHeight);
    }

    Texture2D toTexture2D(RenderTexture rTex)
    {
        Texture2D tex2D = new Texture2D(tWidth, tHeight, TextureFormat.ARGB32, false);
        RenderTexture.active = rTex;
        tex2D.ReadPixels(new Rect(0, 0, tWidth, tHeight), 0, 0);
        tex2D.Apply();
        return tex2D;
    }

    // Save Texture as PNG
    public void SaveTexturePNG()
    {
        // toTexture2D is a method of this class, not an extension method on RenderTexture
        Texture2D myTexture = toTexture2D(tex);
        // Note: getTextureWidth/getTextureHeight ignore their parameter and always
        // return the RenderTexture's dimensions, so this logs tex's size, not myTexture's
        tWidth = getTextureWidth(myTexture.width);
        tHeight = getTextureHeight(myTexture.height);
        Debug.Log("Texture2D Width: " + tWidth + ", Texture2D Height: " + tHeight);
        // Encode texture into PNG
        byte[] bytes = myTexture.EncodeToPNG();
        Object.Destroy(myTexture);
        Debug.Log(bytes.Length);
        // For testing purposes, also write to a file in the project folder
        File.WriteAllBytes(Application.dataPath + "/../AnimalTexture/AnimalTexture.png", bytes);
    }
}
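In case it helps, here is a minimal sketch of what SaveTexturePNG could look like as a coroutine, using the usual WaitForEndOfFrame pattern. It assumes the tex field and toTexture2D method from the class above, and it only illustrates the coroutine conversion; it is not a guaranteed fix for the 512x512 size.

    // Sketch only: same save logic as SaveTexturePNG, but run as a coroutine so
    // ReadPixels happens after the current frame has finished rendering.
    IEnumerator SaveTexturePNGRoutine()
    {
        yield return new WaitForEndOfFrame();

        Texture2D myTexture = toTexture2D(tex);
        byte[] bytes = myTexture.EncodeToPNG();
        Object.Destroy(myTexture);
        File.WriteAllBytes(Application.dataPath + "/../AnimalTexture/AnimalTexture.png", bytes);
    }

    // Started from somewhere else in the same component, e.g.:
    // StartCoroutine(SaveTexturePNGRoutine());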
I have a cubemap. I need to save it as a circular image, for example as a PNG. After many hours of searching on the Internet I have failed. How do I do it? Is it possible?
I have this image: joxi.ru/zANd66wSl6Kdkm
I need to save it as a PNG like this: joxi.ru/12MW55wT40LYjr. Here is part of the code, which may help you:
tex.SetPixels(cubemap.GetPixels(CubemapFace.PositiveZ));
bytes = tex.EncodeToPNG();
File.WriteAllBytes(Application.dataPath + "/" + cubemap.name +"_PositiveZ.png", bytes);
You can create a class that inherits from ScriptableWizard and renders a cubemap from a specific transform. Here is my code:
using UnityEngine;
using UnityEditor;
using System.Collections;
using System.IO;
public class RenderCubemapWizard : ScriptableWizard
{
    public Transform renderFromPosition;
    public Cubemap cubemap;

    void OnWizardUpdate()
    {
        // helpString and isValid are inherited ScriptableWizard members;
        // assign them (rather than declaring locals) so they drive the wizard UI
        helpString = "Select transform to render from and cubemap to render into";
        isValid = (renderFromPosition != null) && (cubemap != null);
    }

    void OnWizardCreate()
    {
        // create temporary camera for rendering
        GameObject go = new GameObject("CubemapCamera");
        go.AddComponent<Camera>();
        // place it on the object
        go.transform.position = renderFromPosition.position;
        go.transform.rotation = Quaternion.identity;
        // render into cubemap
        go.GetComponent<Camera>().RenderToCubemap(cubemap);
        // destroy temporary camera
        DestroyImmediate(go);
        ConvertToPng();
    }

    [MenuItem("GameObject/Render into Cubemap")]
    static void RenderCubemap()
    {
        ScriptableWizard.DisplayWizard<RenderCubemapWizard>(
            "Render cubemap", "Render!");
    }

    void ConvertToPng()
    {
        Debug.Log(Application.dataPath + "/" + cubemap.name + "_PositiveX.png");
        var tex = new Texture2D(cubemap.width, cubemap.height, TextureFormat.RGB24, false);
        // Copy each cubemap face into the texture and encode it as a PNG
        tex.SetPixels(cubemap.GetPixels(CubemapFace.PositiveX));
        var bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_PositiveX.png", bytes);
        tex.SetPixels(cubemap.GetPixels(CubemapFace.NegativeX));
        bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_NegativeX.png", bytes);
        tex.SetPixels(cubemap.GetPixels(CubemapFace.PositiveY));
        bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_PositiveY.png", bytes);
        tex.SetPixels(cubemap.GetPixels(CubemapFace.NegativeY));
        bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_NegativeY.png", bytes);
        tex.SetPixels(cubemap.GetPixels(CubemapFace.PositiveZ));
        bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_PositiveZ.png", bytes);
        tex.SetPixels(cubemap.GetPixels(CubemapFace.NegativeZ));
        bytes = tex.EncodeToPNG();
        File.WriteAllBytes(Application.dataPath + "/" + cubemap.name + "_NegativeZ.png", bytes);
        DestroyImmediate(tex);
    }
}
This basically creates a new cubemap from the position of the transform that you specify from within the wizard (to use the wizard, go to GameObject in the top menu and at the bottom of the list you'll see 'Render into Cubemap'). It then grabs the six faces of the cubemap and converts each of them into a PNG file from within the ConvertToPng() function. This works for me and it should work for you, since it essentially only needs a transform position.
Sorry for how long it is; I tried to simplify it, but this is as simplified as I could make it.
Here are the links that helped me come to this conclusion:
How to convert a face to png
Unity's scriptable wizard for rendering a cubemap
This is the correct approach; it allows for a single compressed cubemap texture. After the .png texture is saved, just set its import settings to Cube and whatever compression settings you want (a small import-settings sketch follows the code below).
#if UNITY_EDITOR
using UnityEngine;
using UnityEditor;
using System.IO;
public class RenderCubemapUtil : ScriptableWizard
{
    public Transform renderFromPosition;
    public int size = 512;
    public string newCubmapPath;

    void OnWizardUpdate()
    {
        isValid = renderFromPosition != null && size >= 16 && !string.IsNullOrEmpty(newCubmapPath);
    }

    void OnWizardCreate()
    {
        if (!isValid) return;

        // create temporary camera for rendering
        var go = new GameObject("CubemapCamera");
        go.AddComponent<Camera>();
        try
        {
            // place it on the object
            go.transform.position = renderFromPosition.position;
            go.transform.rotation = Quaternion.identity;
            // create new texture
            var cubemap = new Cubemap(size, TextureFormat.RGB24, false);
            // render into cubemap
            go.GetComponent<Camera>().RenderToCubemap(cubemap);

            // convert cubemap to single horizontal texture
            var texture = new Texture2D(size * 6, size, cubemap.format, false);
            int texturePixelCount = (size * 6) * size;
            var texturePixels = new Color[texturePixelCount];

            var cubeFacePixels = cubemap.GetPixels(CubemapFace.PositiveX);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 0);
            cubeFacePixels = cubemap.GetPixels(CubemapFace.NegativeX);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 1);
            cubeFacePixels = cubemap.GetPixels(CubemapFace.PositiveY);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 3);
            cubeFacePixels = cubemap.GetPixels(CubemapFace.NegativeY);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 2);
            cubeFacePixels = cubemap.GetPixels(CubemapFace.PositiveZ);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 4);
            cubeFacePixels = cubemap.GetPixels(CubemapFace.NegativeZ);
            CopyTextureIntoCubemapRegion(cubeFacePixels, texturePixels, size * 5);
            texture.SetPixels(texturePixels, 0);

            // write texture as png to disk
            var textureData = texture.EncodeToPNG();
            File.WriteAllBytes(Path.Combine(Application.dataPath, $"{newCubmapPath}.png"), textureData);

            // save to disk
            AssetDatabase.SaveAssetIfDirty(cubemap);
            AssetDatabase.SaveAssets();
            AssetDatabase.Refresh();
        }
        finally
        {
            // destroy temporary camera
            DestroyImmediate(go);
        }
    }

    private void CopyTextureIntoCubemapRegion(Color[] srcPixels, Color[] dstPixels, int xOffsetDst)
    {
        int cubemapWidth = size * 6;
        for (int y = 0; y != size; ++y)
        {
            for (int x = 0; x != size; ++x)
            {
                int iSrc = x + (y * size);
                int iDst = (x + xOffsetDst) + (y * cubemapWidth);
                dstPixels[iDst] = srcPixels[iSrc];
            }
        }
    }

    [MenuItem("GameObject/Render into Cubemap")]
    static void RenderCubemap()
    {
        DisplayWizard<RenderCubemapUtil>("Render cubemap", "Render!");
    }
}
#endif
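As mentioned above, the saved PNG then needs its import settings switched to Cube. If you would rather do that from code too, a minimal sketch could look like this (the asset path and menu item are examples; point it at wherever the PNG landed inside Assets):

#if UNITY_EDITOR
using UnityEditor;

// Example only: marks an imported PNG as a cubemap via its TextureImporter.
public static class CubemapImportMarker
{
    [MenuItem("Tools/Mark PNG As Cubemap")]
    static void MarkAsCubemap()
    {
        string assetPath = "Assets/MyCubemap.png"; // example path
        var importer = AssetImporter.GetAtPath(assetPath) as TextureImporter;
        if (importer == null) return;

        importer.textureShape = TextureImporterShape.TextureCube;
        importer.SaveAndReimport();
    }
}
#endif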
So I've created this class based on the Texture2D.EncodeToPNG code example on Unity's website. I'm not getting any errors when I execute it, but I'm also not seeing a new file created. What am I doing wrong here?
using UnityEngine;
using System.Collections;
using System.IO;

public class CreateJPG : MonoBehaviour
{
    public int width = 1050;
    public int height = 700;
    string fileName;
    string filePath;
    // Texture2D tex;

    public void GrabJPG()
    {
        SaveJPG();
        Debug.Log("GrabJPG Executing");
    }

    IEnumerator SaveJPG()
    {
        // We should only read the screen buffer after rendering is complete
        yield return new WaitForEndOfFrame();
        // Create a texture in RGB24 format and read the screen contents into it
        Texture2D tex = new Texture2D(width, height, TextureFormat.RGB24, false);
        tex.ReadPixels(new Rect(0, 0, width, height), 0, 0);
        tex.Apply();
        // Encode texture into JPG
        byte[] bytes = tex.EncodeToJPG(60);
        Object.Destroy(tex);
        // Get filePrefix from GameSetup array index
        GameObject init = GameObject.FindGameObjectWithTag("Initializer");
        GameSetup gameSetup = init.GetComponent<GameSetup>();
        string prefix = gameSetup.filePrefix;
        string subDir = gameSetup.subDir;
        string dtString = System.DateTime.Now.ToString("MM-dd-yyyy_HHmmssfff");
        fileName = prefix + dtString + ".jpg";
        filePath = "/Users/kenmarold/Screenshots/" + subDir + "/";
        Debug.Log("SaveJPG Executing");
        File.WriteAllBytes(filePath + fileName, bytes);
        Debug.Log("Your file was saved at " + filePath + subDir + prefix + fileName);
        if (width > 0 && height > 0)
        {
        }
    }
}
You didn't start your coroutine; you need to call StartCoroutine in GrabJPG:
StartCoroutine(SaveJPG());
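Applied to the GrabJPG method from the question, that change looks like this:

public void GrabJPG()
{
    Debug.Log("GrabJPG Executing");
    // SaveJPG is an IEnumerator; it only runs if it is started as a coroutine.
    StartCoroutine(SaveJPG());
}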
https://docs.unity3d.com/Manual/Coroutines.html
https://unity3d.com/learn/tutorials/modules/intermediate/scripting/coroutines
P. S. By the way, you can use Application.CaptureScreenshot
I'm building a web-based app that takes a screenshot of a play area and then posts it to a web server, to be shown in an image gallery for others to view. Currently, when running in the editor, I can take the screenshot and save it locally, but that won't work once it's deployed. I don't know how to take that screenshot and save it to a texture (rather than to disk) and then upload it to my server. How do I do this? I'm new at this, and especially new to Render Texture functionality. Can someone help me sort this out?
I found this snippet on a forum. I have not tested it myself on the Web Player, though.
using UnityEngine;
using System.Collections;
public class Main : MonoBehaviour
{
    private string _data = string.Empty;
    public Texture2D bg;

    void OnGUI()
    {
        if (GUI.Button(new Rect(Screen.width * 0.5f - 32, 32, 64, 32), "Save"))
            StartCoroutine(ScreeAndSave());
    }

    IEnumerator ScreeAndSave()
    {
        yield return new WaitForEndOfFrame();
        var newTexture = ScreenShoot(Camera.main, bg.width, bg.height);
        LerpTexture(bg, ref newTexture);
        _data = System.Convert.ToBase64String(newTexture.EncodeToPNG());
        Application.ExternalEval("document.location.href='data:octet-stream;base64," + _data + "'");
    }

    private static Texture2D ScreenShoot(Camera srcCamera, int width, int height)
    {
        var renderTexture = new RenderTexture(width, height, 0);
        var targetTexture = new Texture2D(width, height, TextureFormat.RGB24, false);
        srcCamera.targetTexture = renderTexture;
        srcCamera.Render();
        RenderTexture.active = renderTexture;
        targetTexture.ReadPixels(new Rect(0, 0, width, height), 0, 0);
        targetTexture.Apply();
        srcCamera.targetTexture = null;
        RenderTexture.active = null;
        srcCamera.ResetAspect();
        return targetTexture;
    }

    private static void LerpTexture(Texture2D alphaTexture, ref Texture2D texture)
    {
        var bgColors = alphaTexture.GetPixels();
        var tarCols = texture.GetPixels();
        for (var i = 0; i < tarCols.Length; i++)
            tarCols[i] = bgColors[i].a > 0.99f ? bgColors[i] : Color.Lerp(tarCols[i], bgColors[i], bgColors[i].a);
        texture.SetPixels(tarCols);
        texture.Apply();
    }
}
Reference Link
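The snippet above only turns the screenshot into a data URL for download. If you also want to post the PNG bytes to your server, a minimal sketch using UnityWebRequest could look like this (the upload URL and form field name are placeholders for whatever your server expects), e.g. started with StartCoroutine(UploadPNG(newTexture.EncodeToPNG())) from ScreeAndSave():

using System.Collections;
using UnityEngine;
using UnityEngine.Networking;

public class ScreenshotUploader : MonoBehaviour
{
    // Placeholder endpoint: replace with your own server URL.
    public string uploadUrl = "https://example.com/upload";

    public IEnumerator UploadPNG(byte[] pngBytes)
    {
        // Build a multipart form with the PNG bytes attached as a file field.
        var form = new WWWForm();
        form.AddBinaryData("file", pngBytes, "screenshot.png", "image/png");

        using (UnityWebRequest request = UnityWebRequest.Post(uploadUrl, form))
        {
            yield return request.SendWebRequest();
            Debug.Log("Upload finished with response code " + request.responseCode);
        }
    }
}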