Hi, I have this method for a project I am doing with the Kinect. Unfortunately the code is for the beta SDK and I need to update it to the 1.5 SDK version. I tried a few things but they won't work. Here is what I have so far. The method is called nui_DepthFrameReady:
void nui_DepthFrameReady(object sender, ImageFrameReadyEventArgs e)
{
if (!savedDepth)
{
PlanarImage Image = e.ImageFrame.Image;
byte[] convertedDepthFrame = convertDepthFrame(Image.Bits);
depth.Source = BitmapSource.Create(
Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * 4);
++totalFrames;
DateTime cur = DateTime.Now;
if (cur.Subtract(lastTime) > TimeSpan.FromSeconds(1))
{
int frameDiff = totalFrames - lastFrames;
lastFrames = totalFrames;
lastTime = cur;
frameRate.Text = frameDiff.ToString() + " fps";
}
if (subscribed)
{
//byte[] ColoredBytes = GenerateColoredBytes(e.ImageFrame);
//create an image based on the colored bytes
BitmapSource myBitmapDepth = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * PixelFormats.Bgr32.BitsPerPixel / 8);
string imageFilePath = @"C:\Temp\kinect\depth\bmpDepthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".png";
string dataFilePath = @"C:\Temp\kinect\depth\depthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".dat";
//savePngFrame(myBitmapDepth, imageFilePath);
//Crop frame to size 180x240
byte[] croppedDepthFrame = new byte[180 * 240 * 2];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j < 180 * 2; j += 2)
{
croppedDepthFrame[i * 180 * 2 + j] = Image.Bits[i * 320 * 2 + j + 69 * 2];
croppedDepthFrame[i * 180 * 2 + j + 1] = Image.Bits[i * 320 * 2 + j + 1 + 69 * 2];
//Console.Write((i * 180 * 2 + j) + "-" + (i * 180 * 2 + j + 69*2) +", ");
}
//Console.WriteLine();
}
FileStream fs = new FileStream(dataFilePath, FileMode.Create);
fs.Write(croppedDepthFrame, 0, croppedDepthFrame.Length);
fs.Close();
}
savedDepth = true;
}
}
Thank you for the help.
This is what I have so far:
void nui_DepthImageReady(object sender, DepthImageFrameReadyEventArgs e)
{
if (!savedDepth)
{
short[] pixelData = null; // must be initialized, or the null check below won't compile
bool receivedData = false;
using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
{
if (depthImageFrame != null)
{
if (pixelData == null) //allocate the first time
{
pixelData = new short[depthImageFrame.PixelDataLength];
}
depthImageFrame.CopyPixelDataTo(pixelData);
receivedData = true;
}
else
{
// app's processing of image data took too long; it got more than 2 frames behind.
// the data is no longer available.
}
}
if (receivedData)
{
byte[] convertedDepthFrame = convertDepthFrame(Image.bits);
depth.Source = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * 4);
++totalFrames;
DateTime cur = DateTime.Now;
if (cur.Subtract(lastTime) > TimeSpan.FromSeconds(1))
{
int frameDiff = totalFrames - lastFrames;
lastFrames = totalFrames;
lastTime = cur;
frameRate.Text = frameDiff.ToString() + " fps";
}
if (subscribed)
{
//byte[] ColoredBytes = GenerateColoredBytes(e.ImageFrame);
//create an image based on the colored bytes
BitmapSource myBitmapDepth = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * PixelFormats.Bgr32.BitsPerPixel / 8);
string imageFilePath = @"C:\Temp\kinect\depth\bmpDepthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".png";
string dataFilePath = @"C:\Temp\kinect\depth\depthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".dat";
//savePngFrame(myBitmapDepth, imageFilePath);
//Crop frame to size 180x240
byte[] croppedDepthFrame = new byte[180 * 240 * 2];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j < 180 * 2; j += 2)
{
croppedDepthFrame[i * 180 * 2 + j] = Image.Bits[i * 320 * 2 + j + 69 * 2];
croppedDepthFrame[i * 180 * 2 + j + 1] = Image.Bits[i * 320 * 2 + j + 1 + 69 * 2];
//Console.Write((i * 180 * 2 + j) + "-" + (i * 180 * 2 + j + 69*2) +", ");
}
//Console.WriteLine();
}
FileStream fs = new FileStream(dataFilePath, FileMode.Create);
fs.Write(croppedDepthFrame, 0, croppedDepthFrame.Length);
fs.Close();
}
}
savedDepth = true;
}
}
Image.Bits has no definition in the new Kinect SDK, and neither do Image.Width or Image.Height.
These are the errors that are happening, so I don't know how to convert the code to get the same info.
I'm comparing your sample to a sample in the Kinect 1.5 SDK samples, and it appears that what you've called Image may be the same as pixelData. I don't see where Image was defined though, so I'm guessing. Does this help?
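If that's right, here's a minimal sketch of the 1.5-style handler body. It assumes convertDepthFrame is reworked to accept a short[], and it reads the frame dimensions from the DepthImageFrame while the frame is still open, which replaces Image.Width/Image.Height:
int frameWidth = 0, frameHeight = 0;
short[] pixelData = null;
bool receivedData = false;
using (DepthImageFrame frame = e.OpenDepthImageFrame())
{
    if (frame != null)
    {
        pixelData = new short[frame.PixelDataLength];
        frame.CopyPixelDataTo(pixelData);
        frameWidth = frame.Width;   // replaces Image.Width
        frameHeight = frame.Height; // replaces Image.Height
        receivedData = true;
    }
}
if (receivedData)
{
    // pixelData replaces Image.Bits; note it is one short per pixel,
    // so the cropping loop would need to index shorts rather than byte pairs.
    byte[] convertedDepthFrame = convertDepthFrame(pixelData);
    depth.Source = BitmapSource.Create(frameWidth, frameHeight, 96, 96,
        PixelFormats.Bgr32, null, convertedDepthFrame, frameWidth * 4);
}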
I'm trying to implement a webcam capture app which should take still frames, display them on the screen and save to the disk.
Since I'm already using SharpDX to capture the screen, I thought it would be nice to use that library. I was not sure whether SharpDX had any video capture capabilities, so I started searching and found parts of what looks like a webcam capture prototype:
var attributes = new MediaAttributes(1);
attributes.Set<Guid>(CaptureDeviceAttributeKeys.SourceType, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var activates = MediaFactory.EnumDeviceSources(attributes);
var dic = new Dictionary<string, Activate>();
foreach (var activate in activates)
{
var uid = activate.Get(CaptureDeviceAttributeKeys.SourceTypeVidcapSymbolicLink);
dic.Add(uid, activate);
}
var camera = dic.First().Value;
It outputs a camera with a strange uid. I'm not sure if it's correct.
What am I supposed to do after this?
Edit
I got this code kind of working. I still don't understand why the output is strange.
var attributes = new MediaAttributes(1);
attributes.Set(CaptureDeviceAttributeKeys.SourceType.Guid, CaptureDeviceAttributeKeys.SourceTypeVideoCapture.Guid);
var mediaSource = MediaFactory.EnumDeviceSources(attributes)[0].ActivateObject<MediaSource>();
mediaSource.CreatePresentationDescriptor(out var presentationDescriptor);
var reader = new SourceReader(mediaSource);
var mediaTypeIndex = 0;
int width, height;
using (var mt = reader.GetNativeMediaType(0, mediaTypeIndex))
{
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameSize), out width, out height);
UnpackLong(mt.Get(MediaTypeAttributeKeys.FrameRate), out var frameRateNumerator, out var frameRateDenominator);
UnpackLong(mt.Get(MediaTypeAttributeKeys.PixelAspectRatio), out var aspectRatioNumerator, out var aspectRatioDenominator);
}
var sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out var readStreamIndex, out var readFlags, out var timestamp);
if (sample == null)
sample = reader.ReadSample(SourceReaderIndex.AnyStream, SourceReaderControlFlags.None, out readStreamIndex, out readFlags, out timestamp);
var sourceBuffer = sample.GetBufferByIndex(0); // sample.ConvertToContiguousBuffer();
var sourcePointer = sourceBuffer.Lock(out var maxLength, out var currentLength);
var data = new byte[sample.TotalLength];
Marshal.Copy(sourcePointer, data, 0, sample.TotalLength);
var newData = new byte[width * 4 * height];
var partWidth = width / 4;
var partHeight = height / 3;
for (var i = 0; i < sample.TotalLength; i += 4)
{
//X8R8B8G8 -> BGRA = 4
newData[i] = data[i + 3];
newData[i + 1] = data[i + 2];
newData[i + 2] = data[i + 1];
newData[i + 3] = 255; //data[i];
}
//var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, data, ((width * 24 + 31) / 32) * 4);
var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgra32, null, newData, width * 4);
sourceBuffer.Unlock();
sourceBuffer.Dispose();
The output image is this (I was showing a color spectrum to my webcam):
The image repeats 4 times; each part has a grayscale image and a color version with half the height.
Two thirds of the image is transparent.
Your output is NV12; here's some sample code to convert NV12 to RGB:
unsafe private static void TransformImage_NV12(IntPtr pDest, int lDestStride, IntPtr pSrc, int lSrcStride, int dwWidthInPixels, int dwHeightInPixels)
{
uint imageWidth = (uint)dwWidthInPixels;
uint widthHalf = imageWidth / 2;
uint imageHeight = (uint)dwHeightInPixels;
byte* nv12Data = (byte*)pSrc;
byte* rgbData = (byte*)pDest;
uint dataSize = imageWidth * imageHeight * 3;
for (uint y = 0; y < imageHeight; y++)
{
for (uint x = 0; x < imageWidth; x++)
{
// NV12 layout: full-resolution Y plane, followed by interleaved chroma at half resolution
uint xEven = x & 0xFFFFFFFE;
uint yEven = y & 0xFFFFFFFE;
uint yIndex = y * imageWidth + x;
uint cIndex = imageWidth * imageHeight + yEven * widthHalf + xEven;
byte yy = nv12Data[yIndex];
byte cr = nv12Data[cIndex + 0];
byte cb = nv12Data[cIndex + 1];
// pixels are written back-to-front, so the output image comes out flipped
uint outputIndex = (dataSize - (y * imageWidth + x) * 3) - 3;
// standard YCbCr -> RGB conversion, clamped to [0, 255]
rgbData[outputIndex + 0] = (byte)Math.Min(Math.Max((yy + 1.402 * (cr - 128)), 0), 255);
rgbData[outputIndex + 1] = (byte)Math.Min(Math.Max((yy - 0.344 * (cb - 128) - 0.714 * (cr - 128)), 0), 255);
rgbData[outputIndex + 2] = (byte)Math.Min(Math.Max((yy + 1.772 * (cb - 128)), 0), 255);
}
}
}
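A hypothetical way to wire this into the edit code above, using sourcePointer, width, and height from the question (note the sample ignores its stride arguments and writes pixels in reverse order, so the result appears flipped):
// pin a managed buffer so we can pass its address to the unsafe converter
var rgbBytes = new byte[width * height * 3];
var handle = GCHandle.Alloc(rgbBytes, GCHandleType.Pinned); // System.Runtime.InteropServices
try
{
    TransformImage_NV12(handle.AddrOfPinnedObject(), width * 3, sourcePointer, width, width, height);
}
finally
{
    handle.Free();
}
// the converter emits R, G, B byte order, matching PixelFormats.Rgb24
var source = BitmapSource.Create(width, height, 96, 96, PixelFormats.Rgb24, null, rgbBytes, width * 3);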
I'm trying to create a 16-bit BMP/JPG file using the following code:
public static void CreateBitmap_Rgb48(int width, int height, double dpiX, double dpiY, string fn)
{
int bytesperpixel = 6; // BytesPerChannel = 2,ChannelCount = 3 (bgr)
int channelCount = 3;
int stride = width * bytesperpixel;
byte[] imgdata = new byte[width * height * bytesperpixel];
int rectDim = 40;
ushort[] intPixelData = new ushort[width * height * channelCount];
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width * channelCount; col += channelCount)
{
if (((col / channelCount / rectDim) % 2) != ((row / rectDim) % 2))
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0x0000;
intPixelData[row * width * channelCount + col + 2] = 0xffff;
}
else
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0xffff;
intPixelData[row * width * channelCount + col + 2] = 0x0000;
}
}
}
Buffer.BlockCopy(intPixelData, 0, imgdata, 0, imgdata.Length);
// compose the BitmapImage
var image = BitmapSource.Create(width, height, dpiX, dpiY, PixelFormats.Rgb48, null, imgdata, stride);
BmpBitmapEncoder encoder = new BmpBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create(image));
using (var fileStream = new FileStream(fn, FileMode.Create))
{
encoder.Save(fileStream);
}
}
Similarly, for jpeg, I used:
JpegBitmapEncoder encoder = new JpegBitmapEncoder();
But the generated file size is wrong, and BmpBitmapDecoder shows the format is Default for the BMP and Rgb24 for the JPG; they are not Rgb48. What is wrong?
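For comparison, here is a sketch that writes the same frame with PngBitmapEncoder: PNG supports 16 bits per channel, so Rgb48 can survive the round trip, while baseline JPEG is 8 bits per channel, which would explain the Rgb24 readback.
var pngEncoder = new PngBitmapEncoder();
pngEncoder.Frames.Add(BitmapFrame.Create(image));
using (var fileStream = new FileStream(fn + ".png", FileMode.Create))
{
    pngEncoder.Save(fileStream);
}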
The sample panorama image URL is https://upload.wikimedia.org/wikipedia/commons/3/3b/360%C2%B0_Hoher_Freschen_Panorama_2.jpg, which I saved to my PC. I generate tiles from that image programmatically and got an out-of-memory error.
This line throws the error:
Bitmap bmLevelSource = (Bitmap)Bitmap.FromFile(levelSourceImage);
Here is my C# program code which throws the error:
double maxZoom = 5;
string FILEPATH = @"C:\test\img.jpg";
string TARGETFOLDER = @"C:\test\Src";
bool REMOVEXISTINGFILES = true;
if (!System.IO.File.Exists(FILEPATH))
{
Console.WriteLine("file not exist");
return;
}
if (maxZoom >= 10)
{
Console.WriteLine("Scale multiplier should be an integer <=10");
return;
}
//Read image
Bitmap bmSource;
try
{
bmSource = (Bitmap)Bitmap.FromFile(FILEPATH);
}
catch
{
Console.WriteLine("image file not valid");
return;
}
//check directory exist
if (!System.IO.Directory.Exists(TARGETFOLDER))
{
System.IO.Directory.CreateDirectory(TARGETFOLDER);
}
else if (REMOVEXISTINGFILES)
{
string[] files = System.IO.Directory.GetFiles(TARGETFOLDER);
foreach (string file in files)
System.IO.File.Delete(file);
string[] dirs = System.IO.Directory.GetDirectories(TARGETFOLDER);
foreach (string dir in dirs)
System.IO.Directory.Delete(dir, true);
}
int actualHeight = bmSource.Height;
int actualWidth = bmSource.Width;
if (((actualHeight % 256) != 0)
||
((actualWidth % 256) != 0))
{
Console.WriteLine("image width and height pixels should be multiples of 256");
return;
}
int actualResizeSizeWidth = 1;
int level = 0;
while (level <= maxZoom)
{
string leveldirectory = System.IO.Path.Combine(TARGETFOLDER, String.Format("{0}", level));
if (!System.IO.Directory.Exists(leveldirectory))
System.IO.Directory.CreateDirectory(leveldirectory);
int rowsInLevel = Convert.ToInt32(Math.Pow(2, level));
actualResizeSizeWidth = 256 * rowsInLevel;
//create image to parse
int actualResizeSizeHeight = (actualHeight * actualResizeSizeWidth) / actualWidth;
Bitmap resized = new Bitmap(bmSource, new Size(actualResizeSizeWidth, actualResizeSizeHeight));
string levelSourceImage = System.IO.Path.Combine(leveldirectory, "level.png");
resized.Save(levelSourceImage);
for (int x = 0; x < rowsInLevel; x++)
{
string levelrowdirectory = System.IO.Path.Combine(leveldirectory, String.Format("{0}", x));
if (!System.IO.Directory.Exists(levelrowdirectory))
System.IO.Directory.CreateDirectory(levelrowdirectory);
Bitmap bmLevelSource = (Bitmap)Bitmap.FromFile(levelSourceImage);
//generate tiles
int numberTilesHeight = Convert.ToInt32(Math.Ceiling(actualResizeSizeHeight / 256.0));
for (int y = 0; y < numberTilesHeight; y++)
{
Console.WriteLine("Generating Tiles " + level.ToString() + " " + x.ToString() + " " + y.ToString()); int heightToCrop = actualResizeSizeHeight >= 256 ? 256 : actualResizeSizeHeight;
Rectangle destRect = new Rectangle(x * 256, y * 256, 256, heightToCrop);
//croped
Bitmap bmTile = bmLevelSource.Clone(destRect, System.Drawing.Imaging.PixelFormat.DontCare);
//full tile
Bitmap bmFullTile = new Bitmap(256, 256);
Graphics gfx = Graphics.FromImage(bmFullTile);
gfx.DrawImageUnscaled(bmTile, 0, 0);
bmFullTile.Save(System.IO.Path.Combine(levelrowdirectory, String.Format("{0}.png", y)));
bmFullTile.Dispose();
bmTile.Dispose();
}
}
level++;
}
I commented out the code below when I ran the program:
if (((actualHeight % 256) != 0)
||
((actualWidth % 256) != 0))
{
Console.WriteLine("image width and height pixels should be multiples of 256");
return;
}
What is the fault that causes the "Out of Memory" error?
Thanks
Edit
The actual image height and width were 1250 and 2500.
actualResizeSizeWidth was 256
actualResizeSizeHeight was 128
I included a panorama image URL at the top of this post. Can you please download it and execute my code at your end to see whether the memory issue appears?
Code Update
I modified the code a bit and disposed some of the Bitmaps, like this:
bmLevelSource.Dispose(); and resized.Dispose();
while (level <= maxZoom)
{
string leveldirectory = System.IO.Path.Combine(TARGETFOLDER, String.Format("{0}", level));
if (!System.IO.Directory.Exists(leveldirectory))
System.IO.Directory.CreateDirectory(leveldirectory);
int rowsInLevel = Convert.ToInt32(Math.Pow(2, level));
actualResizeSizeWidth = 256 * rowsInLevel;
//create image to parse
int actualResizeSizeHeight = (actualHeight * actualResizeSizeWidth) / actualWidth;
Bitmap resized = new Bitmap(bmSource, new Size(actualResizeSizeWidth, actualResizeSizeHeight));
string levelSourceImage = System.IO.Path.Combine(leveldirectory, "level.png");
resized.Save(levelSourceImage);
for (int x = 0; x < rowsInLevel; x++)
{
string levelrowdirectory = System.IO.Path.Combine(leveldirectory, String.Format("{0}", x));
if (!System.IO.Directory.Exists(levelrowdirectory))
System.IO.Directory.CreateDirectory(levelrowdirectory);
Bitmap bmLevelSource = (Bitmap)Bitmap.FromFile(levelSourceImage);
//generate tiles
int numberTilesHeight = Convert.ToInt32(Math.Ceiling(actualResizeSizeHeight / 256.0));
for (int y = 0; y < numberTilesHeight; y++)
{
Console.WriteLine("Generating Tiles " + level.ToString() + " " + x.ToString() + " " + y.ToString()); int heightToCrop = actualResizeSizeHeight >= 256 ? 256 : actualResizeSizeHeight;
Rectangle destRect = new Rectangle(x * 256, y * 256, 256, heightToCrop);
//croped
Bitmap bmTile = bmLevelSource.Clone(destRect, System.Drawing.Imaging.PixelFormat.DontCare);
//full tile
Bitmap bmFullTile = new Bitmap(256, 256);
Graphics gfx = Graphics.FromImage(bmFullTile);
gfx.DrawImageUnscaled(bmTile, 0, 0);
bmFullTile.Save(System.IO.Path.Combine(levelrowdirectory, String.Format("{0}.png", y)));
bmFullTile.Dispose();
bmTile.Dispose();
}
bmLevelSource.Dispose();
}
level++;
resized.Dispose();
}
Please see my modified code and give suggestions now.
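For reference, here is a sketch of the same tile step rewritten with using blocks, assuming the same variables as above; note that the Graphics object gfx is never disposed in the original loop, which also leaks GDI handles:
using (Bitmap bmLevelSource = (Bitmap)Bitmap.FromFile(levelSourceImage))
{
    Rectangle destRect = new Rectangle(x * 256, y * 256, 256, heightToCrop);
    using (Bitmap bmTile = bmLevelSource.Clone(destRect, System.Drawing.Imaging.PixelFormat.DontCare))
    using (Bitmap bmFullTile = new Bitmap(256, 256))
    using (Graphics gfx = Graphics.FromImage(bmFullTile)) // disposed even on exceptions
    {
        gfx.DrawImageUnscaled(bmTile, 0, 0);
        bmFullTile.Save(System.IO.Path.Combine(levelrowdirectory, String.Format("{0}.png", y)));
    }
}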
I'm making an OpenGL application that has multiple meshes, each described as lists of positions, normals, and uvs. I am binding this data to a vertex buffer, but I was wondering how I would draw these meshes each frame without re-binding the vertex buffer. Correct me if I'm wrong, but isn't copying ~100KB of data to the vertex buffer every frame slowish? And how would I draw each mesh with a separate transform (position, rotation, scale)? Thanks :) Here is my Mesh code:
using System;
using System.IO;
using OpenTK;
using OpenTK.Graphics.OpenGL;
public class Mesh
{
public Vector3[] positions;
public Vector3[] normals;
public Vector2[] uvs;
public Triangle[] triangles;
public int buffer;
public Mesh()
{
this.positions = new Vector3[0];
this.normals = new Vector3[0];
this.uvs = new Vector2[0];
this.triangles = new Triangle[0];
this.buffer = 0;
}
public Mesh(Vector3[] positions, Vector3[] normals, Vector2[] uvs, Triangle[] triangles, int buffer)
{
this.positions = positions;
this.normals = normals;
this.uvs = uvs;
this.triangles = triangles;
this.buffer = buffer;
}
public static Mesh fromFile(string fileName)
{
Mesh mesh = new Mesh();
BinaryReader binaryReader = new BinaryReader(new FileStream(fileName, FileMode.Open));
int positionCount = binaryReader.ReadInt32();
mesh.positions = new Vector3[positionCount];
for (int i = 0; i < positionCount; i++)
{
mesh.positions[i] = new Vector3(binaryReader.ReadSingle(), binaryReader.ReadSingle(), binaryReader.ReadSingle());
}
int normalCount = binaryReader.ReadInt32();
mesh.normals = new Vector3[normalCount];
for (int i = 0; i < normalCount; i++)
{
mesh.normals[i] = new Vector3(binaryReader.ReadSingle(), binaryReader.ReadSingle(), binaryReader.ReadSingle());
}
int uvCount = binaryReader.ReadInt32();
mesh.uvs = new Vector2[uvCount];
for (int i = 0; i < uvCount; i++)
{
mesh.uvs[i] = new Vector2(binaryReader.ReadSingle(), binaryReader.ReadSingle());
}
int triangleCount = binaryReader.ReadInt32();
mesh.triangles = new Triangle[triangleCount];
for (int i = 0; i < triangleCount; i++)
{
mesh.triangles[i] = new Triangle(binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32(), binaryReader.ReadInt32());
}
binaryReader.Close();
return mesh;
}
public void toFile(string fileName)
{
BinaryWriter binaryWriter = new BinaryWriter(new FileStream(fileName, FileMode.OpenOrCreate));
binaryWriter.Write(positions.Length);
for (int i = 0; i < positions.Length; i++)
{
binaryWriter.Write(positions[i].X);
binaryWriter.Write(positions[i].Y);
binaryWriter.Write(positions[i].Z);
}
binaryWriter.Write(normals.Length);
for (int i = 0; i < normals.Length; i++)
{
binaryWriter.Write(normals[i].X);
binaryWriter.Write(normals[i].Y);
binaryWriter.Write(normals[i].Z);
}
binaryWriter.Write(uvs.Length);
for (int i = 0; i < uvs.Length; i++)
{
binaryWriter.Write(uvs[i].X);
binaryWriter.Write(uvs[i].Y);
}
binaryWriter.Write(triangles.Length);
for (int i = 0; i < triangles.Length; i++)
{
binaryWriter.Write(triangles[i].positionIndex0);
binaryWriter.Write(triangles[i].normalIndex0);
binaryWriter.Write(triangles[i].uvIndex0);
binaryWriter.Write(triangles[i].positionIndex1);
binaryWriter.Write(triangles[i].normalIndex1);
binaryWriter.Write(triangles[i].uvIndex1);
binaryWriter.Write(triangles[i].positionIndex2);
binaryWriter.Write(triangles[i].normalIndex2);
binaryWriter.Write(triangles[i].uvIndex2);
}
binaryWriter.Close();
}
public void draw(Transform transform)
{
float[] data = new float[triangles.Length * 24];
for (int i = 0; i < triangles.Length; i++)
{
data[(i * 9) + 0] = positions[triangles[i].positionIndex0].X;
data[(i * 9) + 1] = positions[triangles[i].positionIndex0].Y;
data[(i * 9) + 2] = positions[triangles[i].positionIndex0].Z;
data[(i * 9) + 3] = positions[triangles[i].positionIndex1].X;
data[(i * 9) + 4] = positions[triangles[i].positionIndex1].Y;
data[(i * 9) + 5] = positions[triangles[i].positionIndex1].Z;
data[(i * 9) + 6] = positions[triangles[i].positionIndex2].X;
data[(i * 9) + 7] = positions[triangles[i].positionIndex2].Y;
data[(i * 9) + 8] = positions[triangles[i].positionIndex2].Z;
data[(triangles.Length * 9) + (i * 9) + 0] = normals[triangles[i].normalIndex0].X;
data[(triangles.Length * 9) + (i * 9) + 1] = normals[triangles[i].normalIndex0].Y;
data[(triangles.Length * 9) + (i * 9) + 2] = normals[triangles[i].normalIndex0].Z;
data[(triangles.Length * 9) + (i * 9) + 3] = normals[triangles[i].normalIndex1].X;
data[(triangles.Length * 9) + (i * 9) + 4] = normals[triangles[i].normalIndex1].Y;
data[(triangles.Length * 9) + (i * 9) + 5] = normals[triangles[i].normalIndex1].Z;
data[(triangles.Length * 9) + (i * 9) + 6] = normals[triangles[i].normalIndex2].X;
data[(triangles.Length * 9) + (i * 9) + 7] = normals[triangles[i].normalIndex2].Y;
data[(triangles.Length * 9) + (i * 9) + 8] = normals[triangles[i].normalIndex2].Z;
data[(triangles.Length * 18) + (i * 6) + 0] = uvs[triangles[i].uvIndex0].X;
data[(triangles.Length * 18) + (i * 6) + 1] = uvs[triangles[i].uvIndex0].Y;
data[(triangles.Length * 18) + (i * 6) + 2] = uvs[triangles[i].uvIndex1].X;
data[(triangles.Length * 18) + (i * 6) + 3] = uvs[triangles[i].uvIndex1].Y;
data[(triangles.Length * 18) + (i * 6) + 4] = uvs[triangles[i].uvIndex2].X;
data[(triangles.Length * 18) + (i * 6) + 5] = uvs[triangles[i].uvIndex2].Y;
}
buffer = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, buffer);
GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(triangles.Length * 96), data, BufferUsageHint.StaticDraw);
//------------------------
//----------TODO----------
//------------------------
}
}
The last function, draw, is the one I'm working on.
The point is to have a single VBO per mesh that you load once and then just rebind as needed.
If you are on OpenGL 3.3+ you can collect all the needed bindings for each mesh in a VAO per mesh (pseudo-ish code):
class MeshBuffer{
int vao;
int vbo;
int numVertices;
void Dispose(){
if(vao==0)return;
GL.DeleteVertexArrays(1, ref vao);
GL.DeleteBuffers(1, ref vbo);
vao = 0;
vbo = 0;
}
void Bind(){
GL.BindVertexArray(vao);
}
void Unbind(){
GL.BindVertexArray(0);
}
void FillVBO(Mesh mesh){
Dispose();
GL.GenVertexArrays(1, out vao);
GL.GenBuffers(1, out vbo);
numVertices = mesh.triangles.Length * 3;
float[] data = new float[mesh.triangles.Length * 24];
//your for loop
GL.BindVertexArray(vao);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo);
GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(mesh.triangles.Length * 96), data, BufferUsageHint.StaticDraw);
//enable and describe the three non-interleaved blocks: positions, normals, uvs (2 floats)
GL.EnableVertexAttribArray(0);
GL.EnableVertexAttribArray(1);
GL.EnableVertexAttribArray(2);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 0, 0);
GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, false, 0, mesh.triangles.Length * 9 * 4);
GL.VertexAttribPointer(2, 2, VertexAttribPointerType.Float, false, 0, mesh.triangles.Length * 18 * 4);
GL.BindVertexArray(0);
GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
}
}
Then to draw you just bind the MeshBuffer and load the transformation matrix into the relevant uniform.
int mvpMat = GL.GetUniformLocation(prog, "mvp");
GL.UseProgram(prog);
meshBuffer.Bind();
Matrix4 mvp = transform.Mat4();
GL.UniformMatrix4(mvpMat, false, ref mvp);
GL.DrawArrays(PrimitiveType.Triangles, 0, meshBuffer.numVertices);
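Putting it together, under the same pseudo-code assumptions (hypothetical meshes and transforms lists, and the prog/mvpMat from above): the upload happens once at load time, and the per-frame cost is just a VAO bind, a uniform upload, and the draw call.
// load time: upload each mesh once
var meshBuffers = new List<MeshBuffer>();
foreach (Mesh m in meshes)
{
    var mb = new MeshBuffer();
    mb.FillVBO(m);
    meshBuffers.Add(mb);
}
// per frame: no BufferData calls, only cheap binds
GL.UseProgram(prog);
for (int i = 0; i < meshBuffers.Count; i++)
{
    meshBuffers[i].Bind();
    Matrix4 mvp = transforms[i].Mat4(); // hypothetical Transform.Mat4() as above
    GL.UniformMatrix4(mvpMat, false, ref mvp);
    GL.DrawArrays(PrimitiveType.Triangles, 0, meshBuffers[i].numVertices);
}
GL.BindVertexArray(0);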
I'm having a problem with writing image data using LockBits. I'm working on edge detection software which has a strange distortion effect with most images. I've tried to isolate the problem, and it seems very random. It is not associated with format; rather, the only images that seem to work are pictures made for desktop wallpapers, and I don't really know why. I only switched to writing with LockBits recently, so I am sure the problem is with that (there were no problems when I was reading with LockBits and writing with SetPixel). Here's a screenshot of the effect:
As you can see, the edge detection works, but the image is distorted horizontally, making the image into a parallelogram.
Here's a code snippet of the method that handles all this (in C#):
private void analyze()
{
//When the analyze button is pressed
percentageInt = float.Parse(textBox1.Text);
float scale = 1;
if (comboBox1.SelectedItem == "Auto")
{
scale = pic.Width / pictureBox1.Width;
}
else if (comboBox1.SelectedItem == "1/2")
{
scale = 2;
}
else if (comboBox1.SelectedItem == "1/4")
{
scale = 4;
}
else if (comboBox1.SelectedItem == "Original")
{
scale = 1;
}
else
{
scale = pic.Width / pictureBox1.Width;
}
int tempWidth = 1;
int tempHeight = 1;
if (scale >= 1)
{
tempWidth = (int)Math.Floor(pic.Width / scale);
tempHeight = (int)Math.Floor(pic.Height / scale);
}
else
{
tempWidth = pic.Width;
tempHeight = pic.Height;
}
width = pic.Width;
height = pic.Height;
edgeData = new Boolean[pic.Width, pic.Height];
img = (Bitmap)resizeImage(pic, new Size(tempWidth, tempHeight));
pic2 = new Bitmap(tempWidth, tempHeight);
Bitmap img2 = (Bitmap)pic2;
Color[] pixels = null;
BitmapData data = img.LockBits(new Rectangle(0, 0, img.Width, img.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
int scaledPercent = (int)(Math.Round(percentageInt * 255));
Debug.WriteLine("percent " + scaledPercent);
unsafe
{
Debug.WriteLine("Woah there, unsafe stuff");
byte* prevLine = (byte*)data.Scan0;
byte* currLine = prevLine + data.Stride;
byte* nextLine = currLine + data.Stride;
for (int y = 1; y < img.Height - 1; y++)
{
byte* pp = prevLine + 3;
byte* cp = currLine + 3;
byte* np = nextLine + 3;
for (int x = 1; x < img.Width - 1; x++)
{
if (IsEdgeOptimized(pp, cp, np, scaledPercent))
{
edgeData[x, y] = true;
//Debug.WriteLine("x " + x + "y " + y);
//img2.SetPixel(x, y, Color.Black);
//bytes[(y * img.Width + x) * 3 + 2] = 255;
}
else
{
bytes[(y * img.Width + x) * 3] = 255;
bytes[(y * img.Width + x) * 3 + 1] = 255;
bytes[(y * img.Width + x) * 3 + 2] = 255;
//img2.SetPixel(x, y, Color.White);
}
pp += 3; cp += 3; np += 3;
}
prevLine = currLine;
currLine = nextLine;
nextLine += data.Stride;
}
}
System.Runtime.InteropServices.Marshal.Copy(bytes, 0, data.Scan0, size);
img.UnlockBits(data);
pictureBox2.Image = img;
} // end analyze
So what is causing the problem, and how can I fix it? If you need more details, feel free to comment.
You're initializing your bytes buffer with stride x height bytes:
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
But then using the width (instead of stride) when you write to it:
bytes[(y * img.Width + x) * 3] = 255;
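Presumably the fix is to index each row by the stride instead, e.g.:
// row offset uses the stride (which includes row padding), not width * 3
bytes[y * data.Stride + x * 3] = 255;
bytes[y * data.Stride + x * 3 + 1] = 255;
bytes[y * data.Stride + x * 3 + 2] = 255;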