I am trying to create a 16-bit BMP/JPG file using the following code:
public static void CreateBitmap_Rgb48(int width, int height, double dpiX, double dpiY, string fn)
{
int bytesperpixel = 6; // BytesPerChannel = 2, ChannelCount = 3 (Rgb48)
int channelCount = 3;
int stride = width * bytesperpixel;
byte[] imgdata = new byte[width * height * bytesperpixel];
int rectDim = 40;
ushort[] intPixelData = new ushort[width * height * channelCount];
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width * channelCount; col += channelCount)
{
if (((col / channelCount / rectDim) % 2) != ((row / rectDim) % 2))
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0x0000;
intPixelData[row * width * channelCount + col + 2] = 0xffff;
}
else
{
intPixelData[row * width * channelCount + col + 0] = 0x0000;
intPixelData[row * width * channelCount + col + 1] = 0xffff;
intPixelData[row * width * channelCount + col + 2] = 0x0000;
}
}
}
Buffer.BlockCopy(intPixelData, 0, imgdata, 0, imgdata.Length);
// compose the BitmapImage
var image = BitmapSource.Create(width, height, dpiX, dpiY, PixelFormats.Rgb48, null, imgdata, stride);
BmpBitmapEncoder encoder = new BmpBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create(image));
using (var fileStream = new FileStream(fn, FileMode.Create))
{
encoder.Save(fileStream);
}
}
Similarly, for jpeg, I used:
JpegBitmapEncoder encoder = new JpegBitmapEncoder();
But the generated file size is wrong, and BmpBitmapDecoder shows the format as Default for the BMP and Rgb24 for the JPG; they are not Rgb48. What is wrong?
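Note: as far as I can tell, WPF's BMP and JPEG encoders only handle up to 8 bits per channel, so the frame gets converted on save (which would match the Default / Rgb24 formats the decoder reports). Below is a hedged, untested sketch of the same save path through an encoder that can store Rgb48; the choice of PngBitmapEncoder over TiffBitmapEncoder is arbitrary, and fn would need a matching extension.
// Same BitmapSource as above; only the encoder changes.
var image = BitmapSource.Create(width, height, dpiX, dpiY, PixelFormats.Rgb48, null, imgdata, stride);
// PngBitmapEncoder (or TiffBitmapEncoder) can preserve 16 bits per channel;
// the BMP and JPEG encoders cannot.
var encoder = new PngBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create(image));
using (var fileStream = new FileStream(fn, FileMode.Create))
{
    encoder.Save(fileStream);
}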
UPDATE as of 12 Nov 2015
I used the PanoTools plugin with Photoshop and Hugin and played with all of those parameters. In the end I found the parameters for projection, HFOV, and image output size that fulfill my minimum requirement.
Parameters:
Processed Output:
My question, then, is: how can I convert all of these parameters and values into a C# algorithm, so that when I provide the original image I get the corrected output image?
Thanks a lot.
I have a square image captured from a circular fisheye camera. The size is 2650 × 2650 pixels.
Now I need to programmatically dewarp the image into a flat panorama image using C#.
I have looked around the internet at different algorithm examples (Link for the code below, Link1 and Link2), but I just can't make them work. My maths sincerely sucks and can't help me here. Hopefully someone is able to guide me through this.
Thanks a lot.
Example of image output from the camera:
-- Image grabbed from the Wikipedia Fisheye Lens article, resized to match my sample's pixel size.
The code I tried to dewarp it with, but with no luck:
Bitmap sourceImage = (Bitmap)Bitmap.FromFile("circularfisheye.jpg");
double factor = 0.5;
Boolean autoCrop = false;
Color backgroundColor = Color.White;
Bitmap StartImage = null;
BitmapData srcBitmapData = null;
Byte[] srcPixels = null;
Byte[] dstPixels = null;
Bitmap NewImage = null;
BitmapData dstBitmapData = null;
try
{
// Checks whether bpp ( Bits Per Pixel ) is 8 , 24, or 32
int Depth = System.Drawing.Bitmap.GetPixelFormatSize(sourceImage.PixelFormat);
if (Depth != 8 && Depth != 24 && Depth != 32)
{
throw new ArgumentException("Only 8, 24 and 32 bpp images are supported.");
}
// Retrieves the count of the color components
int cCount = Depth / 8;
Size baseSize = new Size(sourceImage.Width, sourceImage.Height);
// check if a low image resize and need to improve the quality
// and not generate image aliasing
Int32 maxSize = Math.Max(sourceImage.Width, sourceImage.Height);
if (maxSize < 3000)
{
float percent = 3000F / (float)maxSize;
baseSize = new Size((Int32)((float)sourceImage.Width * percent), (Int32)((float)sourceImage.Height * percent));
}
StartImage = new Bitmap(baseSize.Width, baseSize.Height, sourceImage.PixelFormat);
StartImage.SetResolution(sourceImage.HorizontalResolution, sourceImage.VerticalResolution);
// Create the drawing object and white background
Graphics g = Graphics.FromImage(StartImage);
g.SmoothingMode = SmoothingMode.AntiAlias;
g.InterpolationMode = InterpolationMode.HighQualityBicubic;
g.PixelOffsetMode = PixelOffsetMode.HighQuality;
g.DrawImage(sourceImage, new Rectangle(-1, -1, baseSize.Width + 1, baseSize.Height + 1), 0, 0, sourceImage.Width, sourceImage.Height, GraphicsUnit.Pixel);
g.Dispose();
// Locks the source image and copies it to the byte array and releases the source image
srcBitmapData = StartImage.LockBits(new Rectangle(0, 0, StartImage.Width, StartImage.Height), ImageLockMode.ReadOnly, StartImage.PixelFormat);
srcPixels = new byte[StartImage.Width * StartImage.Height * (Depth / 8)];
Marshal.Copy(srcBitmapData.Scan0, srcPixels, 0, srcPixels.Length);
StartImage.UnlockBits(srcBitmapData);
srcBitmapData = null;
// Create the target image byte array
dstPixels = new Byte[srcPixels.Length];
// Fill the entire frame with the selected background color
Int32 index = ((1 * StartImage.Width) + 1) * cCount; //index = ((Y * Width) + X) * cCount
do
{
if (Depth == 32) //For 32 bpp defines Red , Green, Blue and Alpha
{
dstPixels[index++] = backgroundColor.B;
dstPixels[index++] = backgroundColor.G;
dstPixels[index++] = backgroundColor.R;
dstPixels[index++] = backgroundColor.A; // a
}
if (Depth == 24) //For 24 bpp defines Red , Green and Blue
{
dstPixels[index++] = backgroundColor.B;
dstPixels[index++] = backgroundColor.G;
dstPixels[index++] = backgroundColor.R;
}
if (Depth == 8)
// For 8 bpp defines the value of color ( Red , Green and Blue to be the same thing)
{
dstPixels[index++] = backgroundColor.B;
}
} while (index < srcPixels.Length);
// Calculate the maximum possible extent for the image and multiply by the desired factor
double amp = 0;
double ang = Math.PI * 0.5;
for (Int32 a = 0; a < StartImage.Height; a++)
{
int y = (int)((StartImage.Height / 2) - amp * Math.Sin(ang));
if ((y < 0) || (y > StartImage.Height))
break;
amp = a;
}
amp = (amp - 2) * (factor < -1 ? -1 : (factor > 1 ? 1 : factor));
// Define variables that calculates the cutoff points (if any)
Int32 x1, y1, x2, y2;
x1 = StartImage.Width;
y1 = StartImage.Height;
x2 = 0;
y2 = 0;
// Copy pixel by pixel for the new positions
index = ((1 * StartImage.Width) + 1) * cCount;
do
{
Int32 y = (Int32)((index / cCount) / StartImage.Width);
Int32 x = (index / cCount) - (y * StartImage.Width);
Point pt = NewPoint(new Point(x, y), StartImage.Width, StartImage.Height, amp, factor < 0);
//Values for crop
if (factor >= 0)
{
if (x == StartImage.Width / 2)
{
if (pt.Y < y1)
y1 = pt.Y;
if (pt.Y > y2)
y2 = pt.Y;
}
if (y == StartImage.Height / 2)
{
if (pt.X < x1)
x1 = pt.X;
if (pt.X > x2)
x2 = pt.X;
}
}
else
{
if ((x == 1) && (y == 1))
{
y1 = pt.Y;
x1 = pt.X;
}
if ((x == StartImage.Width - 1) && (y == StartImage.Height - 1))
{
y2 = pt.Y;
x2 = pt.X;
}
}
//Bytes Index which will apply the pixel
Int32 dstIndex = ((pt.Y * StartImage.Width) + pt.X) * cCount;
if (Depth == 32)
{
dstPixels[dstIndex] = srcPixels[index++];
dstPixels[dstIndex + 1] = srcPixels[index++];
dstPixels[dstIndex + 2] = srcPixels[index++];
dstPixels[dstIndex + 3] = srcPixels[index++]; // a
}
if (Depth == 24)
{
dstPixels[dstIndex] = srcPixels[index++];
dstPixels[dstIndex + 1] = srcPixels[index++];
dstPixels[dstIndex + 2] = srcPixels[index++];
}
if (Depth == 8)
{
dstPixels[dstIndex] = srcPixels[index++];
}
} while (index < srcPixels.Length);
//Creates a new image based on the byte array previously created
NewImage = new Bitmap(StartImage.Width, StartImage.Height, StartImage.PixelFormat);
NewImage.SetResolution(StartImage.HorizontalResolution, StartImage.VerticalResolution);
dstBitmapData = NewImage.LockBits(new Rectangle(0, 0, StartImage.Width, StartImage.Height), ImageLockMode.WriteOnly, StartImage.PixelFormat);
Marshal.Copy(dstPixels, 0, dstBitmapData.Scan0, dstPixels.Length);
NewImage.UnlockBits(dstBitmapData);
//Generates the final image to crop or resize the real coo
Bitmap FinalImage = new Bitmap(sourceImage.Width + 1, sourceImage.Height, StartImage.PixelFormat);
NewImage.SetResolution(StartImage.HorizontalResolution, StartImage.VerticalResolution);
Graphics g1 = Graphics.FromImage(FinalImage);
g1.SmoothingMode = SmoothingMode.AntiAlias;
g1.InterpolationMode = InterpolationMode.HighQualityBicubic;
g1.PixelOffsetMode = PixelOffsetMode.HighQuality;
//Performs the cut if enabled automatic cutting and there is need to cut
if ((autoCrop) && ((x1 > 0) || (y1 > 0) || (x2 < NewImage.Height) || (y2 < NewImage.Height)))
{
Rectangle cropRect = new Rectangle(x1, y1, x2 - x1, y2 - y1);
g1.DrawImage(NewImage, new Rectangle(-1, -1, FinalImage.Width + 1, FinalImage.Height + 1), cropRect.X, cropRect.Y, cropRect.Width, cropRect.Height, GraphicsUnit.Pixel);
}
else
{
g1.DrawImage(NewImage, new Rectangle(-1, -1, FinalImage.Width + 1, FinalImage.Height + 1), 0, 0, NewImage.Width, NewImage.Height, GraphicsUnit.Pixel);
}
g1.Dispose();
g1 = null;
NewImage = null;
FinalImage.Save("output.jpg");
FinalImage.Dispose();
}
finally
{
srcBitmapData = null;
srcPixels = null;
dstPixels = null;
dstBitmapData = null;
}
Such a distortion has a symmetry of revolution.
In polar coordinates, with the pole at the center of the image, it is expressed as
r' = f(r)
Θ' = Θ
where the prime indicates the distorted coordinates. The function f is unknown and should be measured empirically, by calibration (looking at a regular target).
To correct the image, you need to invert the function f and apply the reverse transform g = f⁻¹ to the image. In fact, it is easier to measure g directly by calibration. As a starting approximation, a simple model like
r = r' + a·r'³
can do.
Most probably you don't have a picture of a grid taken with the same lens. Your last resort is to implement the undistortion function with adjustable parameters, and optimize these by trial and error.
It should also be possible to derive the calibration curve by looking at the deformation of straight lines, but this is more "technical".
In Cartesian coordinates, you can express the correction transform as
x = g(r')·x'/r'
y = g(r')·y'/r'
where r' = √(x'² + y'²).
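As an illustration, here is a minimal C# sketch of that remapping, assuming the cubic starting model r = r' + a·r'³ with the pole at the image centre; the coefficient a is the adjustable parameter to tune by trial and error, and the naive forward mapping with GetPixel/SetPixel is only for clarity (iterating over destination pixels and inverting g would avoid holes, and LockBits would be far faster):
using System;
using System.Drawing;

static class FisheyeCorrection
{
    // Remaps every distorted pixel (x', y') to the corrected radius r = r' + a·r'^3,
    // keeping the same angle. 'a' is the adjustable parameter to tune by trial and error.
    public static Bitmap RadialUndistort(Bitmap src, double a)
    {
        Bitmap dst = new Bitmap(src.Width, src.Height);
        double cx = src.Width / 2.0, cy = src.Height / 2.0;
        for (int y = 0; y < src.Height; y++)
        {
            for (int x = 0; x < src.Width; x++)
            {
                // Distorted coordinates (x', y') relative to the pole at the image centre
                double dx = x - cx, dy = y - cy;
                double rp = Math.Sqrt(dx * dx + dy * dy);   // r'
                if (rp < 1e-9) { dst.SetPixel(x, y, src.GetPixel(x, y)); continue; }
                // Corrected radius r = g(r'), using the cubic starting model
                double r = rp + a * rp * rp * rp;
                // x = g(r')·x'/r' , y = g(r')·y'/r'
                int nx = (int)Math.Round(cx + dx * r / rp);
                int ny = (int)Math.Round(cy + dy * r / rp);
                if (nx >= 0 && nx < dst.Width && ny >= 0 && ny < dst.Height)
                    dst.SetPixel(nx, ny, src.GetPixel(x, y));   // forward mapping; may leave holes
            }
        }
        return dst;
    }
}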
Use the algorithm from here:
http://www.helviojunior.com.br/fotografia/barrel-and-pincushion-distortion/
It worked for me.
I've made some revamps to HelvioJunior's library (which was linked by @Tarek.Mh); I think this may suit your need:
Below, the code:
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using static System.Math;
namespace HelvioJunior
{
//https://www.helviojunior.com.br/fotografia/barrel-and-pincushion-distortion/
public class Program
{
private static void Main(string[] args)
{
Bitmap source = (Bitmap)Image.FromFile(@"JpwX0.png");
Bitmap bmp = BarrelDistortion(source, 4/10f, true);
bmp.Save(@"test.png");
bmp.Dispose();
source.Dispose();
}
static public Bitmap BarrelDistortion(Bitmap sourceImage, double factor = 0, bool autoCrop = true, uint previewRectangleWidth = 0, Color? fillerColor = null)
{
int sourceRight = sourceImage.Width - 1, sourceBottom = sourceImage.Height - 1;
// Vertical amplitude is half the height times factor
// Horizontal amplitude is missing ; vertical amplitude's applied to both directions
double amp = sourceBottom / 2f * factor;
// Inner shrinking area points
RePoint[] lPts;
bool inverse = factor < 0;
// Shrinking area coordinates (center point is considered always available)
double x1 = sourceRight / 2f,
y1 = sourceBottom / 2f,
x2 = sourceRight / 2f,
y2 = sourceBottom / 2f;
if (inverse)
{
lPts = new RePoint[]
{
new RePoint(0, 0),
new RePoint(0, sourceBottom),
new RePoint(sourceRight, sourceBottom),
new RePoint(sourceRight, 0)
};
}
else
{
lPts = new RePoint[]
{
new RePoint(sourceRight * 1 / 2f, 0),
new RePoint(0, sourceBottom * 1 / 2f),
new RePoint(sourceRight, sourceBottom * 1 / 2f),
new RePoint(sourceRight * 1 / 2f, sourceBottom)
};
}
foreach (var pN in lPts.Select(pt => NewPoint(pt, sourceImage.Width, sourceImage.Height, amp, inverse)))
{
if (pN.Y < y1) y1 = pN.Y;
if (pN.Y > y2) y2 = pN.Y;
if (pN.X < x1) x1 = pN.X;
if (pN.X > x2) x2 = pN.X;
}
// Bytes per color from bit per pixel (bpp) format
int bpcCount = Image.GetPixelFormatSize(sourceImage.PixelFormat) / 8;
Rectangle sourceRectangle = new Rectangle(0, 0, sourceImage.Width, sourceImage.Height);
int srcLength = sourceImage.Width * sourceImage.Height * bpcCount;
// Gets sourceImage byte array as srcpixels
BitmapData srcBitmapData = sourceImage.LockBits(sourceRectangle, ImageLockMode.ReadOnly, sourceImage.PixelFormat);
byte[] srcPixels = new byte[srcLength];
Marshal.Copy(srcBitmapData.Scan0, srcPixels, 0, srcLength);
sourceImage.UnlockBits(srcBitmapData);
srcBitmapData = null;
// Destination byte array preparation as dstPixels
byte[] dstPixels = new byte[srcLength];
int dstIndex = 0;
// Filler color preparation
Color fillColor = fillerColor ?? Color.Transparent;
if (!autoCrop)
{
if (bpcCount <= 4) // Depth > 32bpp may not work as expected, filler color's not applied for bit safety reason
do
{
dstPixels[dstIndex++] = fillColor.B;
if (bpcCount > 1)
{
dstPixels[dstIndex++] = fillColor.G;
dstPixels[dstIndex++] = fillColor.R;
if (bpcCount > 3)
dstPixels[dstIndex++] = fillColor.A; // a
}
} while (dstIndex < srcLength);
}
// Byte-to-byte copy (incl. Point transformation)
int index = 0, srcBpcLength = srcLength - bpcCount;
do
{
int comp = index / bpcCount; // comp yields the current "pixel" position
int y = comp / sourceImage.Width; // Each line is sourceImage.Width pixels wide
int x = comp - (y * sourceImage.Width); // The remainder within the line is the target column (ranges from 0 to width - 1)
// Destination "pixel"
RePoint pt = NewPoint(new RePoint(x, y), sourceImage.Width, sourceImage.Height, amp, inverse);
dstIndex = (((int)pt.Y * sourceImage.Width) + (int)pt.X) * bpcCount; // dstIndex++ overflows when |amp| >= 2
if (dstIndex >= 0 && dstIndex <= srcBpcLength)
for (int i = 0; i++ < bpcCount;)
dstPixels[dstIndex++] = srcPixels[index++];
else
index += bpcCount;
} while (index < srcLength);
srcPixels = null;
// Destination bytes application
BitmapData dstBitmapData = sourceImage.LockBits(sourceRectangle, ImageLockMode.WriteOnly, sourceImage.PixelFormat);
Marshal.Copy(dstPixels, 0, dstBitmapData.Scan0, srcLength);
sourceImage.UnlockBits(dstBitmapData);
dstBitmapData = null;
dstPixels = null;
// Final Image area
Rectangle cropRect = new Rectangle((int)Ceiling(x1), (int)Ceiling(y1), (int)Ceiling(x2 - x1), (int)Ceiling(y2 - y1));
Rectangle destRectangle = autoCrop ? cropRect : sourceRectangle;
// Final image preparation
Bitmap FinalImage = new Bitmap(destRectangle.Width, destRectangle.Height, sourceImage.PixelFormat);
FinalImage.SetResolution(sourceImage.HorizontalResolution, sourceImage.VerticalResolution);
Graphics g1 = Graphics.FromImage(FinalImage);
g1.DrawImage(sourceImage, -destRectangle.X, -destRectangle.Y);
// Previsualization rectangle
if (previewRectangleWidth > 0)
g1.DrawRectangle(new Pen(Color.Red, previewRectangleWidth), cropRect.X - 1, cropRect.Y - 1, cropRect.Width + previewRectangleWidth, cropRect.Height + previewRectangleWidth);
g1.Dispose();
g1 = null;
return FinalImage;
}
private static RePoint NewPoint(RePoint aP, double Width, double Height, double Amplitude, bool inverse)
{
double h = aP.Y / (Height - 1);
double w = aP.X / (Width - 1);
// Works ok for [0/2] to [1/2]
// Floating point error(s) here, in the range of ]1/2] to [2/2] (No workaround found)
double sinX = Round(Sin(PI * w), 15); // Range of [0] to [1] * PI ; result ranges from 0 (far from center) to 1 (at center)
double sinY = Round(Sin(PI * h), 15);
double caX = Amplitude * (1 - 2 * w);
double caY = Amplitude * (1 - 2 * h);
double aY = 0, aX = 0;
if (inverse)
{
aX = -caX;
aY = -caY;
}
double pY = aP.Y + aY + caY * sinX;
double pX = aP.X + aX + caX * sinY;
return new RePoint(pX, pY);
}
private struct RePoint
{
public double X;
public double Y;
public RePoint(double x, double y)
{
X = x;
Y = y;
}
}
}
}
I'm currently working on tracking objects by color from my webcam, and I have it working so far, but I want to add an option to draw multiple objects. Until now it only draws a rectangle around the biggest object.
BlobCounter blobcounter = new BlobCounter();
blobcounter.MinHeight = 100;
blobcounter.MinWidth = 100;
blobcounter.ObjectsOrder = ObjectsOrder.Size;
blobcounter.ProcessImage(grayImage);
Rectangle[] rects = blobcounter.GetObjectsRectangles();
if (checkBox1.Checked == false)
{
if (rects.Length > 0)
{
Rectangle objectRect1 = rects[0];
Graphics g = Graphics.FromImage(video);
using (Pen pen = new Pen(Color.LightGreen, 3))
{
g.DrawRectangle(pen, objectRect1);
PointF drawPoin = new PointF(objectRect1.X, objectRect1.Y);
int objectX = objectRect1.X + objectRect1.Width / 2 - video.Width / 2;
int objectY = video.Height / 2 - (objectRect1.Y + objectRect1.Height / 2);
PointF drawPoin2 = new PointF(objectRect1.X, objectRect1.Y + objectRect1.Height + 4);
String Blobinformation = "X= " + objectX.ToString() + " Y= " + objectY.ToString() + "\nSize=" + objectRect1.Width + ", " + objectRect1.Height;
g.DrawString(Blobinformation, new Font("Arial", 12), new SolidBrush(Color.LightSkyBlue), drawPoin2);
}
g.Dispose();
}
}
else
{
??????????
}
Adding a simple foreach loop should suffice. I don't know how efficient the drawing is, but I'm almost certain it won't be a problem with a few rectangles.
else
{
if (rects.Length > 0)
{
foreach (Rectangle objectRect in rects)
{
Graphics g = Graphics.FromImage(video);
using (Pen pen = new Pen(Color.LightGreen, 3))
{
g.DrawRectangle(pen, objectRect);
PointF drawPoin = new PointF(objectRect.X, objectRect.Y);
int objectX = objectRect.X + objectRect.Width / 2 - video.Width / 2;
int objectY = video.Height / 2 - (objectRect.Y + objectRect.Height / 2);
PointF drawPoin2 = new PointF(objectRect.X, objectRect.Y + objectRect.Height + 4);
String Blobinformation = "X= " + objectX.ToString() + " Y= " + objectY.ToString() + "\nSize=" + objectRect.Width + ", " + objectRect.Height;
g.DrawString(Blobinformation, new Font("Arial", 12), new SolidBrush(Color.LightSkyBlue), drawPoin2);
}
g.Dispose();
}
}
}
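One small suggestion, not required for correctness: the Graphics, Pen, Font, and brush can be created once outside the foreach so they are not re-allocated for every rectangle. A possible restructuring of the same drawing logic:
else
{
    if (rects.Length > 0)
    {
        using (Graphics g = Graphics.FromImage(video))
        using (Pen pen = new Pen(Color.LightGreen, 3))
        using (Font font = new Font("Arial", 12))
        using (Brush brush = new SolidBrush(Color.LightSkyBlue))
        {
            foreach (Rectangle objectRect in rects)
            {
                g.DrawRectangle(pen, objectRect);
                int objectX = objectRect.X + objectRect.Width / 2 - video.Width / 2;
                int objectY = video.Height / 2 - (objectRect.Y + objectRect.Height / 2);
                PointF drawPoint = new PointF(objectRect.X, objectRect.Y + objectRect.Height + 4);
                string blobInformation = "X= " + objectX + " Y= " + objectY + "\nSize=" + objectRect.Width + ", " + objectRect.Height;
                g.DrawString(blobInformation, font, brush, drawPoint);
            }
        }
    }
}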
I'm having a problem writing images using LockBits. I'm working on edge detection software which has a strange distortion effect with most images. I've tried to isolate the problem, and it seems very random. It is not associated with the format; rather, the only images that seem to work are pictures made for desktop wallpapers, and I don't really know why. I only switched to writing with LockBits recently, so I am sure the problem is with that (there were no problems when I was reading with LockBits and writing with SetPixel). Here's a screenshot of the effect:
As you can see, the edge detection works, but the image is distorted horizontally, making the image into a parallelogram.
Here's a code snippet of the method that handles all this (in C#):
private void analyze()
{
//When the analyze button is pressed
percentageInt = float.Parse(textBox1.Text);
float scale = 1;
if (comboBox1.SelectedItem == "Auto")
{
scale = pic.Width / pictureBox1.Width;
}
else if (comboBox1.SelectedItem == "1/2")
{
scale = 2;
}
else if (comboBox1.SelectedItem == "1/4")
{
scale = 4;
}
else if (comboBox1.SelectedItem == "Original")
{
scale = 1;
}
else
{
scale = pic.Width / pictureBox1.Width;
}
int tempWidth = 1;
int tempHeight = 1;
if (scale >= 1)
{
tempWidth = (int)Math.Floor(pic.Width / scale);
tempHeight = (int)Math.Floor(pic.Height / scale);
}
else
{
tempWidth = pic.Width;
tempHeight = pic.Height;
}
width = pic.Width;
height = pic.Height;
edgeData = new Boolean[pic.Width, pic.Height];
img = (Bitmap)resizeImage(pic, new Size(tempWidth, tempHeight));
pic2 = new Bitmap(tempWidth, tempHeight);
Bitmap img2 = (Bitmap)pic2;
Color[] pixels = null;
BitmapData data = img.LockBits(new Rectangle(0, 0, img.Width, img.Height),
ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
int scaledPercent = (int)(Math.Round(percentageInt * 255));
Debug.WriteLine("percent " + scaledPercent);
unsafe
{
Debug.WriteLine("Woah there, unsafe stuff");
byte* prevLine = (byte*)data.Scan0;
byte* currLine = prevLine + data.Stride;
byte* nextLine = currLine + data.Stride;
for (int y = 1; y < img.Height - 1; y++)
{
byte* pp = prevLine + 3;
byte* cp = currLine + 3;
byte* np = nextLine + 3;
for (int x = 1; x < img.Width - 1; x++)
{
if (IsEdgeOptimized(pp, cp, np, scaledPercent))
{
edgeData[x, y] = true;
//Debug.WriteLine("x " + x + "y " + y);
//img2.SetPixel(x, y, Color.Black);
//bytes[(y * img.Width + x) * 3 + 2] = 255;
}
else
{
bytes[(y * img.Width + x) * 3] = 255;
bytes[(y * img.Width + x) * 3 + 1] = 255;
bytes[(y * img.Width + x) * 3 + 2] = 255;
//img2.SetPixel(x, y, Color.White);
}
pp += 3; cp += 3; np += 3;
}
prevLine = currLine;
currLine = nextLine;
nextLine += data.Stride;
}
}
System.Runtime.InteropServices.Marshal.Copy(bytes, 0, data.Scan0, size);
img.UnlockBits(data);
pictureBox2.Image = img;
} // end analyze
So what is causing the problem, and how can I fix it? If you need more details, feel free to comment.
You're initializing your bytes buffer with stride x height bytes:
int size = Math.Abs(data.Stride) * img.Height;
Byte[] bytes = new byte[size];
But then using the width (instead of stride) when you write to it:
bytes[(y * img.Width + x) * 3] = 255;
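A minimal sketch of the corrected indexing, assuming the 24bpp format from the LockBits call (Stride is the row pitch in bytes, including any end-of-row padding, so it is what must multiply y):
// Inside the inner loop, replacing the width-based indexing:
int offset = y * data.Stride + x * 3;   // data.Stride accounts for per-row padding
bytes[offset] = 255;      // B
bytes[offset + 1] = 255;  // G
bytes[offset + 2] = 255;  // R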
Hi, I have this method for a project I am doing with the Kinect. Unfortunately the code is for the beta version and I need to update it to the 1.5 SDK version. I tried a few things but they won't work. Here is what I have so far. The method is called nui_DepthFrameReady.
void nui_DepthFrameReady(object sender, ImageFrameReadyEventArgs e)
{
if (!savedDepth)
{
PlanarImage Image = e.ImageFrame.Image;
byte[] convertedDepthFrame = convertDepthFrame(Image.Bits);
depth.Source = BitmapSource.Create(
Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * 4);
++totalFrames;
DateTime cur = DateTime.Now;
if (cur.Subtract(lastTime) > TimeSpan.FromSeconds(1))
{
int frameDiff = totalFrames - lastFrames;
lastFrames = totalFrames;
lastTime = cur;
frameRate.Text = frameDiff.ToString() + " fps";
}
if (subscribed)
{
//byte[] ColoredBytes = GenerateColoredBytes(e.ImageFrame);
//create an image based on the colored bytes
BitmapSource myBitmapDepth = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * PixelFormats.Bgr32.BitsPerPixel / 8);
string imageFilePath = @"C:\Temp\kinect\depth\bmpDepthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".png";
string dataFilePath = @"C:\Temp\kinect\depth\depthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".dat";
//savePngFrame(myBitmapDepth, imageFilePath);
//Crop frame to size 180x240
byte[] croppedDepthFrame = new byte[180 * 240 * 2];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j < 180 * 2; j += 2)
{
croppedDepthFrame[i * 180 * 2 + j] = Image.Bits[i * 320 * 2 + j + 69 * 2];
croppedDepthFrame[i * 180 * 2 + j + 1] = Image.Bits[i * 320 * 2 + j + 1 + 69 * 2];
//Console.Write((i * 180 * 2 + j) + "-" + (i * 180 * 2 + j + 69*2) +", ");
}
//Console.WriteLine();
}
FileStream fs = new FileStream(dataFilePath, FileMode.Create);
fs.Write(croppedDepthFrame, 0, croppedDepthFrame.Length);
fs.Close();
}
savedDepth = true;
}
}
Thank you for the help.
This is what I have so far
void nui_DepthImageReady(object sender, DepthImageFrameReadyEventArgs e)
{
if (!savedDepth)
{
short[] pixelData;
bool receivedData = false;
using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
{
if (depthImageFrame != null)
{
if (pixelData == null) //allocate the first time
{
pixelData = new short[depthImageFrame.PixelDataLength];
}
depthImageFrame.CopyPixelDataTo(pixelData);
receivedData = true;
}
else
{
// app's processing of image data took too long; it got more than 2 frames behind.
// the data is no longer available.
}
}
if (receivedData)
{
byte[] convertedDepthFrame = convertDepthFrame(Image.bits);
depth.Source = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * 4);
++totalFrames;
DateTime cur = DateTime.Now;
if (cur.Subtract(lastTime) > TimeSpan.FromSeconds(1))
{
int frameDiff = totalFrames - lastFrames;
lastFrames = totalFrames;
lastTime = cur;
frameRate.Text = frameDiff.ToString() + " fps";
}
if (subscribed)
{
//byte[] ColoredBytes = GenerateColoredBytes(e.ImageFrame);
//create an image based on the colored bytes
BitmapSource myBitmapDepth = BitmapSource.Create(Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, convertedDepthFrame, Image.Width * PixelFormats.Bgr32.BitsPerPixel / 8);
string imageFilePath = @"C:\Temp\kinect\depth\bmpDepthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".png";
string dataFilePath = @"C:\Temp\kinect\depth\depthFrame_" + cur.TimeOfDay.ToString().Replace(':', '.') + ".dat";
//savePngFrame(myBitmapDepth, imageFilePath);
//Crop frame to size 180x240
byte[] croppedDepthFrame = new byte[180 * 240 * 2];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j < 180 * 2; j += 2)
{
croppedDepthFrame[i * 180 * 2 + j] = Image.Bits[i * 320 * 2 + j + 69 * 2];
croppedDepthFrame[i * 180 * 2 + j + 1] = Image.Bits[i * 320 * 2 + j + 1 + 69 * 2];
//Console.Write((i * 180 * 2 + j) + "-" + (i * 180 * 2 + j + 69*2) +", ");
}
//Console.WriteLine();
}
FileStream fs = new FileStream(dataFilePath, FileMode.Create);
fs.Write(croppedDepthFrame, 0, croppedDepthFrame.Length);
fs.Close();
}
}
savedDepth = true;
}
}
Image.Bits has no definition in the new Kinect SDK, and neither do Image.Width and Image.Height.
These are the errors I'm getting, so I don't know how to convert the code to get the same information.
I'm comparing your sample to a sample in the Kinect 1.5 SDK samples, and it appears that what you've called Image may be the same as pixelData. I don't see where Image was defined though, so I'm guessing. Does this help?
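In case it helps, here is a hedged sketch of where the old values could come from in the 1.5 SDK: DepthImageFrame exposes Width, Height, and the depth pixels you copy out, so Image.Width / Image.Height / Image.Bits can be replaced by values captured from the frame. The 1.5 depth data is a short[] whose bit layout differs from the beta, so convertDepthFrame would likely need adjusting too; the raw-byte conversion below is only an assumption about the input it expects.
int frameWidth = 0, frameHeight = 0;
short[] pixelData = null;
using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
{
    if (depthImageFrame != null)
    {
        frameWidth = depthImageFrame.Width;    // replaces Image.Width
        frameHeight = depthImageFrame.Height;  // replaces Image.Height
        pixelData = new short[depthImageFrame.PixelDataLength];
        depthImageFrame.CopyPixelDataTo(pixelData);
    }
}
if (pixelData != null)
{
    // Replaces Image.Bits: the raw depth bytes, two bytes per pixel (little-endian)
    byte[] rawDepthBytes = new byte[pixelData.Length * sizeof(short)];
    Buffer.BlockCopy(pixelData, 0, rawDepthBytes, 0, rawDepthBytes.Length);
    byte[] convertedDepthFrame = convertDepthFrame(rawDepthBytes);
    depth.Source = BitmapSource.Create(frameWidth, frameHeight, 96, 96,
        PixelFormats.Bgr32, null, convertedDepthFrame, frameWidth * 4);
}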