I am using Kinect v2 and have a small program that only shows the body and color streams, but the stream stops sending frames after fetching just three frames. Here is the code:
_sensor = KinectSensor.GetDefault();
if (_sensor != null)
{
    _sensor.Open();
    _reader = _sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth | FrameSourceTypes.Infrared | FrameSourceTypes.Body);
    _reader.MultiSourceFrameArrived += Reader_MultiSourceFrameArrived;
}
and here is how I am getting the frames:
private void Reader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
{
    Console.WriteLine("==== FRAME FOUND ====");
    var reference = e.FrameReference.AcquireFrame();
    // Color
    using (var frame = reference.ColorFrameReference.AcquireFrame())
    {
        if (frame != null)
        {
            //stream.Children.Clear();
            var c_frame = reference.ColorFrameReference.AcquireFrame();
            ImageBrush ib = new ImageBrush();
            Image im = new Image();
            rgb.Source = frame.ToBitmap();
            var b_frame = reference.BodyFrameReference.AcquireFrame();
            _bodies = new Body[b_frame.BodyFrameSource.BodyCount];
            b_frame.GetAndRefreshBodyData(_bodies);
            if (_bodies[0].IsTracked)
            {
                stream.DrawSkeleton(_bodies[0]);
                if (recording)
                {
                    recorder.RecordFrame(_bodies[0]);
                }
            }
            b_frame.GetAndRefreshBodyData(_bodies);
        }
    }
}
Most probably your build target is set to a 32-bit CPU (x86). Set it to 64-bit (x64).
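Also worth checking, independent of the build target: every frame acquired from a MultiSourceFrame holds an underlying Kinect buffer until it is disposed, and in the handler above c_frame and b_frame are never disposed, which can likewise stall the reader after a few frames. A minimal sketch of the handler with explicit disposal (same names as in the question):

private void Reader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
{
    var reference = e.FrameReference.AcquireFrame();

    // Dispose each acquired frame as soon as it has been used, so the
    // sensor can recycle the buffer and keep delivering frames.
    using (var colorFrame = reference.ColorFrameReference.AcquireFrame())
    {
        if (colorFrame != null)
        {
            rgb.Source = colorFrame.ToBitmap();
        }
    }

    using (var bodyFrame = reference.BodyFrameReference.AcquireFrame())
    {
        if (bodyFrame != null)
        {
            _bodies = new Body[bodyFrame.BodyFrameSource.BodyCount];
            bodyFrame.GetAndRefreshBodyData(_bodies);
            if (_bodies[0].IsTracked)
            {
                stream.DrawSkeleton(_bodies[0]);
                if (recording)
                {
                    recorder.RecordFrame(_bodies[0]);
                }
            }
        }
    }
}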
I am processing frames received from Kinect v2 (Color and IR) in UWP. The program runs on a remote machine (Xbox One S). The main goal is to get the frames and write them to disk at 30 fps, for both Color and IR, to process them further later.
I am using the following code to check the frame rate:
public MainPage()
{
    this.InitialiseFrameReader(); // initialises MediaCapture for IR and Color
}

const int COLOR_SOURCE = 0;
const int IR_SOURCE = 1;

private async void InitialiseFrameReader()
{
    await CleanupMediaCaptureAsync();
    var allGroups = await MediaFrameSourceGroup.FindAllAsync();
    if (allGroups.Count == 0)
    {
        return;
    }
    _groupSelectionIndex = (_groupSelectionIndex + 1) % allGroups.Count;
    var selectedGroup = allGroups[_groupSelectionIndex];
    var kinectGroup = selectedGroup;
    try
    {
        await InitializeMediaCaptureAsync(kinectGroup);
    }
    catch (Exception exception)
    {
        _logger.Log($"MediaCapture initialization error: {exception.Message}");
        await CleanupMediaCaptureAsync();
        return;
    }
    // Set up frame readers, register event handlers and start streaming.
    var startedKinds = new HashSet<MediaFrameSourceKind>();
    foreach (MediaFrameSource source in _mediaCapture.FrameSources.Values.Where(x => x.Info.SourceKind == MediaFrameSourceKind.Color || x.Info.SourceKind == MediaFrameSourceKind.Infrared))
    {
        MediaFrameSourceKind kind = source.Info.SourceKind;
        MediaFrameSource frameSource = null;
        int frameindex = COLOR_SOURCE;
        if (kind == MediaFrameSourceKind.Infrared)
        {
            frameindex = IR_SOURCE;
        }
        // Ignore this source if we already have a source of this kind.
        if (startedKinds.Contains(kind))
        {
            continue;
        }
        MediaFrameSourceInfo frameInfo = kinectGroup.SourceInfos[frameindex];
        if (_mediaCapture.FrameSources.TryGetValue(frameInfo.Id, out frameSource))
        {
            // Create a frame reader based on the source stream
            MediaFrameReader frameReader = await _mediaCapture.CreateFrameReaderAsync(frameSource);
            frameReader.FrameArrived += FrameReader_FrameArrived;
            _sourceReaders.Add(frameReader);
            MediaFrameReaderStartStatus status = await frameReader.StartAsync();
            if (status == MediaFrameReaderStartStatus.Success)
            {
                startedKinds.Add(kind);
            }
        }
    }
}
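As a side note (an assumption on my part, not something the code above guarantees): SourceInfos is not guaranteed to hold Color at index 0 and IR at index 1 on every group, so resolving the source info by kind is more robust than the hard-coded COLOR_SOURCE/IR_SOURCE indices. A sketch of the lookup:

// A sketch: resolve the frame source from the kind being iterated over,
// instead of relying on a fixed position inside kinectGroup.SourceInfos.
MediaFrameSourceInfo frameInfo = kinectGroup.SourceInfos
    .FirstOrDefault(info => info.SourceKind == kind);
if (frameInfo != null && _mediaCapture.FrameSources.TryGetValue(frameInfo.Id, out frameSource))
{
    // ... create and start the reader exactly as above ...
}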
private async Task InitializeMediaCaptureAsync(MediaFrameSourceGroup sourceGroup)
{
    if (_mediaCapture != null)
    {
        return;
    }
    // Initialize MediaCapture with the source group.
    _mediaCapture = new MediaCapture();
    var settings = new MediaCaptureInitializationSettings
    {
        SourceGroup = sourceGroup,
        SharingMode = MediaCaptureSharingMode.SharedReadOnly,
        StreamingCaptureMode = StreamingCaptureMode.Video,
        MemoryPreference = MediaCaptureMemoryPreference.Cpu
    };
    await _mediaCapture.InitializeAsync(settings);
}
private void FrameReader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
{
    using (var frame = sender.TryAcquireLatestFrame())
    {
        if (frame != null)
        {
            //Settings.cameraframeQueue.Enqueue(null, frame.SourceKind.ToString(), frame.SystemRelativeTime.Value); //Add to Queue to process frame
            Debug.WriteLine(frame.SourceKind.ToString() + " : " + frame.SystemRelativeTime.ToString());
        }
    }
}
I am trying to debug the application to check the frame rate, so I have removed all further processing. I am not sure whether I am calculating the rate incorrectly or something else is wrong.
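For reference, a minimal way to bucket frames per second from SystemRelativeTime (a sketch; the field and method names here are illustrative, not from my actual code):

// A sketch: count how many frames arrive within each whole second of
// SystemRelativeTime, per source kind. Call this from FrameReader_FrameArrived.
private readonly Dictionary<string, Dictionary<long, int>> _frameCounts =
    new Dictionary<string, Dictionary<long, int>>();

private void CountFrame(MediaFrameReference frame)
{
    if (frame.SystemRelativeTime == null) return;
    string kind = frame.SourceKind.ToString();
    long second = (long)frame.SystemRelativeTime.Value.TotalSeconds;
    if (!_frameCounts.ContainsKey(kind))
        _frameCounts[kind] = new Dictionary<long, int>();
    var buckets = _frameCounts[kind];
    buckets[second] = buckets.TryGetValue(second, out int n) ? n + 1 : 1;
}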
For example, SystemRelativeTime from 04:37:06 to 04:37:48 gives:
IR, fps (occurrences): 31 (1), 30 (36), 29 (18), 28 (4)
Color, fps (occurrences): 30 (38), 29 (18), 28 (3)
I want this frame rate to be constant (30 fps) and aligned, so that IR and Color produce the same number of frames over the same time span.
This is without any additional code. As soon as I add a processing queue or any other code, the fps decreases and ranges from 15 to 30.
Can anyone please help me with this?
Thank you.
UPDATE:
After some testing, I have noticed that the PC produces 30 fps, but the Xbox One (the remote device) produces a very low fps in debug mode. The fps does improve in release mode (or when published to the MS Store), but the memory allocated to UWP apps on Xbox One is quite low: a maximum of 1 GB for apps, versus 5 GB for games.
https://learn.microsoft.com/en-us/windows/uwp/xbox-apps/system-resource-allocation
On the PC the fps stays at 30, as memory there has no such restrictions; this restriction is what causes the frame rate to drop on the console.
I'm trying to add noise to my image and show it in a PictureBox, then blur it and show it in another PictureBox. But I see two blurred images in my PictureBoxes. How can I show both of them (the noisy one and the blurred one)?
Note: I don't want to create a new Bitmap.
Filtreler f1 = new Filtreler();
Bitmap Orj = new Bitmap(pBox_SOURCE.Image);
f1.Imge = Orj;
if (SablonBoyutu % 2 == 1)
{
    f1.addnoise(f1.Imge);
    pictureBoxNoisyImg.Image = f1.Imge;
    f1.meanfilter(SablonBoyutu, f1.Imge);
    pBox_PROCESSED.Image = f1.Imge;
}
class Filtreler
{
    private Bitmap resim;
    public Bitmap Imge
    {
        get { return resim; }
        set { resim = value; }
    }
    // ... (my filters)
}
I think you need one more copy (img2) of your image:
f1.addnoise(f1.Imge);
pictureBoxNoisyImg.Image = f1.Imge;
var img2 = new Bitmap(pictureBoxNoisyImg.Image);
f1.meanfilter(SablonBoyutu, img2);
pBox_PROCESSED.Image = img2;
Or
f1.addnoise(f1.Imge);
pictureBoxNoisyImg.Image = new Bitmap(f1.Imge);
f1.meanfilter(SablonBoyutu, f1.Imge);
pBox_PROCESSED.Image = f1.Imge;
Edit
To dispose of the old images, you can do:
f1.addnoise(f1.Imge);
if (pictureBoxNoisyImg.Image != null)
{
    pictureBoxNoisyImg.Image.Dispose();
    pictureBoxNoisyImg.Image = null;
}
pictureBoxNoisyImg.Image = new Bitmap(f1.Imge);
f1.meanfilter(SablonBoyutu, f1.Imge);
if (pBox_PROCESSED.Image != null)
{
    pBox_PROCESSED.Image.Dispose();
    pBox_PROCESSED.Image = null;
}
pBox_PROCESSED.Image = f1.Imge;
There is an alternative method called cloning (image.Clone()) instead of creating a new Bitmap instance. Maybe it will help you.
What's the difference between Bitmap.Clone() and new Bitmap(Bitmap)?
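For example (a sketch using the names from the question; note that Bitmap.Clone() preserves the original pixel format, whereas new Bitmap(Image) re-encodes the copy as 32bpp ARGB):

f1.addnoise(f1.Imge);
pictureBoxNoisyImg.Image = f1.Imge;

// Clone keeps the pixel format of the source bitmap.
var img2 = (Bitmap)f1.Imge.Clone();
f1.meanfilter(SablonBoyutu, img2);
pBox_PROCESSED.Image = img2;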
I have a device that rotates an object and takes a picture of a portion of the object at regular intervals; currently, I have 30 pictures. To stitch the images into a single flat image, I take a slice of fixed width (between 50 and 75 pixels) right out of the center of each picture. I am trying to stitch these slices together using the EMGU CV stitching library, with the sample stitching code that comes with EMGU, testing with between 5 and 10 slices at a time. Sometimes I get an error that says "Error, need more images". When I do get a result, it looks terrible, with weird curvatures. I don't need any spatial adjustments; I just want to stitch the slices in a linear fashion from left to right. Any ideas, either using EMGU or something else?
Here are a few slices and the result:
Why is the resulting image not the same height as the 4 slices? What must be done just to stitch these together in a linear fashion so that the text is continuous?
Here is the code I am using:
private void selectImagesButton_Click(object sender, EventArgs e)
{
    OpenFileDialog dlg = new OpenFileDialog();
    dlg.CheckFileExists = true;
    dlg.Multiselect = true;
    if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
    {
        sourceImageDataGridView.Rows.Clear();
        Image<Bgr, byte>[] sourceImages = new Image<Bgr, byte>[dlg.FileNames.Length];
        for (int i = 0; i < sourceImages.Length; i++)
        {
            sourceImages[i] = new Image<Bgr, byte>(dlg.FileNames[i]);
            using (Image<Bgr, byte> thumbnail = sourceImages[i].Resize(200, 200, Emgu.CV.CvEnum.Inter.Cubic, true))
            {
                DataGridViewRow row = sourceImageDataGridView.Rows[sourceImageDataGridView.Rows.Add()];
                row.Cells["FileNameColumn"].Value = dlg.FileNames[i];
                row.Cells["ThumbnailColumn"].Value = thumbnail.ToBitmap();
                row.Height = 200;
            }
        }
        try
        {
            // Only use GPU if you have built the native binary from code and enabled "NON_FREE"
            using (Stitcher stitcher = new Stitcher(false))
            {
                using (AKAZEFeaturesFinder finder = new AKAZEFeaturesFinder())
                {
                    stitcher.SetFeaturesFinder(finder);
                    using (VectorOfMat vm = new VectorOfMat())
                    {
                        Mat result = new Mat();
                        vm.Push(sourceImages);
                        Stopwatch watch = Stopwatch.StartNew();
                        this.Text = "Stitching";
                        Stitcher.Status stitchStatus = stitcher.Stitch(vm, result);
                        watch.Stop();
                        if (stitchStatus == Stitcher.Status.Ok)
                        {
                            resultImageBox.Image = result;
                            this.Text = String.Format("Stitched in {0} milliseconds.", watch.ElapsedMilliseconds);
                        }
                        else
                        {
                            MessageBox.Show(this, String.Format("Stitching Error: {0}", stitchStatus));
                            resultImageBox.Image = null;
                        }
                    }
                }
            }
        }
        finally
        {
            foreach (Image<Bgr, Byte> img in sourceImages)
            {
                img.Dispose();
            }
        }
    }
}
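If the slices really only need to be butted together from left to right with no warping, one option is to skip the Stitcher entirely and concatenate them horizontally. A sketch (it assumes every slice has exactly the same pixel height and that CvInvoke.HConcat, the wrapper for OpenCV's hconcat, is available in your Emgu version):

// A sketch: join equal-height slices left-to-right with no feature
// matching and no warping.
Mat result = sourceImages[0].Mat.Clone();
for (int i = 1; i < sourceImages.Length; i++)
{
    Mat joined = new Mat();
    CvInvoke.HConcat(result, sourceImages[i].Mat, joined);
    result.Dispose();
    result = joined;
}
resultImageBox.Image = result;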
I have a problem with SetRecordRotation in UWP. I was wondering whether it is possible to go from SetRecordRotation(VideoRotation.Clockwise180Degrees) to SetRecordRotation(VideoRotation.Clockwise360Degrees), and if not, whether there is another way to flip or mirror the video, like in Skype.
I am creating an app that needs to record with a preview, like in Skype. Below is my code; any suggestions?
private async Task InitializeCameraAsync()
{
    Debug.WriteLine("InitializeCameraAsync");
    if (mc == null)
    {
        // Attempt to get the back camera if one is available, but use any camera device if not
        var allVideoDevices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
        //StorageFile sampleFile = await localFolder.GetFileAsync("proporties.txt");
        //String timestamp = await FileIO.ReadTextAsync(sampleFile);
        var cameraDevice = localSettings.Values["camValue"].ToString();
        if (allVideoDevices == null)
        {
            Debug.WriteLine("No camera device found!");
            return;
        }
        // Create MediaCapture and its settings
        mc = new MediaCapture();
        var settings = new MediaCaptureInitializationSettings { VideoDeviceId = allVideoDevices[int.Parse(cameraDevice)].Id };
        await mc.InitializeAsync(settings);
        //CaptureElement.RenderTransform = new ScaleTransform { ScaleX = -1 };
        //_isInitialized = true;
        SetResolution();
        DisplayInformation displayInfo = DisplayInformation.GetForCurrentView();
        displayInfo.OrientationChanged += DisplayInfo_OrientationChanged;
        DisplayInfo_OrientationChanged(displayInfo, null);
        stream = new InMemoryRandomAccessStream();
        llmr = await mc.PrepareLowLagRecordToStreamAsync(MediaEncodingProfile.CreateMp4(VideoEncodingQuality.Auto), stream);
        //mc.SetPreviewRotation(VideoRotation.Clockwise180Degrees);
        //mc.SetRecordRotation(rotationAngle);
        //CaptureElement.RenderTransform = new ScaleTransform()
        //{
        //    ScaleX = 1
        //};
        //mc.SetPreviewMirroring(_mirroringPreview);
        //SetPreviewRotationAsync();

        // -> I want this to be VideoRotation.Clockwise360Degrees instead of Clockwise180Degrees; is there any way to increase it?
        mc.SetRecordRotation(VideoRotation.Clockwise180Degrees);
        await llmr.StartAsync();
        await llmr.StopAsync();
        CaptureElement.Source = mc;
        CaptureElement.FlowDirection = _mirroringPreview ? FlowDirection.LeftToRight : FlowDirection.RightToLeft;
        CaptureStack.Visibility = Visibility.Visible;
        //if (localSettings.Values.ContainsKey("camValue") == false)
        //{
        //    CameraErrorTextBlock.Visibility = Visibility.Visible;
        //}
        RecordProgress.Visibility = Visibility.Visible;
        CaptureGrid.Visibility = Visibility.Visible;
        CancelButton.HorizontalAlignment = HorizontalAlignment.Right;
        //CaptureElement.FlowDirection = FlowDirection.LeftToRight;

        // Prepare low lag recording
        stream = new InMemoryRandomAccessStream();
        //var encodingProperties = (CaptureElement.Tag as StreamResolution).EncodingProperties;
        var encodingProfile = MediaEncodingProfile.CreateMp4(VideoEncodingQuality.Auto);
        // Calculate rotation angle, taking mirroring into account if necessary
        //var rotationAngle = VideoRotation.Clockwise180Degrees + VideoRotation.Clockwise180Degrees;
        //mc.SetRecordRotation(rotationAngle);
        //var rotationAngle = 360 - ConvertDeviceOrientationToDegrees(GetCameraOrientation());
        //encodingProfile.Video.Properties.Add(RotationKey, mc.SetRecordRotation(rotationAngle));
        llmr = await mc.PrepareLowLagRecordToStreamAsync(encodingProfile, stream);
        await mc.StartPreviewAsync();
    }
    else if (mc != null)
    {
        //if (localSettings.Values.ContainsKey("camValue") == true)
        //{
        CameraErrorTextBlock.Visibility = Visibility.Visible;
        //}
    }
}
Two things that I would like to call out:
Rotating anything 360 degrees is the same as rotating it 0 degrees, which means it will remain unchanged. What you want is to flip it horizontally, to mirror it.
Apps like Skype only do this for the user-side preview, not for the stream transmitted to the other endpoint, which remains unchanged. The reason for this is that if the user holds up something like text, the receiver should be able to see it the way it is. Reading mirrored text is a lot harder.
So, even though I said you should do mirroring instead of rotating 360 degrees, in reality you shouldn't do anything at all to the video capture stream in order to provide the best experience.
Finally, to mirror the preview, the easiest way is to use the FlowDirection property of the CaptureElement (for C# or C++), or alternatively use a transform of x:-1 y:1 on the style of the video element (for JS):
cameraPreview.style.transform = "scale(-1, 1)";
or a RenderTransform (for C# or C++). For a reference on how to mirror the preview, you can check out the CameraStarterKit sample on github, which covers C#, C++, JS and VB.
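For example, in C# (a minimal sketch; CaptureElement is the XAML preview control from the question's code):

// Mirror only the on-screen preview; the recorded stream is left untouched.
CaptureElement.FlowDirection = FlowDirection.RightToLeft;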
I am working on a people counter. For this, I have a Microsoft Kinect installed over the door.
I am working with C# and EmguCV. I have extracted the heads of the people so that they appear as white blobs on a black image, and I have created a bounding box around each head. That works fine: I know how many blobs I have per frame and I also know their positions. But now I want to track the blobs, because I want to count how many people come in and go out, and I don't know how to do this. The problem is that in every frame new blobs can appear and old blobs can disappear. Can anyone give me an algorithm, some code, or a paper?
Thanks a lot!
Sure. This is the code for the blobs:
using (MemStorage stor = new MemStorage())
{
    Contour<System.Drawing.Point> contours = head_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL, stor);
    for (int i = 0; contours != null; contours = contours.HNext)
    {
        i++;
        //if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
        {
            MCvBox2D box = contours.GetMinAreaRect();
            blobCount++;
            contour_image.Draw(box, new Bgr(System.Drawing.Color.Red), 1);
            new_position = new System.Drawing.Point((int)(box.center.X), (int)(box.center.Y));
            new_x = box.center.X;
            new_y = box.center.Y;
        }
    }
}
Please see Emgu CV Blob Detection for more information. Assuming you are using Emgu CV 2.1 or higher, the answer there will work. If you are using version 1.5 or higher, see this thread on how to easily detect blobs, or look at the code below:
Capture capture = new Capture();
ImageViewer viewer = new ImageViewer();
BlobTrackerAutoParam param = new BlobTrackerAutoParam();
param.ForgroundDetector = new ForgroundDetector(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
param.FGTrainFrames = 10;
BlobTrackerAuto tracker = new BlobTrackerAuto(param);
Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
{
    tracker.Process(capture.QuerySmallFrame().PyrUp());
    Image<Gray, Byte> img = tracker.GetForgroundMask();
    //viewer.Image = tracker.GetForgroundMask();
    MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);
    foreach (MCvBlob blob in tracker)
    {
        img.Draw(Rectangle.Round(blob), new Gray(255.0), 2);
        img.Draw(blob.ID.ToString(), ref font, Point.Round(blob.Center), new Gray(255.0));
    }
    viewer.Image = img;
});
viewer.ShowDialog();
Hope this helps!
EDIT
I think you should use this code every ten frames or so (~3 times a second) and do something like this:
Capture capture = new Capture();
ImageViewer viewer = new ImageViewer();
BlobTrackerAutoParam param = new BlobTrackerAutoParam();
param.ForgroundDetector = new ForgroundDetector(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
param.FGTrainFrames = 10;
BlobTrackerAuto tracker = new BlobTrackerAuto(param);
int frames = 0;
Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
{
    frames++; // add to number of frames
    if (frames == 10)
    {
        frames = 0; // after 10 frames, do the processing and reset the counter
        tracker.Process(capture.QuerySmallFrame().PyrUp());
        Image<Gray, Byte> img = tracker.GetForgroundMask();
        //viewer.Image = tracker.GetForgroundMask();
        int blobs = 0;
        MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);
        foreach (MCvBlob blob in tracker)
        {
            //img.Draw(Rectangle.Round(blob), new Gray(255.0), 2);
            //img.Draw(blob.ID.ToString(), ref font, Point.Round(blob.Center), new Gray(255.0));
            //Only uncomment these if you want to draw a rectangle around the blob and add text
            blobs++; // count each blob
        }
        // use blobs as your counter here, then reset it
        blobs = 0;
        viewer.Image = img; // show the processed frame
    }
});
viewer.ShowDialog();
EDIT 2
It sounds like you just want to identify the blobs; for that you want MCvBlob.ID. This is the ID of the blob, and you can check which IDs are still there and which are not. I would still do this only every ten frames or so, to avoid slowing things down. You just need a simple algorithm that observes what the IDs are and whether they have changed. I would store the IDs in a List<string> and check that list for changes every few frames. Example:
List<string> LastFrameIDs = new List<string>(), CurrentFrameIDs = new List<string>();
Capture capture = new Capture();
ImageViewer viewer = new ImageViewer();
BlobTrackerAutoParam param = new BlobTrackerAutoParam();
param.ForgroundDetector = new ForgroundDetector(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
param.FGTrainFrames = 10;
BlobTrackerAuto tracker = new BlobTrackerAuto(param);
int frames = 0;
Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
{
    frames++; // add to number of frames
    if (frames == 10)
    {
        frames = 0; // after 10 frames, do the processing and reset the counter
        tracker.Process(capture.QuerySmallFrame().PyrUp());
        Image<Gray, Byte> img = tracker.GetForgroundMask();
        //viewer.Image = tracker.GetForgroundMask();
        int blobs = 0, i = 0;
        MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);
        foreach (MCvBlob blob in tracker)
        {
            //img.Draw(Rectangle.Round(blob), new Gray(255.0), 2);
            //img.Draw(blob.ID.ToString(), ref font, Point.Round(blob.Center), new Gray(255.0));
            //Only uncomment these if you want to draw a rectangle around the blob and add text
            CurrentFrameIDs.Add(blob.ID.ToString());
            if (i >= LastFrameIDs.Count || CurrentFrameIDs[i] != LastFrameIDs[i])
                img.Draw(Rectangle.Round(blob), new Gray(0), 2); // mark the new/changed blob
            blobs++; // count each blob
            i++;
        }
        // use blobs as your counter here, then reset it
        blobs = 0;
        LastFrameIDs = CurrentFrameIDs;
        CurrentFrameIDs = new List<string>();
        viewer.Image = img; // show the processed frame
    }
});
viewer.ShowDialog();