AudioGraph C# UWP

I'm trying to create a call recorder for Windows Phone using UWP, and I'm trying to do this with AudioGraph. I need to create a node for the input device (microphone) and a node for the output device (speaker), and route them into a file (WAV/MP3).
I'm receiving an exception with the following code:
AudioGraph graph;
AudioDeviceInputNode deviceInputNode;
AudioDeviceOutputNode deviceOutputNode;
AudioFileOutputNode fileOutputNode;
private async Task InitAudiographAsync()
{
AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Speech);
CreateAudioGraphResult result = await AudioGraph.CreateAsync(settings);
if (result.Status == AudioGraphCreationStatus.Success)
{
graph = result.Graph;
CreateAudioDeviceOutputNodeResult deviceOutputNodeResult = await graph.CreateDeviceOutputNodeAsync();
if (deviceOutputNodeResult.Status == AudioDeviceNodeCreationStatus.Success)
{
deviceOutputNode = deviceOutputNodeResult.DeviceOutputNode;
var microphone = await DeviceInformation.CreateFromIdAsync(
MediaDevice.GetDefaultAudioCaptureId(AudioDeviceRole.Default));
var inProfile = MediaEncodingProfile.CreateWav(AudioEncodingQuality.High);
var deviceInputNodeResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Speech, inProfile.Audio, microphone);
if (deviceInputNodeResult.Status == AudioDeviceNodeCreationStatus.Success)
{
deviceInputNode = deviceInputNodeResult.DeviceInputNode;
FileSavePicker saveFilePicker = new FileSavePicker();
saveFilePicker.FileTypeChoices.Add("Windows Media Audio", new List<string>() { ".wma" });
saveFilePicker.FileTypeChoices.Add("MPEG Audio Layer-3", new List<string>() { ".mp3" });
saveFilePicker.SuggestedFileName = "New Audio Track";
StorageFile file = await saveFilePicker.PickSaveFileAsync();
// File can be null if cancel is hit in the file picker
if (file == null)
{
return;
}
MediaEncodingProfile mediaEncodingProfile;
switch (file.FileType.ToString().ToLowerInvariant())
{
case ".mp3":
mediaEncodingProfile = MediaEncodingProfile.CreateMp3(AudioEncodingQuality.High);
break;
case ".wav":
mediaEncodingProfile = MediaEncodingProfile.CreateWav(AudioEncodingQuality.High);
break;
default:
throw new ArgumentException();
}
CreateAudioFileOutputNodeResult fileOutputNodeResult = await graph.CreateFileOutputNodeAsync(file, mediaEncodingProfile);
if (fileOutputNodeResult.Status == AudioFileNodeCreationStatus.Success)
{
fileOutputNode = fileOutputNodeResult.FileOutputNode;
deviceInputNode.AddOutgoingConnection(deviceOutputNode);
deviceInputNode.AddOutgoingConnection(fileOutputNode);
graph.Start();
}
}
}
}
}

Did you check the manifest capabilities for microphone access and file access? Are you getting an access-denied exception?
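One way to narrow that down (a diagnostic sketch, not from the original post) is to log the creation statuses instead of only checking for Success; an AccessDenied status on the device input node usually points at a missing Microphone capability or a declined consent prompt:
AudioGraphSettings settings = new AudioGraphSettings(AudioRenderCategory.Speech);
CreateAudioGraphResult graphResult = await AudioGraph.CreateAsync(settings);
if (graphResult.Status != AudioGraphCreationStatus.Success)
{
    Debug.WriteLine("Graph creation failed: " + graphResult.Status);
    return;
}
graph = graphResult.Graph;
// The MediaCategory.Speech overload lets the system pick the default capture device.
var inputResult = await graph.CreateDeviceInputNodeAsync(MediaCategory.Speech);
if (inputResult.Status == AudioDeviceNodeCreationStatus.AccessDenied)
{
    // AccessDenied typically means the Microphone capability is not declared in
    // Package.appxmanifest, or the user declined the microphone consent prompt.
    Debug.WriteLine("Microphone access denied.");
    return;
}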

Related

Invalid cast from IDirect3DSurface to SoftwareBitmap

I'm trying to process frames from a webcam in a WPF application using the UWP API.
There is an article on how to work with MediaCapture and MediaFrameReader:
https://learn.microsoft.com/en-us/windows/uwp/audio-video-camera/process-media-frames-with-mediaframereader#handle-the-frame-arrived-event
If I set MemoryPreference to Cpu, the SoftwareBitmap in the FrameArrived event is always null. When I use Auto, I can see IDirect3DSurface objects in the event, but converting them to a SoftwareBitmap raises the exception "Specified cast is not valid."
How can I convert an IDirect3DSurface to a SoftwareBitmap?
private async void MediaCaptureExample()
{
var frameSourceGroups = await MediaFrameSourceGroup.FindAllAsync();
MediaFrameSourceGroup selectedGroup = null;
MediaFrameSourceInfo colorSourceInfo = null;
foreach (var sourceGroup in frameSourceGroups)
{
foreach (var sourceInfo in sourceGroup.SourceInfos)
{
if (sourceInfo.MediaStreamType == MediaStreamType.VideoRecord && sourceInfo.SourceKind == MediaFrameSourceKind.Color)
{
colorSourceInfo = sourceInfo;
break;
}
}
if (colorSourceInfo != null)
{
selectedGroup = sourceGroup;
break;
}
}
capture = new MediaCapture();
var settings = new MediaCaptureInitializationSettings()
{
SourceGroup = selectedGroup,
SharingMode = MediaCaptureSharingMode.ExclusiveControl,
MemoryPreference = MediaCaptureMemoryPreference.Auto,
StreamingCaptureMode = StreamingCaptureMode.Video
};
await capture.InitializeAsync(settings);
var colorFrameSource = capture.FrameSources[colorSourceInfo.Id];
var preferredFormat = colorFrameSource.SupportedFormats.Where(format =>
{
return format.VideoFormat.Width >= 1080
&& String.Compare(format.Subtype, MediaEncodingSubtypes.Mjpg, true) == 0;
}).FirstOrDefault();
if (preferredFormat == null)
{
// Our desired format is not supported
return;
}
await colorFrameSource.SetFormatAsync(preferredFormat);
mediaFrameReader = await capture.CreateFrameReaderAsync(colorFrameSource);
mediaFrameReader.FrameArrived += MediaFrameReader_FrameArrived;
var result = await mediaFrameReader.StartAsync();
Console.WriteLine("Result = " + result.ToString());
}
private void MediaFrameReader_FrameArrived(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
{
try
{
var mediaFrameReference = sender.TryAcquireLatestFrame();
var videoMediaFrame = mediaFrameReference?.VideoMediaFrame;
var softwareBitmap = videoMediaFrame?.SoftwareBitmap;
var direct3DSurface = videoMediaFrame?.Direct3DSurface;
if (direct3DSurface != null)
{
var softwareBitmapTask = SoftwareBitmap.CreateCopyFromSurfaceAsync(mediaFrameReference.VideoMediaFrame.Direct3DSurface).AsTask();
softwareBitmap = softwareBitmapTask.Result;
}
if (softwareBitmap != null)
{
using (var stream = new Windows.Storage.Streams.InMemoryRandomAccessStream())
{
var encoderTask = BitmapEncoder.CreateAsync(BitmapEncoder.PngEncoderId, stream).AsTask();
encoderTask.Wait();
var encoder = encoderTask.Result;
encoder.SetSoftwareBitmap(softwareBitmap);
Task t = encoder.FlushAsync().AsTask();
t.Wait();
var image = new System.Windows.Media.Imaging.BitmapImage();
image.BeginInit();
image.StreamSource = stream.AsStream();
image.CacheOption = System.Windows.Media.Imaging.BitmapCacheOption.OnLoad;
image.EndInit();
imageElement.Source = image;
}
}
}
catch(Exception e)
{
Console.WriteLine(e.Message);
}
}
The issue was the format subtype. I changed the format from Mjpg to Nv12, and everything started working properly (even with MediaCaptureMemoryPreference.Auto):
var preferredFormat = colorFrameSource.SupportedFormats.Where(format =>
{
return format.VideoFormat.Width >= 1080 && String.Compare(format.Subtype, MediaEncodingSubtypes.Nv12, true) == 0;
}).FirstOrDefault();
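One note to add (an assumption, not stated in the original answer): with Nv12 frames the SoftwareBitmap is typically not in a format BitmapEncoder or WPF imaging can consume directly, so converting it to Bgra8 before encoding is a common extra step. A minimal sketch, placed before the encoding code in the FrameArrived handler:
if (softwareBitmap != null &&
    (softwareBitmap.BitmapPixelFormat != BitmapPixelFormat.Bgra8 ||
     softwareBitmap.BitmapAlphaMode != BitmapAlphaMode.Premultiplied))
{
    // Convert to premultiplied BGRA8 so BitmapEncoder can encode the frame.
    softwareBitmap = SoftwareBitmap.Convert(
        softwareBitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
}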

InvalidOperationException in Memory Streams

I am trying to upload an image to Cloudinary. The file converts to a memory stream fine, but when I call Cloudinary's upload method, I get an InvalidOperationException. I think there is something wrong with converting the file to a stream. See the image showing error
[HttpPost]
public async Task<IActionResult> AddPhotoForUser(int userId, [FromForm] AddPhotoDto addPhotoDto)
{
try
{
if (userId != int.Parse(User.FindFirst(ClaimTypes.NameIdentifier).Value))
{
return Unauthorized();
}
var userFromRepo = await _datingRepository.GetUser(userId);
var file = addPhotoDto.File;
var uploadResult = new ImageUploadResult();
if (file.Length > 0)
{
using (var stream = file.OpenReadStream())
{
var uploadParams = new ImageUploadParams()
{
File = new FileDescription(file.Name, stream),
Transformation = new Transformation()
.Width(500).Height(500).Crop("fill").Gravity("face")
};
uploadResult = _cloudinary.Upload(uploadParams);
}
}
addPhotoDto.Url = uploadResult.Url.ToString();
addPhotoDto.PublicId = uploadResult.PublicId;
var photo = _mapper.Map<Photo>(addPhotoDto);
if (!userFromRepo.Photos.Any(p => p.IsMain))
{
photo.IsMain = true;
}
userFromRepo.Photos.Add(photo);
if (await _datingRepository.SaveAll())
{
var photoToReturn = _mapper.Map<ReturnPhotoDto>(photo);
return CreatedAtRoute("GetPhoto", new { id = photo.Id }, photoToReturn);
}
return BadRequest("Could not add photo");
}
catch (Exception ex)
{
return BadRequest(ex.Message);
}
}
Can you please share why you use OpenReadStream? You can try:
var imageuploadParams = new ImageUploadParams () {
File = new FileDescription(@"https://res.cloudinary.com/demo/image/upload/v1561532539/sample.jpg"),
PublicId = "myimage",
Transformation = new Transformation().Width(500).Height(500).Crop("fill").Gravity("face")
};
var ImageuploadResult = cloudinary.Upload (imageuploadParams);
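If you do need to upload the posted file rather than a URL, one common pitfall (an assumption on my part, not confirmed by the original post) is handing Cloudinary a stream whose position is already at the end after copying. A minimal sketch that copies the IFormFile into a MemoryStream and rewinds it first; note it uses file.FileName (the original client file name) rather than file.Name (the form field name):
using (var memoryStream = new MemoryStream())
{
    await file.CopyToAsync(memoryStream);
    memoryStream.Position = 0; // rewind; an un-rewound stream uploads as empty

    var uploadParams = new ImageUploadParams()
    {
        File = new FileDescription(file.FileName, memoryStream),
        Transformation = new Transformation()
            .Width(500).Height(500).Crop("fill").Gravity("face")
    };
    uploadResult = _cloudinary.Upload(uploadParams);
}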

Setting MediaStreamProperties on HoloLens does not work

I am working on a UWP app for the HoloLens that reads single frames from the device's camera. I want to use the camera mode with the lowest resolution available.
I took a look at the following links and examples and tried to create a minimal working app:
https://learn.microsoft.com/en-us/windows/uwp/audio-video-camera/set-media-encoding-properties
https://learn.microsoft.com/en-us/windows/uwp/audio-video-camera/use-opencv-with-mediaframereader
https://github.com/Microsoft/Windows-universal-samples/tree/master/Samples/CameraResolution
https://github.com/microsoft/Windows-universal-samples/tree/master/Samples/CameraOpenCV
This is the code snippet from MainPage.xaml.cs:
public async Task<int> Start()
{
// Find the sources
var allGroups = await MediaFrameSourceGroup.FindAllAsync();
var sourceGroups = allGroups.Select(g => new
{
Group = g,
SourceInfo = g.SourceInfos.FirstOrDefault(i => i.SourceKind == MediaFrameSourceKind.Color)
}).Where(g => g.SourceInfo != null).ToList();
if (sourceGroups.Count == 0)
{
// No camera sources found
return 0;
}
var selectedSource = sourceGroups.FirstOrDefault();
// Initialize MediaCapture
_mediaCapture = new MediaCapture();
var settings = new MediaCaptureInitializationSettings()
{
SourceGroup = selectedSource.Group,
SharingMode = MediaCaptureSharingMode.ExclusiveControl,
StreamingCaptureMode = StreamingCaptureMode.Video,
MemoryPreference = MediaCaptureMemoryPreference.Cpu
};
await _mediaCapture.InitializeAsync(settings);
// Query all properties of the device
IEnumerable<StreamResolution> allVideoProperties = _mediaCapture.VideoDeviceController.GetAvailableMediaStreamProperties(MediaStreamType.VideoRecord).Select(x => new StreamResolution(x));
// Order them by resolution then frame rate
allVideoProperties = allVideoProperties.OrderBy(x => x.Height * x.Width).ThenBy(x => x.FrameRate);
await _mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoRecord, allVideoProperties.ElementAt(0).EncodingProperties);
// Create the frame reader
MediaFrameSource frameSource = _mediaCapture.FrameSources[selectedSource.SourceInfo.Id];
_reader = await _mediaCapture.CreateFrameReaderAsync(frameSource, MediaEncodingSubtypes.Bgra8);
_reader.FrameArrived += ColorFrameReader_FrameArrivedAsync;
await _reader.StartAsync();
return 1;
}
private async void ColorFrameReader_FrameArrivedAsync(MediaFrameReader sender, MediaFrameArrivedEventArgs args)
{
var frame = sender.TryAcquireLatestFrame();
if (frame != null)
{
var inputBitmap = frame.VideoMediaFrame?.SoftwareBitmap;
}
}
On my local machine (MacBookPro with Bootcamp partition) this code works using the webcam. It detects three supported video modes. I can change the resolution of the bitmap image in FrameArrivedAsync by changing the index from 0 to 1 or 2 at:
_mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoRecord, allVideoProperties.ElementAt(0).EncodingProperties);
On HoloLens this code does not work. It detects the different modes as explained here (https://learn.microsoft.com/en-us/windows/mixed-reality/locatable-camera), but setting the MediaStreamProperties does not change anything about the received bitmap image. The bitmap is always 1280x720.
Just in case, we want to share how we set up the capture profile; you can refer to the following annotated code to modify your project for testing. If in doubt, please feel free to add comments.
private async void SetupAndStartMediaCapture()
{
string deviceId = string.Empty;
_mediaCapture = new MediaCapture();
DeviceInformationCollection devices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
foreach (var device in devices)
{
if(MediaCapture.IsVideoProfileSupported(device.Id))
{
deviceId = device.Id;
break; // The video device for which supported video profile support is queried.
}
}
MediaCaptureInitializationSettings mediaCapSettings = new MediaCaptureInitializationSettings
{
VideoDeviceId = deviceId
};
IReadOnlyList<MediaCaptureVideoProfile> profiles = MediaCapture.FindAllVideoProfiles(deviceId);
var profileMatch = (
from profile in profiles
from desc in profile.SupportedRecordMediaDescription
where desc.Width == 896 && desc.Height == 504 && desc.FrameRate == 24 // HL1
select new { profile, desc }
).FirstOrDefault();// Select the Profile with the required resolution from all available profiles.
if (profileMatch != null)
{
mediaCapSettings.VideoProfile = profileMatch.profile;
mediaCapSettings.RecordMediaDescription = profileMatch.desc;
}
await _mediaCapture.InitializeAsync(mediaCapSettings); //Initializes the MediaCapture object.
}
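As a small follow-up sketch (assuming the same deviceId as above; not part of the original answer), you can log every profile and record description the device reports, to see which resolutions are actually selectable before hard-coding one:
IReadOnlyList<MediaCaptureVideoProfile> allProfiles = MediaCapture.FindAllVideoProfiles(deviceId);
foreach (var profile in allProfiles)
{
    foreach (var desc in profile.SupportedRecordMediaDescription)
    {
        // Width/Height/FrameRate describe one selectable record mode of this profile.
        Debug.WriteLine($"Profile {profile.Id}: {desc.Width}x{desc.Height} @ {desc.FrameRate} fps");
    }
}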

UWP Windows 10 IoT 2 channel Simultaneous Audio Recording

I would like to know how to record two separate audio channels simultaneously.
I have two USB adapters, each with a mic and speaker.
The sample code I can find only supports recording a single channel at a time.
Please help. Thanks.
For a single channel, my code is as follows:
MediaCapture audioCapture = new MediaCapture();
MediaCaptureInitializationSettings captureInitSettings = new MediaCaptureInitializationSettings();
captureInitSettings.StreamingCaptureMode = StreamingCaptureMode.Audio;
captureInitSettings.MediaCategory = MediaCategory.Other;
captureInitSettings.AudioProcessing = AudioProcessing.Default;
await audioCapture.InitializeAsync(captureInitSettings);
private async void recordChannelA()
{
StorageFolder externalDevices = KnownFolders.RemovableDevices;
IReadOnlyList<StorageFolder> externalDrives = await externalDevices.GetFoldersAsync();
StorageFolder usbStorage = externalDrives[0];
if (usbStorage != null)
{
StorageFolder recordFolder = await usbStorage.CreateFolderAsync(recFolderName, CreationCollisionOption.OpenIfExists);
await usbStorage.GetFolderAsync(recFolderName);
StorageFile recordFile = await recordFolder.CreateFileAsync("Recording - " + DateTime.Now.ToString("yyyy-MM-dd_HH-mm-ss") + ".m4a", Windows.Storage.CreationCollisionOption.GenerateUniqueName);
MediaEncodingProfile profile = null;
profile = MediaEncodingProfile.CreateM4a(Windows.Media.MediaProperties.AudioEncodingQuality.Auto);
await audioCapture.StartRecordToStorageFileAsync(profile, recordFile);
Message.Text = "Recording ... ";
recordingtimerRun = new TimeSpan(0, 0, 0);
recordingTimer.Start();
}
else Message.Text = "Recording error !";
}
Update;
I created a ListView of the enumerated devices to select the respective capture device. However, there is a compile error: I cannot implicitly convert Windows.Devices.Enumeration.DeviceInformation to IMediaSource.
captureInitSettings.AudioSource = captureDeviceList[audioCaptureList.SelectedIndex];
Update: I managed to get it to work.
The solution is:
captureInitSettingsA.AudioDeviceId = captureDeviceList[audioCaptureList.SelectedIndex].Id;
captureInitSettingsB.AudioDeviceId = captureDeviceList[audioCaptureList.SelectedIndex].Id;
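For context, a minimal sketch of the overall approach (the device list, file variables, and local names here are assumptions, not taken from the post): give each MediaCaptureInitializationSettings its own AudioDeviceId, initialize two MediaCapture objects, and start each one recording to its own file:
var captureDevices = await DeviceInformation.FindAllAsync(DeviceClass.AudioCapture);
// Assumes at least two capture devices (the two USB adapters) are present.
var captureInitSettingsA = new MediaCaptureInitializationSettings
{
    StreamingCaptureMode = StreamingCaptureMode.Audio,
    MediaCategory = MediaCategory.Other,
    AudioDeviceId = captureDevices[0].Id
};
var captureInitSettingsB = new MediaCaptureInitializationSettings
{
    StreamingCaptureMode = StreamingCaptureMode.Audio,
    MediaCategory = MediaCategory.Other,
    AudioDeviceId = captureDevices[1].Id
};
var audioCaptureA = new MediaCapture();
var audioCaptureB = new MediaCapture();
await audioCaptureA.InitializeAsync(captureInitSettingsA);
await audioCaptureB.InitializeAsync(captureInitSettingsB);
// fileA and fileB are hypothetical StorageFile targets created beforehand.
var profile = MediaEncodingProfile.CreateM4a(AudioEncodingQuality.Auto);
await audioCaptureA.StartRecordToStorageFileAsync(profile, fileA);
await audioCaptureB.StartRecordToStorageFileAsync(profile, fileB);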
However, how do I save these selections in app settings so that I don't have to re-select them after a reboot?
Update:
I managed to save the app settings for the audio capture and audio render devices, but I am not sure how to retrieve them, nor how to check whether any previous settings were saved.
Windows.Storage.ApplicationDataContainer localSettings = Windows.Storage.ApplicationData.Current.LocalSettings;
Windows.Storage.StorageFolder localFolder = Windows.Storage.ApplicationData.Current.LocalFolder;
localSettings.Values["audioACaptureSettings"] = captureAInitSettings.AudioDeviceId;
localSettings.Values["audioARenderSettings"] = mediaPlayerA.AudioDevice.Id;
localSettings.Values["audioBCaptureSettings"] = captureBInitSettings.AudioDeviceId;
localSettings.Values["audioBRenderSettings"] = mediaPlayerB.AudioDevice.Id;
private void loadAudioConfig()
{
Windows.Storage.ApplicationDataContainer localSettings = Windows.Storage.ApplicationData.Current.LocalSettings;
Windows.Storage.StorageFolder localFolder = Windows.Storage.ApplicationData.Current.LocalFolder;
if (localSettings.Values["audioACaptureSettings"] != null)
{
captureAInitSettings.AudioDeviceId = localSettings.Values["audioACaptureSettings"].ToString();
}
if (localSettings.Values["audioARenderSettings"] != null)
{
Object audioARenderValue = localSettings.Values["audioARenderSettings"];
mediaPlayerA.AudioDevice = audioARenderValue;
}
if (localSettings.Values["PAaudioCaptureSettings"] != null)
{
captureBInitSettings.AudioDeviceId = localSettings.Values["audioBCaptureSettings"].ToString();
}
if (localSettings.Values["PAaudioRenderSettings"] != null)
{
Object audioBRenderValue = localSettings.Values["audioBRenderSettings"];
mediaPlayerB.AudioDevice = audioBRenderValue;
}
}
You can refer to this document, which describes how to store and retrieve settings and other app data. You can save the data to settings or to files.
When you use settings, only the data types listed in the document are supported.
If you use files, you can store binary data or your own custom serialized types.
In your provided code, the check for previously saved settings is correct:
if (localSettings.Values["audioACaptureSettings"] != null)
{
captureAInitSettings.AudioDeviceId = localSettings.Values["audioACaptureSettings"].ToString();
}
But the way the setting is retrieved as an AudioDevice is incorrect, because a string cannot be implicitly converted to a DeviceInformation. Please try it this way:
if (localSettings.Values["audioARenderSettings"] != null)
{
var audioSource = localSettings.Values["audioARenderSettings"] as string;
mediaPlayerA.AudioDevice = await DeviceInformation.CreateFromIdAsync(audioSource);
}
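Similarly, for the capture side, a hedged sketch (not from the original answer) that falls back to the default capture device when nothing has been saved yet:
var savedCaptureId = localSettings.Values["audioACaptureSettings"] as string;
// Fall back to the system default capture device if no setting was stored.
captureAInitSettings.AudioDeviceId = savedCaptureId
    ?? MediaDevice.GetDefaultAudioCaptureId(AudioDeviceRole.Default);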

Trying to use back camera followed by front camera c# mediacapture windows phone 8.1

I am writing a program where I want to take a picture with the back camera and then the front camera using a single button.
First it takes a picture with the back camera with no issue, but when I try taking a picture with the front camera, I get an exception saying "No error text found associated with this error code" at the await newFrontCapture.StartPreviewAsync(); line, where newFrontCapture is a MediaCapture object.
The following is the code I am trying:
//code to take back camera image
webcamList = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
backWebcam = (from webcam in webcamList
where webcam.EnclosureLocation != null
&& webcam.EnclosureLocation.Panel == Windows.Devices.Enumeration.Panel.Back
select webcam).FirstOrDefault();
MediaCapture newCapture = null;
DeviceInformationCollection webcamList;
const string filename = "mysetting.txt";
StorageFolder sf = null;
DeviceInformation backWebcam;
try
{
if (newCapture!= null)
newCapture.Dispose();
newCapture = new MediaCapture();
await newCapture.InitializeAsync(new MediaCaptureInitializationSettings()
{
VideoDeviceId = backWebcam.Id
});
cp.Source = newCapture;
// Start the preview
await newCapture.StartPreviewAsync();
}
catch (Exception ex)
{
newCapture.Dispose();
}
StorageFolder folder = ApplicationData.Current.LocalFolder;
var picPath = "Image_Test_" + Convert.ToString(new Random());
StorageFile captureFile = await folder.CreateFileAsync(picPath, CreationCollisionOption.GenerateUniqueName);
ImageEncodingProperties imageProperties = ImageEncodingProperties.CreateJpeg();
//Capture your picture into the given storage file
await newCapture.CapturePhotoToStorageFileAsync(imageProperties, captureFile);
BitmapImage bitmapToShow = new BitmapImage(new Uri(captureFile.Path));
imagePreivew.Source = bitmapToShow; // show image on screen inside Image
captureFile = null;
await newCapture.StopPreviewAsync();
newCapture.Dispose();
Frame.Navigate(typeof(FrontImagePage),imagePreivew);
}
catch (Exception ex)
{
printvlaue.Text = ex.Message;
await newCapture.StopPreviewAsync();
newCapture.Dispose();// disposing the object of mediacapture (back camera object)
}
// Code to take front camera pic
try
{
webcamList = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
frontWebCam = (from webcam in webcamList
where webcam.EnclosureLocation != null
&& webcam.EnclosureLocation.Panel == Windows.Devices.Enumeration.Panel.Front
select webcam).FirstOrDefault();
newFrontCapture = new MediaCapture();
await newFrontCapture.InitializeAsync(new MediaCaptureInitializationSettings()
{
VideoDeviceId = frontWebCam.Id,
PhotoCaptureSource = PhotoCaptureSource.Photo,
StreamingCaptureMode=StreamingCaptureMode.Video
});
//await newFrontCapture.InitializeAsync(new MediaCaptureInitializationSettings()
//{
// VideoDeviceId = frontWebCam.Id,
// PhotoCaptureSource = PhotoCaptureSource.Photo
//});
await newFrontCapture.StartPreviewAsync();
StorageFolder folder = ApplicationData.Current.LocalFolder;
var picFront = "Image_Test_Front" + Convert.ToString(new Random());
StorageFile captureFrontFile = await folder.CreateFileAsync(picFront, CreationCollisionOption.GenerateUniqueName);
ImageEncodingProperties imageFrontProperties = ImageEncodingProperties.CreateJpeg();
//Capture your picture into the given storage file
await newFrontCapture.CapturePhotoToStorageFileAsync(imageFrontProperties, captureFrontFile);
BitmapImage bitmapToShowFront = new BitmapImage(new Uri(captureFrontFile.Path));
imageFront.Source = bitmapToShowFront;
newFrontCapture.Dispose();
newFrontCapture = null;
imageBack.Source = this.im_.Source;
}
catch (Exception ex)
{
await newFrontCapture.StopPreviewAsync();
newFrontCapture.Dispose();
//throw;
}
The code below is working; it captures both front and back pictures in one click.
private async Task CaptureBackAndFront()
{
//front Camera capture...
DeviceInformationCollection webcamList;
webcamList = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
DeviceInformation backWebcam;
backWebcam = (from webcam in webcamList
where webcam.EnclosureLocation != null && webcam.EnclosureLocation.Panel == Windows.Devices.Enumeration.Panel.Back
select webcam).FirstOrDefault();
MediaCapture newCapture = new MediaCapture();
StorageFolder folder = ApplicationData.Current.LocalFolder;
try
{
await newCapture.InitializeAsync(new MediaCaptureInitializationSettings()
{
VideoDeviceId = backWebcam.Id
});
cp.Source = newCapture;
await newCapture.StartPreviewAsync();
var picPath = "Image_Test_" + Convert.ToString(new Random());
StorageFile captureFile = await folder.CreateFileAsync(picPath, CreationCollisionOption.GenerateUniqueName);
ImageEncodingProperties imageProperties = ImageEncodingProperties.CreateJpeg();
//Capture your picture into the given storage file
await newCapture.CapturePhotoToStorageFileAsync(imageProperties, captureFile);
BitmapImage bitmapToShow = new BitmapImage(new Uri(captureFile.Path));
imagePreivew.Source = bitmapToShow; // show image on screen inside Image
captureFile = null;
}
catch (Exception ex)
{
//handel error situation...
}
finally
{
await newCapture.StopPreviewAsync();
newCapture.Dispose();
}
// Code to take front camera pic
MediaCapture newFrontCapture = new MediaCapture();
try
{
var frontWebCam = (from webcam in webcamList
where webcam.EnclosureLocation != null
&& webcam.EnclosureLocation.Panel == Windows.Devices.Enumeration.Panel.Front
select webcam).FirstOrDefault();
await newFrontCapture.InitializeAsync(new MediaCaptureInitializationSettings()
{
VideoDeviceId = frontWebCam.Id,
});
cp.Source = newFrontCapture;
await newFrontCapture.StartPreviewAsync();
var picFront = "Image_Test_Front" + Convert.ToString(new Random());
StorageFile captureFrontFile = await folder.CreateFileAsync(picFront, CreationCollisionOption.GenerateUniqueName);
ImageEncodingProperties imageFrontProperties = ImageEncodingProperties.CreateJpeg();
//Capture your picture into the given storage file
await newFrontCapture.CapturePhotoToStorageFileAsync(imageFrontProperties, captureFrontFile);
BitmapImage bitmapToShowFront = new BitmapImage(new Uri(captureFrontFile.Path));
imagePreivew1.Source = bitmapToShowFront;
}
catch (Exception ex)
{
// Hanel error situation...
}
finally
{
await newFrontCapture.StopPreviewAsync();
newFrontCapture.Dispose();
newFrontCapture = null;
}
}
Hope this helps
