I'm trying to use ID2D1SpriteBatch in Direct2D to get a performance boost over the regular DrawBitmap().
I managed to set it up, but I get "The object was not in the correct state to process the method" when I call DeviceContext.EndDraw().
I can get DeviceContext.DrawBitmap() to work (see the commented-out section). I've tried everything I can think of to get the device context into the right state to handle the sprite batch, but no luck.
I tried to boil this sample down as much as possible, but I also didn't want to leave out any step in case that was the culprit.
Any ideas how to get it to work?
using SharpDX;
using _d2d = SharpDX.Direct2D1;
using _d3d = SharpDX.Direct3D;
using _d3d11 = SharpDX.Direct3D11;
using _dxgi = SharpDX.DXGI;
using _directWrite = SharpDX.DirectWrite;
using _wic = SharpDX.WIC;
using SharpDX.Direct2D1;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX.IO;
using SharpDX.Mathematics.Interop;
namespace TestApp
{
public class SpriteBatchIssue
{
[STAThread]
static void Main(string[] args)
{
var app = new SpriteBatchIssue();
app.Run();
}
bool isClosed = false;
public void Run()
{
#region setup resources
var clientSize = new Size2(1000, 500);
var mainForm = new RenderForm();
mainForm.ClientSize = new System.Drawing.Size(
clientSize.Width,
clientSize.Height);
mainForm.FormClosed += mainForm_FormClosed;
var scDescription = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription =
new ModeDescription(
clientSize.Width,
clientSize.Height,
new Rational(60, 1),
Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = mainForm.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
// Create Device and SwapChain
_d3d11.Device d3d11Device;
SwapChain swapChain;
_d3d11.Device.CreateWithSwapChain(
DriverType.Hardware,
DeviceCreationFlags.BgraSupport,
new[] { _d3d.FeatureLevel.Level_12_1 },
scDescription,
out d3d11Device,
out swapChain);
// Ignore all windows events
var dxgiFactory = swapChain.GetParent<_dxgi.Factory1>();
dxgiFactory.MakeWindowAssociation(mainForm.Handle, WindowAssociationFlags.IgnoreAll);
// New RenderTargetView from the backbuffer
var backBuffer = Texture2D.FromSwapChain<Texture2D>(swapChain, 0);
var backBufferView = new RenderTargetView(d3d11Device, backBuffer);
var d2dFactory = new _d2d.Factory();
var d2dFactory4 = d2dFactory.QueryInterface<_d2d.Factory4>();
var dxgiDevice = d3d11Device.QueryInterface<_dxgi.Device>();
var d2dDevice3 = new _d2d.Device3(d2dFactory4, dxgiDevice);
var d2dDeviceContext3 = new _d2d.DeviceContext3(d2dDevice3, DeviceContextOptions.None);
using (var surface = backBuffer.QueryInterface<Surface>())
{
var bmpProperties = new BitmapProperties1(
new PixelFormat(Format.R8G8B8A8_UNorm, _d2d.AlphaMode.Premultiplied),
dpiX: 96,
dpiY: 96,
bitmapOptions: BitmapOptions.Target | BitmapOptions.CannotDraw);
var d2dTarget = new Bitmap1(
d2dDeviceContext3,
surface,
bmpProperties);
d2dDeviceContext3.Target = d2dTarget;
}
#endregion
#region setup drawing parameters
var bmp = createD2DBitmap(@"C:\yourPath\yourImage.png", d2dDeviceContext3);
var spriteBatch = new SpriteBatch(d2dDeviceContext3);
var destinationRects = new RawRectangleF[1];
destinationRects[0] = new RectangleF(100, 50, bmp.Size.Width, bmp.Size.Height);
var sourceRects = new RawRectangle[1];
sourceRects[0] = new RectangleF(0, 0, bmp.Size.Width, bmp.Size.Height);
var colors = new RawColor4[1];
colors[0] = Color.White;
var transforms = new RawMatrix3x2[1];
transforms[0] = Matrix3x2.Identity;
#endregion
#region mainLoop
RenderLoop.Run(mainForm, () =>
{
if (isClosed)
{
return;
}
d3d11Device.ImmediateContext.Rasterizer.SetViewport(new Viewport(0, 0, clientSize.Width, clientSize.Height));
d3d11Device.ImmediateContext.OutputMerger.SetTargets(backBufferView);
d2dDeviceContext3.BeginDraw();
//this technique works
//d2dDeviceContext3.DrawBitmap(
// bitmap: bmp,
// destinationRectangle: destinationRects[0],
// opacity: 1,
// interpolationMode: BitmapInterpolationMode.Linear,
// sourceRectangle: new RectangleF(0, 0, bmp.Size.Width, bmp.Size.Height));
//this technique does not work
spriteBatch.Clear();
spriteBatch.AddSprites(
1,
destinationRects,
sourceRects,
colors,
transforms,
destinationRectanglesStride: 0, //0 stride because there is only 1 element
sourceRectanglesStride: 0, //I've also tried using Marshal.SizeOf() to get the stride, but I get the same error
colorsStride: 0,
transformsStride: 0);
d2dDeviceContext3.DrawSpriteBatch(
spriteBatch: spriteBatch,
startIndex: 0,
spriteCount: 1,
bitmap: bmp,
interpolationMode: BitmapInterpolationMode.Linear,
spriteOptions: SpriteOptions.ClampToSourceRectangle);
//when using the spritebatch technique, this throws exception:
// "The object was not in the correct state to process the method"
d2dDeviceContext3.EndDraw();
//setting the first param to 1 would wait for vertical blanking (vsync)
swapChain.Present(0, PresentFlags.None);
});
#endregion
}
Bitmap createD2DBitmap(string filePath, _d2d.DeviceContext deviceContext)
{
var imagingFactory = new _wic.ImagingFactory();
var fileStream = new NativeFileStream(
filePath,
NativeFileMode.Open,
NativeFileAccess.Read);
var bitmapDecoder = new _wic.BitmapDecoder(imagingFactory, fileStream, _wic.DecodeOptions.CacheOnDemand);
var frame = bitmapDecoder.GetFrame(0);
var converter = new _wic.FormatConverter(imagingFactory);
converter.Initialize(frame, SharpDX.WIC.PixelFormat.Format32bppPRGBA);
var newBitmap = SharpDX.Direct2D1.Bitmap1.FromWicBitmap(deviceContext, converter);
return newBitmap;
}
void mainForm_FormClosed(object sender, System.Windows.Forms.FormClosedEventArgs e)
{
isClosed = true;
}
}
}
The issue was that you can't use per-primitive antialiasing with a sprite batch. This line before BeginDraw() fixed it: d2dDeviceContext3.AntialiasMode = AntialiasMode.Aliased;
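In isolation, the fix is just setting the antialias mode before drawing. A minimal sketch using the same names as the full example below:
//DrawSpriteBatch requires D2D1_ANTIALIAS_MODE_ALIASED, so disable per-primitive antialiasing
d2dDeviceContext3.AntialiasMode = AntialiasMode.Aliased;
d2dDeviceContext3.BeginDraw();
d2dDeviceContext3.DrawSpriteBatch(spriteBatch, 0, 1, sourceImage,
    BitmapInterpolationMode.Linear, SpriteOptions.ClampToSourceRectangle);
d2dDeviceContext3.EndDraw();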
I also finally learned how to get the debug layer working. Include the debug flag when creating your device (see the comments in the code below). If it throws an exception, that is probably because you don't have the right version of the Windows SDK. If you're using Visual Studio, go to the Visual Studio Installer and modify your installation to include the Windows SDK.
Next, right-click your project -> Properties -> Debug (on the left panel) -> check "Enable native code debugging". After I did this, a line was written to the Output window stating: "D2D DEBUG ERROR - DrawSpriteBatch requires that the antialias mode be set to D2D1_ANTIALIAS_MODE_ALIASED."
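For reference, enabling the debug layer is just one extra flag at device creation (this is the same call used in the full example below):
_d3d11.Device.CreateWithSwapChain(
    DriverType.Hardware,
    DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug, //Debug enables the debug layer
    new[] { _d3d.FeatureLevel.Level_12_1 },
    scDescription,
    out d3d11Device,
    out swapChain);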
Something else I learned that isn't directly related to the answer but is worth noting: "Note that ComObject in SharpDX is not disposed by the .NET finalizer. If a COM object is not released by a call to Dispose() or ReleaseReference(), it will not release the native object and memory attached to it." From here: http://sharpdx.org/wiki/usage/
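In practice that means every SharpDX COM object needs either a using block or an explicit Dispose() call. A minimal sketch:
// short-lived objects: scope them with using
using (var surface = backBuffer.QueryInterface<Surface>())
{
    // ... create the render target bitmap from the surface ...
}
// long-lived objects: dispose explicitly when shutting down
spriteBatch.Dispose();
d2dDeviceContext3.Dispose();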
Here's a full working example:
using SharpDX;
using _d2d = SharpDX.Direct2D1;
using _d3d = SharpDX.Direct3D;
using _d3d11 = SharpDX.Direct3D11;
using _dxgi = SharpDX.DXGI;
using _directWrite = SharpDX.DirectWrite;
using _wic = SharpDX.WIC;
using SharpDX.Direct2D1;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX.IO;
using SharpDX.Mathematics.Interop;
namespace TestApp
{
public class SpriteBatchIssue
{
[STAThread]
static void Main(string[] args)
{
var app = new SpriteBatchIssue();
app.Run();
}
#region Variables
_d3d11.Device d3d11Device;
SwapChain swapChain;
_dxgi.Factory1 dxgiFactory;
_d2d.Factory d2dFactory;
_d2d.Factory4 d2dFactory4;
_dxgi.Device dxgiDevice;
_d2d.Device3 d2dDevice3;
_d2d.DeviceContext3 d2dDeviceContext3;
Bitmap1 sourceImage;
SpriteBatch spriteBatch;
Bitmap1 d2dTarget;
#endregion
~SpriteBatchIssue()
{
safeDispose(ref d3d11Device);
safeDispose(ref swapChain);
safeDispose(ref dxgiFactory);
safeDispose(ref d2dFactory);
safeDispose(ref d2dFactory4);
safeDispose(ref dxgiDevice);
safeDispose(ref d2dDevice3);
safeDispose(ref d2dDeviceContext3);
safeDispose(ref sourceImage);
safeDispose(ref spriteBatch);
safeDispose(ref d2dTarget);
}
public void Run()
{
#region setup resources
var mainForm = new RenderForm();
var scDescription = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription =
new ModeDescription(
0,
0,
new Rational(60, 1),
Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = mainForm.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
//DeviceCreationFlags.Debug flag below will show debug layer messages in your output window.
//Need proper version of windows sdk for it to work, otherwise it will throw an exception.
//You also need to right click your project->properties->debug (on the left panel)-> check "enable native code debugging"
// Create Device and SwapChain
_d3d11.Device.CreateWithSwapChain(
DriverType.Hardware,
DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug,
new[] { _d3d.FeatureLevel.Level_12_1 },
scDescription,
out d3d11Device,
out swapChain);
// Ignore all windows events
dxgiFactory = swapChain.GetParent<_dxgi.Factory1>();
dxgiFactory.MakeWindowAssociation(mainForm.Handle, WindowAssociationFlags.IgnoreAll);
d2dFactory = new _d2d.Factory();
d2dFactory4 = d2dFactory.QueryInterface<_d2d.Factory4>();
dxgiDevice = d3d11Device.QueryInterface<_dxgi.Device>();
d2dDevice3 = new _d2d.Device3(d2dFactory4, dxgiDevice);
d2dDeviceContext3 = new _d2d.DeviceContext3(d2dDevice3, DeviceContextOptions.None);
#endregion
#region create drawing input
sourceImage = createD2DBitmap(@"yourFile.png", d2dDeviceContext3);
spriteBatch = new SpriteBatch(d2dDeviceContext3);
var destinationRects = new RawRectangleF[1];
destinationRects[0] = new RectangleF(100, 50, sourceImage.Size.Width, sourceImage.Size.Height);
var sourceRects = new RawRectangle[1];
sourceRects[0] = new RectangleF(0, 0, sourceImage.Size.Width, sourceImage.Size.Height);
#endregion
#region mainLoop
RenderLoop.Run(mainForm, () =>
{
if (d2dTarget != null)
{
d2dTarget.Dispose();
d2dTarget = null;
}
using (var backBuffer = Texture2D.FromSwapChain<Texture2D>(swapChain, 0))
{
using (var surface = backBuffer.QueryInterface<Surface>())
{
var bmpProperties = new BitmapProperties1(
new PixelFormat(Format.R8G8B8A8_UNorm, _d2d.AlphaMode.Premultiplied),
dpiX: 96,
dpiY: 96,
bitmapOptions: BitmapOptions.Target | BitmapOptions.CannotDraw);
d2dTarget = new Bitmap1(
d2dDeviceContext3,
surface,
bmpProperties);
d2dDeviceContext3.Target = d2dTarget;
}
}
//the key missing piece: cannot use per primitive antialiasing with spritebatch
d2dDeviceContext3.AntialiasMode = AntialiasMode.Aliased;
d2dDeviceContext3.BeginDraw();
spriteBatch.Clear();
spriteBatch.AddSprites(
1,
destinationRects,
sourceRects,
null,
null,
destinationRectanglesStride: 0, //0 stride because there is only 1 element
sourceRectanglesStride: 0,
colorsStride: 0,
transformsStride: 0);
d2dDeviceContext3.DrawSpriteBatch(
spriteBatch: spriteBatch,
startIndex: 0,
spriteCount: 1,
bitmap: sourceImage,
interpolationMode: BitmapInterpolationMode.Linear,
spriteOptions: SpriteOptions.ClampToSourceRectangle);
d2dDeviceContext3.EndDraw();
//setting the first param to 1 would wait for vertical blanking (vsync)
swapChain.Present(0, PresentFlags.None);
});
#endregion
}
void safeDispose<T>(ref T disposable) where T : class, IDisposable
{
if (disposable != null)
{
disposable.Dispose();
disposable = null;
}
}
Bitmap1 createD2DBitmap(string filePath, _d2d.DeviceContext deviceContext)
{
var imagingFactory = new _wic.ImagingFactory();
var fileStream = new NativeFileStream(
filePath,
NativeFileMode.Open,
NativeFileAccess.Read);
var bitmapDecoder = new _wic.BitmapDecoder(imagingFactory, fileStream, _wic.DecodeOptions.CacheOnDemand);
var frame = bitmapDecoder.GetFrame(0);
var converter = new _wic.FormatConverter(imagingFactory);
converter.Initialize(frame, SharpDX.WIC.PixelFormat.Format32bppPRGBA);
var newBitmap = SharpDX.Direct2D1.Bitmap1.FromWicBitmap(deviceContext, converter);
return newBitmap;
}
}
}
Related
I am building a screen recording app in C# using the Windows Graphics Capture API. I am using this script. I can select a monitor and record it to an mp4 file. I can also select a window and record it. But how can I record a region with this? Ideally I need to give x,y coordinates along with a width and height to record that specific region.
Here are the functions which return a GraphicsCaptureItem for a window or monitor hwnd, which can be used for recording.
public static GraphicsCaptureItem CreateItemForWindow(IntPtr hwnd)
{
var factory = WindowsRuntimeMarshal.GetActivationFactory(typeof(GraphicsCaptureItem));
var interop = (IGraphicsCaptureItemInterop)factory;
var temp = typeof(GraphicsCaptureItem);
var itemPointer = interop.CreateForWindow(hwnd, GraphicsCaptureItemGuid);
var item = Marshal.GetObjectForIUnknown(itemPointer) as GraphicsCaptureItem;
Marshal.Release(itemPointer);
return item;
}
public static GraphicsCaptureItem CreateItemForMonitor(IntPtr hmon)
{
var factory = WindowsRuntimeMarshal.GetActivationFactory(typeof(GraphicsCaptureItem));
var interop = (IGraphicsCaptureItemInterop)factory;
var temp = typeof(GraphicsCaptureItem);
var itemPointer = interop.CreateForMonitor(hmon, GraphicsCaptureItemGuid);
var item = Marshal.GetObjectForIUnknown(itemPointer) as GraphicsCaptureItem;
Marshal.Release(itemPointer);
return item;
}
And this is the recording function:
private async void RecordScreen(GraphicsCaptureItem item)
{
_device = Direct3D11Helpers.CreateDevice();
// Get our encoder properties
uint frameRate = 30;
uint bitrate = 3 * 1000000;
var width = (uint)item.Size.Width;
var height = (uint)item.Size.Height;
// Kick off the encoding
try
{
newFile = GetTempFile();
using (var stream = new FileStream(newFile, FileMode.CreateNew).AsRandomAccessStream())
using (_encoder = new Encoder(_device, item))
{
await _encoder.EncodeAsync(
stream,
width, height, bitrate,
frameRate);
}
}
catch (Exception ex)
{}
}
I achieved this by passing a custom region to CopySubresourceRegion in the WaitForNewFrame method.
public SurfaceWithInfo WaitForNewFrame()
{
.....
using (var multithreadLock = new MultithreadLock(_multithread))
using (var sourceTexture = Direct3D11Helpers.CreateSharpDXTexture2D(_currentFrame.Surface))
{
.....
using (var copyTexture = new SharpDX.Direct3D11.Texture2D(_d3dDevice, description))
{
.....
var region = new SharpDX.Direct3D11.ResourceRegion(
_region.Left,
_region.Top,
0,
_region.Left + _region.Width,
_region.Top + _region.Height,
1
);
_d3dDevice.ImmediateContext.CopyResource(_blankTexture, copyTexture);
_d3dDevice.ImmediateContext.CopySubresourceRegion(sourceTexture, 0, region, copyTexture, 0);
result.Surface = Direct3D11Helpers.CreateDirect3DSurfaceFromSharpDXTexture(copyTexture);
}
}
....
}
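Note that _region is not defined in the snippet above; it's the capture rectangle in desktop coordinates. A hypothetical definition (the values here are assumptions, not from the original code):
// hypothetical: capture an 800x600 region whose top-left corner is at (100, 100)
var _region = new SharpDX.Rectangle(100, 100, 800, 600);
// its Left/Top/Width/Height feed the ResourceRegion above, and the copy
// texture's description should use _region.Width x _region.Height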
Using C#, I have created a video with the WindowsMediaRenderer's Render() method, as shown in the code below. The created video does not play in a web browser; it only runs in Windows Media Player.
Can you please help me resolve this issue?
using (WindowsMediaRenderer renderer = new WindowsMediaRenderer(timeline, resultMobileVideoFile,
WindowsMediaProfiles.HighQualityVideo, null, null))
{
renderer.Render();
}
The full code is:
using (ITimeline timeline = new DefaultTimeline())
{
IGroup groupV = timeline.AddVideoGroup("groupV", 25, 32, 1280, 720);
IGroup groupA = timeline.AddAudioGroup("groupA", 25);
var firstClip = groupV.AddTrack().AddVideo(startingVideoFile);
groupA.AddTrack().AddAudio(audioFile, 0, 0, 5);
var secondClip = groupV.AddTrack().AddVideo(middleVideo, firstClip.Duration);
groupA.AddTrack().AddAudio(middleVideo, firstClip.Duration);
var fourthClip = groupV.AddTrack().AddVideo(endingVideoFile, firstClip.Duration + secondClip.Duration);
groupA.AddTrack().AddAudio(audioFile, firstClip.Duration + secondClip.Duration, 0, 5);
using (WindowsMediaRenderer renderer = new WindowsMediaRenderer(timeline, resultMobileVideoFile,
WindowsMediaProfiles.HighQualityVideo, null, null))
{
renderer.Render();
}
}
The following question, Resizing a DXGI Resource or Texture2D in SharpDX, answers how to resize a printscreen taken with SharpDX by a power of two. I'm trying to resize the printscreen by a variable amount (e.g. 80% of the original size, not necessarily a power of two). Right now I have found "a way to make my goal work" by resizing the bitmap generated by the printscreen. I achieve this by first converting it into a WIC image:
private void button1_Click(object sender, EventArgs e)
{
Stopwatch stopWatchInstance = Stopwatch.StartNew();
//or Bitmap.save(new filestream)
var stream = File.OpenRead("c:\\test\\pc.png");
var test = DrawResizedImage(stream);
stopWatchInstance.Stop();
File.WriteAllBytes("c:\\test\\result.png", test.ToArray());
int previousCalculationTimeServer = (int)(stopWatchInstance.ElapsedMilliseconds % Int32.MaxValue);
}
MemoryStream DrawResizedImage(Stream fileName)
{
ImagingFactory wic = new WIC.ImagingFactory();
D2D.Factory d2d = new D2D.Factory();
FormatConverter image = CreateWicImage(wic, fileName);
var wicBitmap = new WIC.Bitmap(wic, image.Size.Width, image.Size.Height, WIC.PixelFormat.Format32bppPBGRA, WIC.BitmapCreateCacheOption.CacheOnDemand);
var target = new D2D.WicRenderTarget(d2d, wicBitmap, new D2D.RenderTargetProperties());
var bmpPicture = D2D.Bitmap.FromWicBitmap(target, image);
target.BeginDraw();
{
target.DrawBitmap(bmpPicture, new SharpDX.RectangleF(0, 0, target.Size.Width, target.Size.Height), 1.0f, D2D.BitmapInterpolationMode.Linear);
}
target.EndDraw();
var ms = new MemoryStream();
SaveD2DBitmap(wic, wicBitmap, ms);
return ms;
}
void SaveD2DBitmap(WIC.ImagingFactory wicFactory, WIC.Bitmap wicBitmap, Stream outputStream)
{
var encoder = new WIC.BitmapEncoder(wicFactory, WIC.ContainerFormatGuids.Png);
encoder.Initialize(outputStream);
var frame = new WIC.BitmapFrameEncode(encoder);
frame.Initialize();
frame.SetSize(wicBitmap.Size.Width, wicBitmap.Size.Height);
var pixelFormat = wicBitmap.PixelFormat;
frame.SetPixelFormat(ref pixelFormat);
frame.WriteSource(wicBitmap);
frame.Commit();
encoder.Commit();
}
WIC.FormatConverter CreateWicImage(WIC.ImagingFactory wicFactory, Stream stream)
{
var decoder = new WIC.PngBitmapDecoder(wicFactory);
var decodeStream = new WIC.WICStream(wicFactory, stream);
decoder.Initialize(decodeStream, WIC.DecodeOptions.CacheOnLoad);
var decodeFrame = decoder.GetFrame(0);
var scaler = new BitmapScaler(wicFactory);
scaler.Initialize(decodeFrame, 2000, 2000, SharpDX.WIC.BitmapInterpolationMode.Fant);
var test = (BitmapSource)scaler;
var converter = new WIC.FormatConverter(wicFactory);
converter.Initialize(test, WIC.PixelFormat.Format32bppPBGRA);
return converter;
}
Upon clicking the button, the above code resizes a bitmap (containing the printscreen) to 2000x2000. However, the above code is very slow: it takes about 200ms (not counting the file read and write time). I use BitmapScaler to do the resizing.
Does anyone know how to variably resize the output produced by the code from the Resizing a DXGI Resource or Texture2D in SharpDX question, so that the resizing becomes much faster? I tried to find documentation for applying the BitmapScaler directly to any of the objects in the answered code, but didn't succeed.
I've uploaded the above code as a small Visual Studio project which compiles.
Here is a rewritten and commented version of your program that gets a video frame from the desktop using DXGI's Output Duplication, resizes it by an arbitrary ratio using Direct2D, and saves it to a .jpeg file using WIC.
Everything stays on the GPU until the image is saved to a file (stream) using WIC. On my PC, I get something like 10-15 ms for the capture and resize, and 30-40 ms for the WIC save to file.
I've not used the D2D Scale effect I talked about in my comment because the ID2D1DeviceContext::DrawBitmap method can do that resize with various interpolation modes, without using any effect. But you can use the same code to apply hardware-accelerated effects.
Note that some objects I create and dispose in button1_Click (like the factories) could be created once in the constructor and reused.
using System;
using System.Windows.Forms;
using System.IO;
using DXGI = SharpDX.DXGI;
using D3D11 = SharpDX.Direct3D11;
using D2D = SharpDX.Direct2D1;
using WIC = SharpDX.WIC;
using Interop = SharpDX.Mathematics.Interop;
namespace WindowsFormsApp1
{
public partial class Form1 : Form
{
private readonly D3D11.Device _device;
private readonly DXGI.OutputDuplication _outputDuplication;
public Form1()
{
InitializeComponent();
var adapterIndex = 0; // adapter index
var outputIndex = 0; // output index
using (var dxgiFactory = new DXGI.Factory1())
using (var dxgiAdapter = dxgiFactory.GetAdapter1(adapterIndex))
using (var output = dxgiAdapter.GetOutput(outputIndex))
using (var dxgiOutput = output.QueryInterface<DXGI.Output1>())
{
_device = new D3D11.Device(dxgiAdapter,
#if DEBUG
D3D11.DeviceCreationFlags.Debug |
#endif
D3D11.DeviceCreationFlags.BgraSupport); // for D2D support
_outputDuplication = dxgiOutput.DuplicateOutput(_device);
}
}
protected override void Dispose(bool disposing) // remove from Designer.cs
{
if (disposing && components != null)
{
components.Dispose();
_outputDuplication?.Dispose();
_device?.Dispose();
}
base.Dispose(disposing);
}
private void button1_Click(object sender, EventArgs e)
{
var ratio = 0.8; // resize ratio
using (var dxgiDevice = _device.QueryInterface<DXGI.Device>())
using (var d2dFactory = new D2D.Factory1())
using (var d2dDevice = new D2D.Device(d2dFactory, dxgiDevice))
{
// acquire frame
_outputDuplication.AcquireNextFrame(10000, out var _, out var frame);
using (frame)
{
// get DXGI surface/bitmap from resource
using (var frameDc = new D2D.DeviceContext(d2dDevice, D2D.DeviceContextOptions.None))
using (var frameSurface = frame.QueryInterface<DXGI.Surface>())
using (var frameBitmap = new D2D.Bitmap1(frameDc, frameSurface))
{
// create a GPU resized texture/surface/bitmap
var desc = new D3D11.Texture2DDescription
{
CpuAccessFlags = D3D11.CpuAccessFlags.None, // only GPU
BindFlags = D3D11.BindFlags.RenderTarget, // to use D2D
Format = DXGI.Format.B8G8R8A8_UNorm,
Width = (int)(frameSurface.Description.Width * ratio),
Height = (int)(frameSurface.Description.Height * ratio),
OptionFlags = D3D11.ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = D3D11.ResourceUsage.Default
};
using (var texture = new D3D11.Texture2D(_device, desc))
using (var textureDc = new D2D.DeviceContext(d2dDevice, D2D.DeviceContextOptions.None)) // create a D2D device context
using (var textureSurface = texture.QueryInterface<DXGI.Surface>()) // this texture is a DXGI surface
using (var textureBitmap = new D2D.Bitmap1(textureDc, textureSurface)) // we can create a GPU bitmap on a DXGI surface
{
// associate the DC with the GPU texture/surface/bitmap
textureDc.Target = textureBitmap;
// this is where we draw on the GPU texture/surface
textureDc.BeginDraw();
// this will automatically resize
textureDc.DrawBitmap(
frameBitmap,
new Interop.RawRectangleF(0, 0, desc.Width, desc.Height),
1,
D2D.InterpolationMode.HighQualityCubic, // change this for quality vs speed
null,
null);
// commit draw
textureDc.EndDraw();
// now save the file, create a WIC (jpeg) encoder
using (var file = File.OpenWrite("test.jpg"))
using (var wic = new WIC.ImagingFactory2())
using (var jpegEncoder = new WIC.BitmapEncoder(wic, WIC.ContainerFormatGuids.Jpeg))
{
jpegEncoder.Initialize(file);
using (var jpegFrame = new WIC.BitmapFrameEncode(jpegEncoder))
{
jpegFrame.Initialize();
// here we use the ImageEncoder (IWICImageEncoder)
// that can write any D2D bitmap directly
using (var imageEncoder = new WIC.ImageEncoder(wic, d2dDevice))
{
imageEncoder.WriteFrame(textureBitmap, jpegFrame, new WIC.ImageParameters(
new D2D.PixelFormat(desc.Format, D2D.AlphaMode.Premultiplied),
textureDc.DotsPerInch.Width,
textureDc.DotsPerInch.Height,
0,
0,
desc.Width,
desc.Height));
}
// commit
jpegFrame.Commit();
jpegEncoder.Commit();
}
}
}
}
}
_outputDuplication.ReleaseFrame();
}
}
}
}
I'm trying to find a working sample to record videos on iOS (using Xamarin), but there's always something missing or not working for me.
My best attempt, pieced together from several forum posts and samples, is the following:
using System;
using CoreGraphics;
using Foundation;
using UIKit;
using AVFoundation;
using CoreVideo;
using CoreMedia;
using CoreFoundation;
using System.IO;
using AssetsLibrary;
namespace avcaptureframes {
public partial class AppDelegate : UIApplicationDelegate {
public static UIImageView ImageView;
UIViewController vc;
AVCaptureSession session;
OutputRecorder outputRecorder;
DispatchQueue queue;
public override bool FinishedLaunching (UIApplication application, NSDictionary launchOptions)
{
ImageView = new UIImageView (new CGRect (10f, 10f, 200f, 200f));
ImageView.ContentMode = UIViewContentMode.Top;
vc = new UIViewController {
View = ImageView
};
window.RootViewController = vc;
window.MakeKeyAndVisible ();
window.BackgroundColor = UIColor.Black;
if (!SetupCaptureSession ())
window.AddSubview (new UILabel (new CGRect (20f, 20f, 200f, 60f)) {
Text = "No input device"
});
return true;
}
bool SetupCaptureSession ()
{
// configure the capture session for low resolution, change this if your code
// can cope with more data or volume
session = new AVCaptureSession {
SessionPreset = AVCaptureSession.PresetMedium
};
// create a device input and attach it to the session
var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
if (captureDevice == null) {
Console.WriteLine ("No captureDevice - this won't work on the simulator, try a physical device");
return false;
}
//Configure for 15 FPS. Note the use of LockForConfiguration()/UnlockForConfiguration()
NSError error = null;
captureDevice.LockForConfiguration (out error);
if (error != null) {
Console.WriteLine (error);
captureDevice.UnlockForConfiguration ();
return false;
}
if (UIDevice.CurrentDevice.CheckSystemVersion (7, 0))
captureDevice.ActiveVideoMinFrameDuration = new CMTime (1, 15);
captureDevice.UnlockForConfiguration ();
var input = AVCaptureDeviceInput.FromDevice (captureDevice);
if (input == null) {
Console.WriteLine ("No input - this won't work on the simulator, try a physical device");
return false;
}
session.AddInput (input);
// create a VideoDataOutput and add it to the session
var settings = new CVPixelBufferAttributes {
PixelFormatType = CVPixelFormatType.CV32BGRA
};
using (var output = new AVCaptureVideoDataOutput { WeakVideoSettings = settings.Dictionary }) {
queue = new DispatchQueue ("myQueue");
outputRecorder = new OutputRecorder ();
output.SetSampleBufferDelegate (outputRecorder, queue);
session.AddOutput (output);
}
session.StartRunning ();
return true;
}
public override void OnActivated (UIApplication application)
{
}
public class OutputRecorder : AVCaptureVideoDataOutputSampleBufferDelegate
{
AVAssetWriter writer=null;
AVAssetWriterInput writerinput= null;
CMTime lastSampleTime;
int frame=0;
NSUrl url;
public OutputRecorder()
{
string tempFile = Path.Combine(Path.GetTempPath(), "NewVideo.mp4");
if (File.Exists(tempFile)) File.Delete(tempFile);
url = NSUrl.FromFilename(tempFile);
NSError assetWriterError;
writer = new AVAssetWriter(url, AVFileType.Mpeg4, out assetWriterError);
var outputSettings = new AVVideoSettingsCompressed()
{
Height = 300,
Width = 300,
Codec = AVVideoCodec.H264,
CodecSettings = new AVVideoCodecSettings
{
AverageBitRate = 1000000
}
};
writerinput = new AVAssetWriterInput(mediaType: AVMediaType.Video, outputSettings: outputSettings);
writerinput.ExpectsMediaDataInRealTime = false;
writer.AddInput(writerinput);
}
public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
{
try
{
lastSampleTime = sampleBuffer.PresentationTimeStamp;
var image = ImageFromSampleBuffer(sampleBuffer);
if (frame == 0)
{
writer.StartWriting();
writer.StartSessionAtSourceTime(lastSampleTime);
frame = 1;
}
String infoString = "";
if (writerinput.ReadyForMoreMediaData)
{
if (!writerinput.AppendSampleBuffer(sampleBuffer))
{
infoString = "Failed to append sample buffer";
}
else
{
infoString = String.Format("{0} frames captured", frame++);
}
}
else
{
infoString = "Writer not ready";
}
Console.WriteLine(infoString);
ImageView.BeginInvokeOnMainThread(() => ImageView.Image = image);
}
catch (Exception e)
{
Console.WriteLine(e);
}
finally
{
sampleBuffer.Dispose();
}
}
UIImage ImageFromSampleBuffer (CMSampleBuffer sampleBuffer)
{
// Get the CoreVideo image
using (var pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer)
{
// Lock the base address
pixelBuffer.Lock (CVOptionFlags.None);
// Get the number of bytes per row for the pixel buffer
var baseAddress = pixelBuffer.BaseAddress;
var bytesPerRow = (int)pixelBuffer.BytesPerRow;
var width = (int)pixelBuffer.Width;
var height = (int)pixelBuffer.Height;
var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
// Create a CGImage on the RGB colorspace from the configured parameter above
using (var cs = CGColorSpace.CreateDeviceRGB ())
{
using (var context = new CGBitmapContext (baseAddress, width, height, 8, bytesPerRow, cs, (CGImageAlphaInfo)flags))
{
using (CGImage cgImage = context.ToImage ())
{
pixelBuffer.Unlock (CVOptionFlags.None);
return UIImage.FromImage (cgImage);
}
}
}
}
}
void TryDispose (IDisposable obj)
{
if (obj != null)
obj.Dispose ();
}
}
}
}
This works: the live camera image is displayed and I get the "frames captured" message in the console, but I can't find how to record to a file.
I read somewhere about adding VideoCapture, but I don't know how to link it with my code.
Any help is welcome.
From your code, in the constructor of the OutputRecorder class you have defined the url where you want to save the recording:
string tempFile = Path.Combine(Path.GetTempPath(), "NewVideo.mp4");
if (File.Exists(tempFile)) File.Delete(tempFile);
url = NSUrl.FromFilename(tempFile);
This means you want to save the video in the tmp folder in the app's sandbox. If you want to use the video again later, I recommend changing the folder to Documents by using:
string filePath = Path.Combine(NSSearchPath.GetDirectories(NSSearchPathDirectory.DocumentDirectory, NSSearchPathDomain.User)[0], "NewVideo.mp4");
I notice that you have called session.StartRunning(); in the method bool SetupCaptureSession() to start recording. Please add session.StopRunning(); to end recording; the video will then be saved at the path we just defined above.
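Note that stopping the session alone does not finalize the mp4 container; the AVAssetWriter also has to be finished. A minimal sketch of a helper you could add to OutputRecorder (the method name and the FinishWriting step are my assumptions on top of this answer):
// hypothetical helper inside OutputRecorder
public void FinishRecording()
{
    writerinput.MarkAsFinished();  // no more sample buffers will be appended
    writer.FinishWriting(() =>
    {
        Console.WriteLine("Video finalized at " + url.Path);
    });
}
Call session.StopRunning() first so DidOutputSampleBuffer stops firing, then call FinishRecording().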
Moreover, you can retrieve the video from that path like this:
NSData videoData = NSData.FromFile(filePath);