The question Resizing a DXGI Resource or Texture2D in SharpDX explains how to resize a printscreen taken with SharpDX by a power of two. I'm trying to resize the printscreen by a variable amount (e.g. 80% of the original size, not necessarily a power of two). Right now I have found a way that works by resizing the bitmap generated from the printscreen. I do this by first converting it into a WIC image:
private void button1_Click(object sender, EventArgs e)
{
Stopwatch stopWatchInstance = Stopwatch.StartNew();
//or Bitmap.save(new filestream)
var stream = File.OpenRead("c:\\test\\pc.png");
var test = DrawResizedImage(stream);
stopWatchInstance.Stop();
File.WriteAllBytes("c:\\test\\result.png", test.ToArray());
int previousCalculationTimeServer = (int)(stopWatchInstance.ElapsedMilliseconds % Int32.MaxValue);
}
MemoryStream DrawResizedImage(Stream fileName)
{
ImagingFactory wic = new WIC.ImagingFactory();
D2D.Factory d2d = new D2D.Factory();
FormatConverter image = CreateWicImage(wic, fileName);
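// draw the converted image into a new 32bpp PBGRA WIC bitmap through a Direct2D WIC render target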
var wicBitmap = new WIC.Bitmap(wic, image.Size.Width, image.Size.Height, WIC.PixelFormat.Format32bppPBGRA, WIC.BitmapCreateCacheOption.CacheOnDemand);
var target = new D2D.WicRenderTarget(d2d, wicBitmap, new D2D.RenderTargetProperties());
var bmpPicture = D2D.Bitmap.FromWicBitmap(target, image);
target.BeginDraw();
{
target.DrawBitmap(bmpPicture, new SharpDX.RectangleF(0, 0, target.Size.Width, target.Size.Height), 1.0f, D2D.BitmapInterpolationMode.Linear);
}
target.EndDraw();
var ms = new MemoryStream();
SaveD2DBitmap(wic, wicBitmap, ms);
return ms;
}
void SaveD2DBitmap(WIC.ImagingFactory wicFactory, WIC.Bitmap wicBitmap, Stream outputStream)
{
var encoder = new WIC.BitmapEncoder(wicFactory, WIC.ContainerFormatGuids.Png);
encoder.Initialize(outputStream);
var frame = new WIC.BitmapFrameEncode(encoder);
frame.Initialize();
frame.SetSize(wicBitmap.Size.Width, wicBitmap.Size.Height);
var pixelFormat = wicBitmap.PixelFormat;
frame.SetPixelFormat(ref pixelFormat);
frame.WriteSource(wicBitmap);
frame.Commit();
encoder.Commit();
}
WIC.FormatConverter CreateWicImage(WIC.ImagingFactory wicFactory, Stream stream)
{
var decoder = new WIC.PngBitmapDecoder(wicFactory);
var decodeStream = new WIC.WICStream(wicFactory, stream);
decoder.Initialize(decodeStream, WIC.DecodeOptions.CacheOnLoad);
var decodeFrame = decoder.GetFrame(0);
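// scale the decoded frame to a fixed 2000x2000 using the Fant interpolation mode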
var scaler = new BitmapScaler(wicFactory);
scaler.Initialize(decodeFrame, 2000, 2000, SharpDX.WIC.BitmapInterpolationMode.Fant);
var test = (BitmapSource)scaler;
var converter = new WIC.FormatConverter(wicFactory);
converter.Initialize(test, WIC.PixelFormat.Format32bppPBGRA);
return converter;
}
When the button is clicked, the code above resizes a bitmap (containing the printscreen) to 2000x2000. However, it is very slow: it takes about 200 ms (not counting the file read and write time). I use BitmapScaler to do the resizing.
Does anyone know how to resize the output produced in the Resizing a DXGI Resource or Texture2D in SharpDX question by an arbitrary ratio, so the resizing becomes much faster? I looked for documentation on applying a BitmapScaler directly to any of the objects in that answer's code, but didn't succeed.
I've uploaded the above code as a small Visual Studio project that compiles.
Here is a rewritten and commented version of your program that grabs a video frame from the desktop using DXGI's Output Duplication, resizes it by any ratio using Direct2D, and saves it to a .jpeg file using WIC.
Everything stays on the GPU until the image is saved to a file (stream) using WIC. On my PC, I get something like 10-15 ms for the capture and resize, and 30-40 ms for the WIC save to file.
I haven't used the D2D Scale effect I mentioned in my comment, because the ID2D1DeviceContext::DrawBitmap method can do that resize with various interpolation modes, without using any effect. But you can use the same code to apply hardware-accelerated effects.
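To give an idea, here is a minimal sketch of what the Scale effect route could look like, assuming the same textureDc, frameBitmap and ratio variables used in button1_Click below; it is an illustration, not part of the original answer:
// hypothetical alternative to the DrawBitmap call, using the Direct2D Scale effect
using (var scaleEffect = new D2D.Effects.Scale(textureDc))
{
// the desktop frame is the effect input; the scale amount is the same ratio used for the target texture
scaleEffect.SetInput(0, frameBitmap, true);
scaleEffect.ScaleAmount = new Interop.RawVector2 { X = (float)ratio, Y = (float)ratio };
textureDc.BeginDraw();
textureDc.DrawImage(scaleEffect);
textureDc.EndDraw();
}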
Note that some objects I create and dispose in button1_Click (the factories, etc.) could be created once in the constructor and reused.
using System;
using System.Windows.Forms;
using System.IO;
using DXGI = SharpDX.DXGI;
using D3D11 = SharpDX.Direct3D11;
using D2D = SharpDX.Direct2D1;
using WIC = SharpDX.WIC;
using Interop = SharpDX.Mathematics.Interop;
namespace WindowsFormsApp1
{
public partial class Form1 : Form
{
private readonly D3D11.Device _device;
private readonly DXGI.OutputDuplication _outputDuplication;
public Form1()
{
InitializeComponent();
var adapterIndex = 0; // adapter index
var outputIndex = 0; // output index
using (var dxgiFactory = new DXGI.Factory1())
using (var dxgiAdapter = dxgiFactory.GetAdapter1(adapterIndex))
using (var output = dxgiAdapter.GetOutput(outputIndex))
using (var dxgiOutput = output.QueryInterface<DXGI.Output1>())
{
_device = new D3D11.Device(dxgiAdapter,
#if DEBUG
D3D11.DeviceCreationFlags.Debug |
#endif
D3D11.DeviceCreationFlags.BgraSupport); // for D2D support
_outputDuplication = dxgiOutput.DuplicateOutput(_device);
}
}
protected override void Dispose(bool disposing) // remove from Designer.cs
{
if (disposing && components != null)
{
components.Dispose();
_outputDuplication?.Dispose();
_device?.Dispose();
}
base.Dispose(disposing);
}
private void button1_Click(object sender, EventArgs e)
{
var ratio = 0.8; // resize ratio
using (var dxgiDevice = _device.QueryInterface<DXGI.Device>())
using (var d2dFactory = new D2D.Factory1())
using (var d2dDevice = new D2D.Device(d2dFactory, dxgiDevice))
{
// acquire frame
_outputDuplication.AcquireNextFrame(10000, out var _, out var frame);
using (frame)
{
// get DXGI surface/bitmap from resource
using (var frameDc = new D2D.DeviceContext(d2dDevice, D2D.DeviceContextOptions.None))
using (var frameSurface = frame.QueryInterface<DXGI.Surface>())
using (var frameBitmap = new D2D.Bitmap1(frameDc, frameSurface))
{
// create a GPU resized texture/surface/bitmap
var desc = new D3D11.Texture2DDescription
{
CpuAccessFlags = D3D11.CpuAccessFlags.None, // only GPU
BindFlags = D3D11.BindFlags.RenderTarget, // to use D2D
Format = DXGI.Format.B8G8R8A8_UNorm,
Width = (int)(frameSurface.Description.Width * ratio),
Height = (int)(frameSurface.Description.Height * ratio),
OptionFlags = D3D11.ResourceOptionFlags.None,
MipLevels = 1,
ArraySize = 1,
SampleDescription = { Count = 1, Quality = 0 },
Usage = D3D11.ResourceUsage.Default
};
using (var texture = new D3D11.Texture2D(_device, desc))
using (var textureDc = new D2D.DeviceContext(d2dDevice, D2D.DeviceContextOptions.None)) // create a D2D device context
using (var textureSurface = texture.QueryInterface<DXGI.Surface>()) // this texture is a DXGI surface
using (var textureBitmap = new D2D.Bitmap1(textureDc, textureSurface)) // we can create a GPU bitmap on a DXGI surface
{
// associate the DC with the GPU texture/surface/bitmap
textureDc.Target = textureBitmap;
// this is where we draw on the GPU texture/surface
textureDc.BeginDraw();
// this will automatically resize
textureDc.DrawBitmap(
frameBitmap,
new Interop.RawRectangleF(0, 0, desc.Width, desc.Height),
1,
D2D.InterpolationMode.HighQualityCubic, // change this for quality vs speed
null,
null);
// commit draw
textureDc.EndDraw();
// now save the file, create a WIC (jpeg) encoder
using (var file = File.OpenWrite("test.jpg"))
using (var wic = new WIC.ImagingFactory2())
using (var jpegEncoder = new WIC.BitmapEncoder(wic, WIC.ContainerFormatGuids.Jpeg))
{
jpegEncoder.Initialize(file);
using (var jpegFrame = new WIC.BitmapFrameEncode(jpegEncoder))
{
jpegFrame.Initialize();
// here we use the ImageEncoder (IWICImageEncoder)
// that can write any D2D bitmap directly
using (var imageEncoder = new WIC.ImageEncoder(wic, d2dDevice))
{
imageEncoder.WriteFrame(textureBitmap, jpegFrame, new WIC.ImageParameters(
new D2D.PixelFormat(desc.Format, D2D.AlphaMode.Premultiplied),
textureDc.DotsPerInch.Width,
textureDc.DotsPerInch.Height,
0,
0,
desc.Width,
desc.Height));
}
// commit
jpegFrame.Commit();
jpegEncoder.Commit();
}
}
}
}
}
_outputDuplication.ReleaseFrame();
}
}
}
}
Related
I am building a screen recording app in C# using the Windows Graphics Capture API. I am using this script. I can select a monitor and record it to an mp4 file. I can also select a window and record that too. But how can I record a region with this? Ideally, I need to give x, y coordinates along with a width and height to record that specific region.
Here are the functions that return a GraphicsCaptureItem for a window or monitor handle (hwnd/hmon), which can then be used for recording.
public static GraphicsCaptureItem CreateItemForWindow(IntPtr hwnd)
{
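// get the WinRT activation factory for GraphicsCaptureItem and cast it to the COM interop interface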
var factory = WindowsRuntimeMarshal.GetActivationFactory(typeof(GraphicsCaptureItem));
var interop = (IGraphicsCaptureItemInterop)factory;
var temp = typeof(GraphicsCaptureItem);
var itemPointer = interop.CreateForWindow(hwnd, GraphicsCaptureItemGuid);
var item = Marshal.GetObjectForIUnknown(itemPointer) as GraphicsCaptureItem;
Marshal.Release(itemPointer);
return item;
}
public static GraphicsCaptureItem CreateItemForMonitor(IntPtr hmon)
{
var factory = WindowsRuntimeMarshal.GetActivationFactory(typeof(GraphicsCaptureItem));
var interop = (IGraphicsCaptureItemInterop)factory;
var temp = typeof(GraphicsCaptureItem);
var itemPointer = interop.CreateForMonitor(hmon, GraphicsCaptureItemGuid);
var item = Marshal.GetObjectForIUnknown(itemPointer) as GraphicsCaptureItem;
Marshal.Release(itemPointer);
return item;
}
And this is the recording function:
private async void RecordScreen(GraphicsCaptureItem item)
{
_device = Direct3D11Helpers.CreateDevice();
// Get our encoder properties
uint frameRate = 30;
uint bitrate = 3 * 1000000;
var width = (uint)item.Size.Width;
var height = (uint)item.Size.Height;
// Kick off the encoding
try
{
newFile = GetTempFile();
using (var stream = new FileStream(newFile, FileMode.CreateNew).AsRandomAccessStream())
using (_encoder = new Encoder(_device, item))
{
await _encoder.EncodeAsync(
stream,
width, height, bitrate,
frameRate);
}
}
catch (Exception ex)
{}
}
I achieved this by passing a custom region to CopySubresourceRegion in the WaitForNewFrame method (a hypothetical sketch of how the copy texture could be sized to match the region follows the snippet below).
public SurfaceWithInfo WaitForNewFrame()
{
.....
using (var multithreadLock = new MultithreadLock(_multithread))
using (var sourceTexture = Direct3D11Helpers.CreateSharpDXTexture2D(_currentFrame.Surface))
{
.....
using (var copyTexture = new SharpDX.Direct3D11.Texture2D(_d3dDevice, description))
{
.....
var region = new SharpDX.Direct3D11.ResourceRegion(
_region.Left,
_region.Top,
0,
_region.Left + _region.Width,
_region.Top + _region.Height,
1
);
_d3dDevice.ImmediateContext.CopyResource(_blankTexture, copyTexture);
_d3dDevice.ImmediateContext.CopySubresourceRegion(sourceTexture, 0, region, copyTexture, 0);
result.Surface = Direct3D11Helpers.CreateDirect3DSurfaceFromSharpDXTexture(copyTexture);
}
}
....
}
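The copyTexture description is elided above; purely as an illustration, a description sized to the region might look like the following (the format and bind flags are assumptions, not the original code):
// hypothetical Texture2DDescription for copyTexture, sized to the capture region
var description = new SharpDX.Direct3D11.Texture2DDescription
{
Width = _region.Width,
Height = _region.Height,
MipLevels = 1,
ArraySize = 1,
Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
SampleDescription = { Count = 1, Quality = 0 },
Usage = SharpDX.Direct3D11.ResourceUsage.Default,
BindFlags = SharpDX.Direct3D11.BindFlags.ShaderResource | SharpDX.Direct3D11.BindFlags.RenderTarget
};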
I'm trying to make use of ID2D1SpriteBatch in Direct2D to get a performance boost over the regular DrawBitmap().
I managed to set it up, but I get "The object was not in the correct state to process the method" when I call DeviceContext.EndDraw().
I can get DeviceContext.DrawBitmap() to work (see the commented-out section). I've tried everything I can think of to get the device context into the right state to handle the sprite batch, but no luck.
I tried to boil this sample down as much as possible, but also didn't want to leave out any step, just in case that was the culprit.
Any ideas on how to get it to work?
using SharpDX;
using _d2d = SharpDX.Direct2D1;
using _d3d = SharpDX.Direct3D;
using _d3d11 = SharpDX.Direct3D11;
using _dxgi = SharpDX.DXGI;
using _directWrite = SharpDX.DirectWrite;
using _wic = SharpDX.WIC;
using SharpDX.Direct2D1;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX.IO;
using SharpDX.Mathematics.Interop;
namespace TestApp
{
public class SpriteBatchIssue
{
[STAThread]
static void Main(string[] args)
{
var app = new SpriteBatchIssue();
app.Run();
}
bool isClosed = false;
public void Run()
{
#region setup resources
var clientSize = new Size2(1000, 500);
var mainForm = new RenderForm();
mainForm.ClientSize = new System.Drawing.Size(
clientSize.Width,
clientSize.Height);
mainForm.FormClosed += mainForm_FormClosed;
var scDescription = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription =
new ModeDescription(
clientSize.Width,
clientSize.Height,
new Rational(60, 1),
Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = mainForm.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
// Create Device and SwapChain
_d3d11.Device d3d11Device;
SwapChain swapChain;
_d3d11.Device.CreateWithSwapChain(
DriverType.Hardware,
DeviceCreationFlags.BgraSupport,
new[] { _d3d.FeatureLevel.Level_12_1 },
scDescription,
out d3d11Device,
out swapChain);
// Ignore all windows events
var dxgiFactory = swapChain.GetParent<_dxgi.Factory1>();
dxgiFactory.MakeWindowAssociation(mainForm.Handle, WindowAssociationFlags.IgnoreAll);
// New RenderTargetView from the backbuffer
var backBuffer = Texture2D.FromSwapChain<Texture2D>(swapChain, 0);
var backBufferView = new RenderTargetView(d3d11Device, backBuffer);
var d2dFactory = new _d2d.Factory();
var d2dFactory4 = d2dFactory.QueryInterface<_d2d.Factory4>();
var dxgiDevice = d3d11Device.QueryInterface<_dxgi.Device>();
var d2dDevice3 = new _d2d.Device3(d2dFactory4, dxgiDevice);
var d2dDeviceContext3 = new _d2d.DeviceContext3(d2dDevice3, DeviceContextOptions.None);
using (var surface = backBuffer.QueryInterface<Surface>())
{
var bmpProperties = new BitmapProperties1(
new PixelFormat(Format.R8G8B8A8_UNorm, _d2d.AlphaMode.Premultiplied),
dpiX: 96,
dpiY: 96,
bitmapOptions: BitmapOptions.Target | BitmapOptions.CannotDraw);
var d2dTarget = new Bitmap1(
d2dDeviceContext3,
surface,
bmpProperties);
d2dDeviceContext3.Target = d2dTarget;
}
#endregion
#region setup drawing parameters
var bmp = createD2DBitmap(@"C:\yourPath\yourImage.png", d2dDeviceContext3);
var spriteBatch = new SpriteBatch(d2dDeviceContext3);
var destinationRects = new RawRectangleF[1];
destinationRects[0] = new RectangleF(100, 50, bmp.Size.Width, bmp.Size.Height);
var sourceRects = new RawRectangle[1];
sourceRects[0] = new RectangleF(0, 0, bmp.Size.Width, bmp.Size.Height);
var colors = new RawColor4[1];
colors[0] = Color.White;
var transforms = new RawMatrix3x2[1];
transforms[0] = Matrix3x2.Identity;
#endregion
#region mainLoop
RenderLoop.Run(mainForm, () =>
{
if (isClosed)
{
return;
}
d3d11Device.ImmediateContext.Rasterizer.SetViewport(new Viewport(0, 0, clientSize.Width, clientSize.Height));
d3d11Device.ImmediateContext.OutputMerger.SetTargets(backBufferView);
d2dDeviceContext3.BeginDraw();
//this technique works
//d2dDeviceContext3.DrawBitmap(
// bitmap: bmp,
// destinationRectangle: destinationRects[0],
// opacity: 1,
// interpolationMode: BitmapInterpolationMode.Linear,
// sourceRectangle: new RectangleF(0, 0, bmp.Size.Width, bmp.Size.Height));
//this technique does not work
spriteBatch.Clear();
spriteBatch.AddSprites(
1,
destinationRects,
sourceRects,
colors,
transforms,
destinationRectanglesStride: 0, //0 stride because there is only 1 element
sourceRectanglesStride: 0, //I've also tried using Marshal.SizeOf() to get the stride, but I get the same error
colorsStride: 0,
transformsStride: 0);
d2dDeviceContext3.DrawSpriteBatch(
spriteBatch: spriteBatch,
startIndex: 0,
spriteCount: 1,
bitmap: bmp,
interpolationMode: BitmapInterpolationMode.Linear,
spriteOptions: SpriteOptions.ClampToSourceRectangle);
//when using the spritebatch technique, this throws exception:
// "The object was not in the correct state to process the method"
d2dDeviceContext3.EndDraw();
//first param set to 1 would indicate waitVerticalBlanking
swapChain.Present(0, PresentFlags.None);
});
#endregion
}
Bitmap createD2DBitmap(string filePath, _d2d.DeviceContext deviceContext)
{
var imagingFactory = new _wic.ImagingFactory();
var fileStream = new NativeFileStream(
filePath,
NativeFileMode.Open,
NativeFileAccess.Read);
var bitmapDecoder = new _wic.BitmapDecoder(imagingFactory, fileStream, _wic.DecodeOptions.CacheOnDemand);
var frame = bitmapDecoder.GetFrame(0);
var converter = new _wic.FormatConverter(imagingFactory);
converter.Initialize(frame, SharpDX.WIC.PixelFormat.Format32bppPRGBA);
var newBitmap = SharpDX.Direct2D1.Bitmap1.FromWicBitmap(deviceContext, converter);
return newBitmap;
}
void mainForm_FormClosed(object sender, System.Windows.Forms.FormClosedEventArgs e)
{
isClosed = true;
}
}
}
The issue was that you can't use per-primitive antialiasing with a sprite batch. This line before BeginDraw() fixed it: d2dDeviceContext3.AntialiasMode = AntialiasMode.Aliased;
I also finally learned how to get the debug layer working. Include the Debug flag when creating your device (see the comments below). If it throws an exception, that is probably because you don't have the right version of the Windows SDK. If you're using Visual Studio, go to the Visual Studio Installer and modify your installation to include the Windows SDK.
Next, right-click your project -> Properties -> Debug (on the left panel) -> check "Enable native code debugging". After I did this, a line was written to the output window stating: "D2D DEBUG ERROR - DrawSpriteBatch requires that the antialias mode be set to D2D1_ANTIALIAS_MODE_ALIASED."
Something else I learned that isn't directly related to the answer but is worth noting: "Note that ComObject in SharpDX is not disposed by the .NET finalizer. If a COM object is not released by a call to Dispose() or ReleaseReference(), it will not release the native object and memory attached to it." (from http://sharpdx.org/wiki/usage/)
Here's a full working example:
using SharpDX;
using _d2d = SharpDX.Direct2D1;
using _d3d = SharpDX.Direct3D;
using _d3d11 = SharpDX.Direct3D11;
using _dxgi = SharpDX.DXGI;
using _directWrite = SharpDX.DirectWrite;
using _wic = SharpDX.WIC;
using SharpDX.Direct2D1;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows.Forms;
using SharpDX.IO;
using SharpDX.Mathematics.Interop;
namespace TestApp
{
public class SpriteBatchIssue
{
[STAThread]
static void Main(string[] args)
{
var app = new SpriteBatchIssue();
app.Run();
}
#region Variables
_d3d11.Device d3d11Device;
SwapChain swapChain;
_dxgi.Factory1 dxgiFactory;
_d2d.Factory d2dFactory;
_d2d.Factory4 d2dFactory4;
_dxgi.Device dxgiDevice;
_d2d.Device3 d2dDevice3;
_d2d.DeviceContext3 d2dDeviceContext3;
Bitmap1 sourceImage;
SpriteBatch spriteBatch;
Bitmap1 d2dTarget;
#endregion
~SpriteBatchIssue()
{
safeDispose(ref d3d11Device);
safeDispose(ref swapChain);
safeDispose(ref dxgiFactory);
safeDispose(ref d2dFactory);
safeDispose(ref d2dFactory4);
safeDispose(ref dxgiDevice);
safeDispose(ref d2dDevice3);
safeDispose(ref d2dDeviceContext3);
safeDispose(ref sourceImage);
safeDispose(ref spriteBatch);
safeDispose(ref d2dTarget);
}
public void Run()
{
#region setup resources
var mainForm = new RenderForm();
var scDescription = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription =
new ModeDescription(
0,
0,
new Rational(60, 1),
Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = mainForm.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
//DeviceCreationFlags.Debug flag below will show debug layer messages in your output window.
//Need proper version of windows sdk for it to work, otherwise it will throw an exception.
//You also need to right click your project->properties->debug (on the left panel)-> check "enable native code debugging"
// Create Device and SwapChain
_d3d11.Device.CreateWithSwapChain(
DriverType.Hardware,
DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug,
new[] { _d3d.FeatureLevel.Level_12_1 },
scDescription,
out d3d11Device,
out swapChain);
// Ignore all windows events
dxgiFactory = swapChain.GetParent<_dxgi.Factory1>();
dxgiFactory.MakeWindowAssociation(mainForm.Handle, WindowAssociationFlags.IgnoreAll);
d2dFactory = new _d2d.Factory();
d2dFactory4 = d2dFactory.QueryInterface<_d2d.Factory4>();
dxgiDevice = d3d11Device.QueryInterface<_dxgi.Device>();
d2dDevice3 = new _d2d.Device3(d2dFactory4, dxgiDevice);
d2dDeviceContext3 = new _d2d.DeviceContext3(d2dDevice3, DeviceContextOptions.None);
#endregion
#region create drawing input
sourceImage = createD2DBitmap(@"yourFile.png", d2dDeviceContext3);
spriteBatch = new SpriteBatch(d2dDeviceContext3);
var destinationRects = new RawRectangleF[1];
destinationRects[0] = new RectangleF(100, 50, sourceImage.Size.Width, sourceImage.Size.Height);
var sourceRects = new RawRectangle[1];
sourceRects[0] = new RectangleF(0, 0, sourceImage.Size.Width, sourceImage.Size.Height);
#endregion
#region mainLoop
RenderLoop.Run(mainForm, () =>
{
if (d2dTarget != null)
{
d2dTarget.Dispose();
d2dTarget = null;
}
using (var backBuffer = Texture2D.FromSwapChain<Texture2D>(swapChain, 0))
{
using (var surface = backBuffer.QueryInterface<Surface>())
{
var bmpProperties = new BitmapProperties1(
new PixelFormat(Format.R8G8B8A8_UNorm, _d2d.AlphaMode.Premultiplied),
dpiX: 96,
dpiY: 96,
bitmapOptions: BitmapOptions.Target | BitmapOptions.CannotDraw);
d2dTarget = new Bitmap1(
d2dDeviceContext3,
surface,
bmpProperties);
d2dDeviceContext3.Target = d2dTarget;
}
}
//the key missing piece: cannot use per primitive antialiasing with spritebatch
d2dDeviceContext3.AntialiasMode = AntialiasMode.Aliased;
d2dDeviceContext3.BeginDraw();
spriteBatch.Clear();
spriteBatch.AddSprites(
1,
destinationRects,
sourceRects,
null,
null,
destinationRectanglesStride: 0, //0 stride because there is only 1 element
sourceRectanglesStride: 0,
colorsStride: 0,
transformsStride: 0);
d2dDeviceContext3.DrawSpriteBatch(
spriteBatch: spriteBatch,
startIndex: 0,
spriteCount: 1,
bitmap: sourceImage,
interpolationMode: BitmapInterpolationMode.Linear,
spriteOptions: SpriteOptions.ClampToSourceRectangle);
d2dDeviceContext3.EndDraw();
//first param set to 1 would indicate waitVerticalBlanking
swapChain.Present(0, PresentFlags.None);
});
#endregion
}
void safeDispose<T>(ref T disposable) where T : class, IDisposable
{
if (disposable != null)
{
disposable.Dispose();
disposable = null;
}
}
Bitmap1 createD2DBitmap(string filePath, _d2d.DeviceContext deviceContext)
{
var imagingFactory = new _wic.ImagingFactory();
var fileStream = new NativeFileStream(
filePath,
NativeFileMode.Open,
NativeFileAccess.Read);
var bitmapDecoder = new _wic.BitmapDecoder(imagingFactory, fileStream, _wic.DecodeOptions.CacheOnDemand);
var frame = bitmapDecoder.GetFrame(0);
var converter = new _wic.FormatConverter(imagingFactory);
converter.Initialize(frame, SharpDX.WIC.PixelFormat.Format32bppPRGBA);
var newBitmap = SharpDX.Direct2D1.Bitmap1.FromWicBitmap(deviceContext, converter);
return newBitmap;
}
}
}
I'm using Ghostscript.NET, a handy C# wrapper for Ghostscript functionality. I have a batch of PDFs being sent from the client side to be converted to images on the ASP.NET Web API server and returned to the client.
public static IEnumerable<Image> PdfToImagesGhostscript(byte[] binaryPdfData, int dpi)
{
List<Image> pagesAsImages = new List<Image>();
GhostscriptVersionInfo gvi = new GhostscriptVersionInfo(AppDomain.CurrentDomain.BaseDirectory + #"\bin\gsdll32.dll");
using (var pdfDataStream = new MemoryStream(binaryPdfData))
using (var rasterizer = new Ghostscript.NET.Rasterizer.GhostscriptRasterizer())
{
rasterizer.Open(pdfDataStream, gvi, true);
for (int i = 1; i <= rasterizer.PageCount; i++)
{
Image pageAsImage = rasterizer.GetPage(dpi, dpi, i); // Out of Memory Exception on this line
pagesAsImages.Add(pageAsImage);
}
}
return pagesAsImages;
}
This generally works fine (I usually use 500 dpi, which I know is high, but even dropping to 300 I can reproduce the error). But if I send it many PDFs from the client side (150 one-page PDFs, for example), it will often hit an OutOfMemoryException in the Ghostscript.NET rasterizer. How can I overcome this? Should this be threaded? If so, how would that work? Would it help to use the 64-bit version of Ghostscript? Thanks in advance.
I'm new to this myself, on here looking for techniques.
According to the example in the documentation here, they show this:
for (int page = 1; page <= _rasterizer.PageCount; page++)
{
var docName = String.Format("Page-{0}.pdf", page);
var pageFilePath = Path.Combine(outputPath, docName);
var pdf = _rasterizer.GetPage(desired_x_dpi, desired_y_dpi, page);
pdf.Save(pageFilePath);
pagesAsImages.Add(pdf);
}
It looks like you aren't saving your files.
I am still working on getting something similar to work on my end as well. Currently, I have two methods that I'm going to try, using the GhostscriptProcessor first:
private static void GhostscriptNetProcess(String fileName, String outputPath)
{
var version = Ghostscript.NET.GhostscriptVersionInfo.GetLastInstalledVersion();
var source = (fileName.IndexOf(' ') == -1) ? fileName : String.Format("\"{0}\"", fileName);
var gsArgs = new List<String>();
gsArgs.Add("-q");
gsArgs.Add("-dNOPAUSE");
gsArgs.Add("-dNOPROMPT");
gsArgs.Add("-sDEVICE=pdfwrite");
gsArgs.Add(String.Format(#"-sOutputFile={0}", outputPath));
gsArgs.Add(source);
var processor = new Ghostscript.NET.Processor.GhostscriptProcessor(version, false);
processor.Process(gsArgs.ToArray());
}
The version below is similar to yours, and is what I started out using until I found other code examples:
private static void GhostscriptNetRaster(String fileName, String outputPath)
{
var version = Ghostscript.NET.GhostscriptVersionInfo.GetLastInstalledVersion();
using (var rasterizer = new Ghostscript.NET.Rasterizer.GhostscriptRasterizer())
{
rasterizer.Open(File.Open(fileName, FileMode.Open, FileAccess.Read), version, false);
for (int page = 1; page <= rasterizer.PageCount; page++) // GetPage expects 1-based page numbers
{
var img = rasterizer.GetPage(96, 96, page);
img.Save(outputPath); // note: every page is written to the same path; use a per-page file name to keep all pages
}
}
}
Does that get you anywhere?
You don't have to rasterize all pages with the same GhostscriptRasterizer instance. Use a disposable rasterizer for each page and collect the results in a List<Image> or List<byte[]>.
Here is an example that collects the results as a list of JPEG-encoded byte arrays:
List<byte[]> result = new List<byte[]>();
for (int i = 1; i <= pdfPagesCount; i++)
{
using (var pageRasterizer = new GhostscriptRasterizer())
{
pageRasterizer.Open(stream, gsVersion, true);
using (Image tempImage = pageRasterizer.GetPage(dpiX, dpiY, i))
{
var encoder = ImageCodecInfo.GetImageEncoders().First(c => c.FormatID == System.Drawing.Imaging.ImageFormat.Jpeg.Guid);
var encoderParams = new EncoderParameters() { Param = new[] { new EncoderParameter(System.Drawing.Imaging.Encoder.Quality, 95L) } };
using (MemoryStream memoryStream = new MemoryStream())
{
tempImage.Save(memoryStream, encoder, encoderParams);
result.Add(memoryStream.ToArray());
}
}
}
}
If you don't know the number of pages in the PDF, you can open a rasterizer once just to read its PageCount property.
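A minimal sketch of that, assuming the same stream and gsVersion variables as in the example above (and that the stream stays open and seekable afterwards, which is an assumption):
int pdfPagesCount;
using (var countRasterizer = new GhostscriptRasterizer())
{
countRasterizer.Open(stream, gsVersion, true);
pdfPagesCount = countRasterizer.PageCount;
}
stream.Position = 0; // rewind so the per-page rasterizers can re-read the PDF data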
I am a beginner in Windows Phone development and the Nokia Imaging SDK. I have two functions.
First, I call the function below to convert the image to grayscale:
private async void PickImageCallback(object sender, PhotoResult e)
{
if (e.TaskResult != TaskResult.OK || e.ChosenPhoto == null)
{
return;
}
using (var source = new StreamImageSource(e.ChosenPhoto))
{
using (var filters = new FilterEffect(source))
{
var sampleFilter = new GrayscaleFilter();
filters.Filters = new IFilter[] { sampleFilter };
var target = new WriteableBitmap((int)CartoonImage.ActualWidth, (int)CartoonImage.ActualHeight);
var renderer = new WriteableBitmapRenderer(filters, target);
{
await renderer.RenderAsync();
_thumbnailImageBitmap = target;
CartoonImage.Source = target;
}
}
}
SaveButton.IsEnabled = true;
}
Then I call this function to convert the image to a binary (black-and-white) image:
private async void Binary(WriteableBitmap bm_image)
{
var target = new WriteableBitmap((int)CartoonImage.ActualWidth, (int)CartoonImage.ActualHeight);
MemoryStream stream= new MemoryStream();
bm_image.SaveJpeg(stream, bm_image.PixelWidth, bm_image.PixelHeight, 0, 100);
using (var source = new StreamImageSource(stream))
{
using (var filters = new FilterEffect(source))
{
var sampleFilter = new StampFilter(5, 0.7);
filters.Filters = new IFilter[] { sampleFilter };
var renderer1 =new WriteableBitmapRenderer(filters, target);
{
await renderer1.RenderAsync();
CartoonImage.Source = target;
}
}
}
}
But when it reaches "await renderer1.RenderAsync();" in the second function, it doesn't work. How can I solve this? Could you also explain how "await" and "async" work?
Thank you very much!
I'm mostly guessing here since I do not know what error you get, but I'm pretty sure your problem lies in setting up the source. Have you made sure the memory stream position is set to the beginning (0) before creating the StreamImageSource?
Try adding:
stream.Position = 0;
before creating the StreamImageSource.
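In context, a minimal sketch of where that line would go in the Binary method from the question (names taken from the question's code):
bm_image.SaveJpeg(stream, bm_image.PixelWidth, bm_image.PixelHeight, 0, 100);
stream.Position = 0; // rewind so StreamImageSource reads the JPEG data from the start
using (var source = new StreamImageSource(stream))
{
// ... rest of the method unchanged
}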
Instead of trying to create a memory stream from the writeable bitmap, I suggest doing:
using Nokia.InteropServices.WindowsRuntime;
...
using (var source = new BitmapImageSource(bm_image.AsBitmap()))
{
...
}
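Putting both ideas together, here is a rough, untested sketch of how the Binary method might look with BitmapImageSource, assuming the same CartoonImage control and StampFilter parameters as in the question:
private async void Binary(WriteableBitmap bm_image)
{
var target = new WriteableBitmap(bm_image.PixelWidth, bm_image.PixelHeight);
// no JPEG round-trip: wrap the WriteableBitmap directly as an image source
using (var source = new BitmapImageSource(bm_image.AsBitmap()))
using (var filters = new FilterEffect(source))
{
filters.Filters = new IFilter[] { new StampFilter(5, 0.7) };
var renderer = new WriteableBitmapRenderer(filters, target);
await renderer.RenderAsync();
CartoonImage.Source = target;
}
}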