I want to draw a simple Path that uses the RenderedGeometry of a Polygon as its Data:
Polygon polygon = new Polygon();
polygon.Points = new PointCollection { new Point(0, 0), new Point(0, 100), new Point(150, 150) };
var path = new Path
{
Data = polygon.RenderedGeometry,
Stroke = Brushes.LightBlue,
StrokeThickness = 2,
Fill = Brushes.Green,
Opacity = 0.5
};
Panel.SetZIndex(path, 2);
canvas.Children.Add(path);
However, my Canvas does not display anything.
You need to force the geometry to be rendered before you add the Path to the Canvas. You can do this by calling the Measure and Arrange methods of the Polygon:
Polygon polygon = new Polygon();
polygon.Points = new PointCollection { new Point(0, 0), new Point(0, 100), new Point(150, 150) };
polygon.Measure(canvas.RenderSize);
polygon.Arrange(new Rect(canvas.RenderSize));
var path = new Path
{
Data = polygon.RenderedGeometry,
Stroke = Brushes.LightBlue,
StrokeThickness = 2,
Fill = Brushes.Green,
Opacity = 0.5
};
Panel.SetZIndex(path, 2);
canvas.Children.Add(path);
You shouldn't be using a Polygon element to define the Geometry of a Path. Instead, create a PathGeometry directly, like this:
var figure = new PathFigure
{
StartPoint = new Point(0, 0),
IsClosed = true
};
figure.Segments.Add(new PolyLineSegment
{
Points = new PointCollection { new Point(0, 100), new Point(150, 150) },
IsStroked = true
});
var geometry = new PathGeometry();
geometry.Figures.Add(figure);
var path = new Path
{
Data = geometry,
Stroke = Brushes.LightBlue,
StrokeThickness = 2,
Fill = Brushes.Green,
Opacity = 0.5
};
Or directly create a Geometry from a string using Path Markup Syntax:
var path = new Path
{
Data = Geometry.Parse("M0,0 L0,100 150,150Z"),
Stroke = Brushes.LightBlue,
StrokeThickness = 2,
Fill = Brushes.Green,
Opacity = 0.5
};
I've been trying to port a renderer I wrote from SlimDX to SharpDX and ran into a problem. I want to render to multiple render targets (in this case, color and an object ID for picking).
This is the initialization of the render targets (all with the same dimensions and multisample settings):
//Swapchain, Device, Primary Rendertarget
var description = new SwapChainDescription()
{
BufferCount = 1,
Usage = Usage.RenderTargetOutput,
OutputHandle = Form.Handle,
IsWindowed = true,
ModeDescription = new ModeDescription(0, 0, new Rational(60, 1), Format.R8G8B8A8_UNorm),
SampleDescription = new SampleDescription(1, 0),
Flags = SwapChainFlags.AllowModeSwitch,
SwapEffect = SwapEffect.Discard
};
this.Device = new Device(adapter);
this.SwapChain = new SwapChain(factory, Device, description);
this.backBuffer = SharpDX.Direct3D11.Texture2D.FromSwapChain<SharpDX.Direct3D11.Texture2D>(SwapChain, 0);
this.RenderTargetView = new RenderTargetView(Device, backBuffer);
//Depthbuffer
Texture2DDescription descDepth = new Texture2DDescription();
descDepth.Width = (int)Viewport.Width;
descDepth.Height = (int)Viewport.Height;
descDepth.MipLevels = 1;
descDepth.ArraySize = 1;
descDepth.Format = Format.D32_Float;
descDepth.Usage = ResourceUsage.Default;
descDepth.SampleDescription = new SampleDescription(1, 0);
descDepth.BindFlags = BindFlags.DepthStencil;
descDepth.CpuAccessFlags = 0;
descDepth.OptionFlags = 0;
using (Texture2D depthStencil = new Texture2D(Device, descDepth))
{
depthView = new DepthStencilView(Device, depthStencil);
}
//Rendertargetview for the ID
Texture2DDescription IdMapDesc = new Texture2DDescription();
IdMapDesc.Width = (int)Viewport.Width;
IdMapDesc.Height = (int)Viewport.Height;
IdMapDesc.ArraySize = 1;
IdMapDesc.MipLevels = 1;
IdMapDesc.Format = Format.R16_UInt;
IdMapDesc.Usage = ResourceUsage.Default;
IdMapDesc.SampleDescription = new SampleDescription(1, 0);
IdMapDesc.BindFlags = BindFlags.ShaderResource | BindFlags.RenderTarget;
IdMapDesc.CpuAccessFlags = 0;
IdMapDesc.OptionFlags = 0;
using (Texture2D idMap = new Texture2D(Device, IdMapDesc))
{
idView = new RenderTargetView(Device, idMap);
}
This is how I do the rendering:
public override void Render()
{
Context.ClearDepthStencilView(depthView, DepthStencilClearFlags.Depth, 1f, 0);
Context.OutputMerger.SetTargets(depthView, RenderTargetView);
staticMeshRenderer.UpdateCameraConstants();
foreach(TerrainSegment segment in terrain.SegmentMap)
{
terrainRenderer.Draw(segment, null);
}
objectManager.DrawContent(Device);
}
This produces the expected output (I can't post images; it's a scene with a working depth stencil). However, when I bind multiple render targets like this:
Context.OutputMerger.SetTargets(depthView, RenderTargetView, idView);
the depth stencil stops doing its job.
HLSL code used for both attempts:
struct PS_Output
{
float4 Color : SV_TARGET0;
uint ID : SV_TARGET1;
};
PS_Output PShader(VS_OutputStatic input)
{
PS_Output output;
output.ID = 3; //test
output.Color = Diffuse.Sample(StateLinear, input.TexCoords).rgba;
return output;
}
What am I doing wrong here?
Thanks in advance!
Debugging with RenderDoc showed that my buffer dimensions were NOT the same. Fixing that solved the problem.
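For reference, a minimal sketch of one way to keep the dimensions in sync (it reuses the backBuffer, descDepth and IdMapDesc variables from the snippets above): query the actual backbuffer description once and derive the other targets from it.
// Sketch: derive every render target from the real backbuffer size so all
// bound targets share the same dimensions and multisample settings.
var bbDesc = backBuffer.Description;
descDepth.Width = bbDesc.Width;
descDepth.Height = bbDesc.Height;
descDepth.SampleDescription = bbDesc.SampleDescription;
IdMapDesc.Width = bbDesc.Width;
IdMapDesc.Height = bbDesc.Height;
IdMapDesc.SampleDescription = bbDesc.SampleDescription;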
I'm trying to make a simple SlimDX example to test its performance against GDI, and unfortunately I got stuck at the very beginning. I've created a simple console application in VS2010 and added this code to the program's Main method:
// 0. STEP
SlimDX.Direct3D10.Device device = new SlimDX.Direct3D10.Device(DriverType.Hardware, DeviceCreationFlags.BgraSupport);
SlimDX.Direct2D.Factory factory = new SlimDX.Direct2D.Factory();
// 1. STEP
Texture2DDescription textureDesc = new Texture2DDescription();
textureDesc.Width = 512;
textureDesc.Height = 512;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm;
textureDesc.SampleDescription = new SampleDescription(1, 0);
textureDesc.Usage = ResourceUsage.Default;
textureDesc.BindFlags = BindFlags.RenderTarget;
textureDesc.CpuAccessFlags = CpuAccessFlags.None;
textureDesc.OptionFlags = ResourceOptionFlags.None;
Texture2D maskTexture = new Texture2D(device, textureDesc);
// 2. STEP
SlimDX.DXGI.Surface surface = maskTexture.AsSurface();
// 3. STEP
RenderTargetProperties props = new RenderTargetProperties
{
HorizontalDpi = 96,
VerticalDpi = 96,
MinimumFeatureLevel = SlimDX.Direct2D.FeatureLevel.Default,
PixelFormat = new PixelFormat(SlimDX.DXGI.Format.Unknown, AlphaMode.Premultiplied),
Type = RenderTargetType.Default,
Usage = RenderTargetUsage.None
};
RenderTarget target = RenderTarget.FromDXGI(factory, surface, props);
This crashes on the RenderTarget.FromDXGI call with this message:
Additional information: E_INVALIDARG: An invalid parameter was passed to the returning function (-2147024809)
Unfortunately, I could not enable DirectX debug output as stated in this SO question:
DirectX 10 debug output not working
... so, this is all I've got.
BUT, I've tried this both at home (Win 8.1, VS 2013) and at work (Win 7, VS 2010 SP1). At home it works when I debug the app using Graphics Diagnostics in VS 2013, but not when I start a regular debug session or run the exe manually. At work it does not work at all.
Any ideas just from code? I'm kinda desperate here. :(
I just noticed your comment on my other answer. Here is the creation function I use:
public static Texture2D CreateRenderTexture(this RenderHelper helper, int width, int height)
{
Texture2DDescription description = new Texture2DDescription
{
ArraySize = 1,
BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
CpuAccessFlags = CpuAccessFlags.None,
Format = SlimDX.DXGI.Format.B8G8R8A8_UNorm,
Width = width,
Height = height,
MipLevels = 1,
OptionFlags = ResourceOptionFlags.None,
SampleDescription = new SlimDX.DXGI.SampleDescription(1, 0),
Usage = ResourceUsage.Default,
};
return new Texture2D(helper.Device, description);
}
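For completeness, a rough usage sketch under the same setup as the question (the factory and props come from the question's code; only the texture creation differs):
// Sketch: wrap the B8G8R8A8 texture as a DXGI surface and hand it to Direct2D.
Texture2D maskTexture = helper.CreateRenderTexture(512, 512);
SlimDX.DXGI.Surface surface = maskTexture.AsSurface();
RenderTarget target = RenderTarget.FromDXGI(factory, surface, props);
// ... draw with 'target', then dispose target, surface and maskTexture.
The visible differences from the question's description are the B8G8R8A8_UNorm format and the extra ShaderResource bind flag.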
Good day,
I am trying to display a real-time stereo video using nvidia 3DVision and two IP cameras. I am totally new to DirectX, but have tried to work through some tutorials and other questions on this and other sites. For now, I am displaying two static bitmaps for left and right eyes. These will be replaced by bitmaps from my cameras once I have got this part of my program working.
This question NV_STEREO_IMAGE_SIGNATURE and DirectX 10/11 (nVidia 3D Vision) has helped me quite a bit, but I am still struggling to get my program working as it should. What I am finding is that my shutter glasses start working as they should, but only the image for the right eye gets displayed, while the left eye remains blank (except for the mouse cursor).
Here is my code for generating the stereo images:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Windows.Forms;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using SlimDX;
using SlimDX.Direct3D11;
using SlimDX.Windows;
using SlimDX.DXGI;
using Device = SlimDX.Direct3D11.Device; // Make sure we use DX11
using Resource = SlimDX.Direct3D11.Resource;
namespace SlimDxTest2
{
static class Program
{
private static Device device; // DirectX11 Device
private static int Count; // Just to make sure things are being updated
// The NVSTEREO header.
static byte[] stereo_data = new byte[] {0x4e, 0x56, 0x33, 0x44, //NVSTEREO_IMAGE_SIGNATURE = 0x4433564e;
0x00, 0x0F, 0x00, 0x00, //Screen width * 2 = 1920*2 = 3840 = 0x00000F00;
0x38, 0x04, 0x00, 0x00, //Screen height = 1080 = 0x00000438;
0x20, 0x00, 0x00, 0x00, //dwBPP = 32 = 0x00000020;
0x02, 0x00, 0x00, 0x00}; //dwFlags = SIH_SCALE_TO_FIT = 0x00000002
[STAThread]
static void Main()
{
Bitmap left_im = new Bitmap("Blue.png"); // Read in Bitmaps
Bitmap right_im = new Bitmap("Red.png");
// Device creation
var form = new RenderForm("Stereo test") { ClientSize = new Size(1920, 1080) };
var desc = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription = new ModeDescription(1920, 1080, new Rational(120, 1), Format.R8G8B8A8_UNorm),
IsWindowed = false, //true,
OutputHandle = form.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
SwapChain swapChain;
Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.Debug, desc, out device, out swapChain);
RenderTargetView renderTarget; // create a view of our render target, which is the backbuffer of the swap chain we just created
using (var resource = Resource.FromSwapChain<Texture2D>(swapChain, 0))
renderTarget = new RenderTargetView(device, resource);
var context = device.ImmediateContext; // set up a viewport
var viewport = new Viewport(0.0f, 0.0f, form.ClientSize.Width, form.ClientSize.Height);
context.OutputMerger.SetTargets(renderTarget);
context.Rasterizer.SetViewports(viewport);
// prevent DXGI handling of alt+enter, which doesn't work properly with Winforms
using (var factory = swapChain.GetParent<Factory>())
factory.SetWindowAssociation(form.Handle, WindowAssociationFlags.IgnoreAll);
form.KeyDown += (o, e) => // handle alt+enter ourselves
{
if (e.Alt && e.KeyCode == Keys.Enter)
swapChain.IsFullScreen = !swapChain.IsFullScreen;
};
form.KeyDown += (o, e) => // Alt + X -> Exit Program
{
if (e.Alt && e.KeyCode == Keys.X)
{
form.Close();
}
};
context.ClearRenderTargetView(renderTarget, Color.Green); // Fill Screen with specified colour
Texture2DDescription stereoDesc = new Texture2DDescription()
{
ArraySize = 1,
Width = 3840,
Height = 1081,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
// Main Loop
MessagePump.Run(form, () =>
{
Texture2D texture_stereo = Make3D(left_im, right_im); // Create Texture from two bitmaps in memory
ResourceRegion stereoSrcBox = new ResourceRegion { Front = 0, Back = 1, Top = 0, Bottom = 1080, Left = 0, Right = 1920 };
context.CopySubresourceRegion(texture_stereo, 0, stereoSrcBox, renderTarget.Resource, 0, 0, 0, 0);
texture_stereo.Dispose();
swapChain.Present(0, PresentFlags.None);
});
// Dispose resources
swapChain.IsFullScreen = false; // Required before swapchain dispose
device.Dispose();
swapChain.Dispose();
renderTarget.Dispose();
}
static Texture2D Make3D(Bitmap leftBmp, Bitmap rightBmp)
{
var context = device.ImmediateContext;
Bitmap left2 = leftBmp.Clone(new RectangleF(0, 0, leftBmp.Width, leftBmp.Height), PixelFormat.Format32bppArgb); // Change bmp to 32bit ARGB
Bitmap right2 = rightBmp.Clone(new RectangleF(0, 0, rightBmp.Width, rightBmp.Height), PixelFormat.Format32bppArgb);
// Show FrameCount on screen: (To test)
Graphics left_graph = Graphics.FromImage(left2);
left_graph.DrawString("Frame: " + Count.ToString(), new System.Drawing.Font("Arial", 16), Brushes.Black, new PointF(100, 100));
left_graph.Dispose();
Graphics right_graph = Graphics.FromImage(right2);
right_graph.DrawString("Frame: " + Count.ToString(), new System.Drawing.Font("Arial", 16), Brushes.Black, new PointF(200, 200));
right_graph.Dispose();
Count++;
Texture2DDescription desc2d = new Texture2DDescription()
{
ArraySize = 1,
Width = 1920,
Height = 1080,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
Texture2D leftText2 = new Texture2D(device, desc2d); // Texture2D for each bmp
Texture2D rightText2 = new Texture2D(device, desc2d);
Rectangle rect = new Rectangle(0, 0, left2.Width, left2.Height);
BitmapData leftData = left2.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
IntPtr left_ptr = leftData.Scan0;
int left_num_bytes = Math.Abs(leftData.Stride) * leftData.Height;
byte[] left_bytes = new byte[left_num_bytes];
byte[] left_bytes2 = new byte[left_num_bytes];
System.Runtime.InteropServices.Marshal.Copy(left_ptr, left_bytes, 0, left_num_bytes); // Get Byte array from bitmap
left2.UnlockBits(leftData);
DataBox box1 = context.MapSubresource(leftText2, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box1.Data.Write(left_bytes, 0, left_bytes.Length);
context.UnmapSubresource(leftText2, 0);
BitmapData rightData = right2.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
IntPtr right_ptr = rightData.Scan0;
int right_num_bytes = Math.Abs(rightData.Stride) * rightData.Height;
byte[] right_bytes = new byte[right_num_bytes];
System.Runtime.InteropServices.Marshal.Copy(right_ptr, right_bytes, 0, right_num_bytes); // Get Byte array from bitmap
right2.UnlockBits(rightData);
DataBox box2 = context.MapSubresource(rightText2, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box2.Data.Write(right_bytes, 0, right_bytes.Length);
context.UnmapSubresource(rightText2, 0);
Texture2DDescription stereoDesc = new Texture2DDescription()
{
ArraySize = 1,
Width = 3840,
Height = 1081,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
Texture2D stereoTexture = new Texture2D(device, stereoDesc); // Texture2D to contain stereo images and Nvidia 3DVision Signature
// Identify the source texture region to copy (all of it)
ResourceRegion stereoSrcBox = new ResourceRegion { Front = 0, Back = 1, Top = 0, Bottom = 1080, Left = 0, Right = 1920 };
// Copy it to the stereo texture
context.CopySubresourceRegion(leftText2, 0, stereoSrcBox, stereoTexture, 0, 0, 0, 0);
context.CopySubresourceRegion(rightText2, 0, stereoSrcBox, stereoTexture, 0, 1920, 0, 0); // Offset by 1920 pixels
// Open the staging texture for reading and go to last row
DataBox box = context.MapSubresource(stereoTexture, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box.Data.Seek(stereoTexture.Description.Width * (stereoTexture.Description.Height - 1) * 4, System.IO.SeekOrigin.Begin);
box.Data.Write(stereo_data, 0, stereo_data.Length); // Write the NVSTEREO header
context.UnmapSubresource(stereoTexture, 0);
left2.Dispose();
leftText2.Dispose();
right2.Dispose();
rightText2.Dispose();
return stereoTexture;
}
}
}
I have tried various methods of copying the Texture2D of the stereo image including signature (3840x1081) to the backbuffer, but none of the methods I have tried display both images...
Any help or comments will be much appreciated,
Ryan
If using DirectX 11.1 is an option, there is a much easier way to enable stereoscopic features, without having to rely on nVidia's byte wizardry. Basically, you create a SwapChain1 instead of a regular SwapChain, and then it is as simple as setting Stereo to true.
Have a look at this post I made; it shows you how to create a stereo swap chain. The code is a C# port of MS's own stereo sample. You then have two render targets, and things are much simpler. Before rendering each eye you have to:
void RenderEye(bool rightEye, ITarget target)
{
RenderTargetView currentTarget = rightEye ? target.RenderTargetViewRight : target.RenderTargetView;
context.OutputMerger.SetTargets(target.DepthStencilView, currentTarget);
[clean color/depth]
[render scene]
[repeat for each eye]
}
where ITarget is an interface for a class providing access to the backbuffer, rendertargets, etc.
That's it, DirectX will take care of everything. Hope this helps.
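For illustration, a sketch of the swap chain part using SharpDX's DXGI 1.2 wrappers (the exact names are my assumption; device is your D3D11 device and form.Handle your window handle):
// Sketch: a stereo flip-model swap chain (Windows 8 / DirectX 11.1 or later).
var swapDesc = new SharpDX.DXGI.SwapChainDescription1
{
    Width = 1920,
    Height = 1080,
    Format = SharpDX.DXGI.Format.R8G8B8A8_UNorm,
    Stereo = true, // this is the important part
    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
    Usage = SharpDX.DXGI.Usage.RenderTargetOutput,
    BufferCount = 2,
    SwapEffect = SharpDX.DXGI.SwapEffect.FlipSequential,
    Scaling = SharpDX.DXGI.Scaling.None
};
SharpDX.DXGI.SwapChain1 swapChain;
using (var dxgiDevice = device.QueryInterface<SharpDX.DXGI.Device2>())
using (var adapter = dxgiDevice.Adapter)
using (var factory2 = adapter.GetParent<SharpDX.DXGI.Factory2>())
{
    swapChain = new SharpDX.DXGI.SwapChain1(factory2, device, form.Handle, ref swapDesc);
}
// The backbuffer of a stereo swap chain is a texture array with two slices;
// create one RenderTargetView per slice (left = 0, right = 1) and render the
// scene twice per frame, once per eye.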
Try creating the backbuffer with width = 1920 and not 3840. Stretch each image to half its width and put them side by side.
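A sketch of that suggestion at the GDI level, reusing the bitmap names from the question's Make3D (squeeze each eye to half width before uploading, then place them at x = 0 and x = 960):
// Sketch: squash each eye to half width so the pair fits a 1920-wide backbuffer.
Bitmap half_left = new Bitmap(960, 1080);
using (Graphics g = Graphics.FromImage(half_left))
{
    g.DrawImage(left2, new Rectangle(0, 0, 960, 1080));
}
// Do the same for right2, then copy them side by side as before.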
I remember seeing this exact same question a couple of days ago while searching the Nvidia Developer forums. Unfortunately the forums are down due to a recent hacker attack. I remember that the OP on that thread was able to get it working with DX11 and SlimDX using the signature hack. You do not use the StretchRectangle method; it was something like createSubresourceRegion(), but not exactly that, I can't remember. It might be these methods, CopyResource() or CopySubresourceRegion(), found in this similar thread on Stack Overflow:
Copy Texture to Texture
Also, are you rendering the image continuously, or at least a few times? I was doing the same thing in DX9 and had to tell DX to render 3 frames before the driver recognized it as 3D Vision. Did your glasses kick on? Is your backbuffer (width*2) x (height+1), and are you writing to it like so:
_________________________
| | |
| img1 | img2 |
| | |
--------------------------
|_______signature________| where this last row = 1 pix tall
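In terms of the staging texture in the question's Make3D, that layout means the signature must land in the very last row of the (width*2) x (height+1) texture. A small sketch of the offset, using the row pitch reported by Map rather than assuming width*4 (the pitch can be padded, which would silently shift the signature; this is not necessarily the cause of the missing left eye, just a common pitfall):
// Sketch: write the NV_STEREO header into the last row of the staging texture.
DataBox box = context.MapSubresource(stereoTexture, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
long lastRowOffset = (long)box.RowPitch * (stereoTexture.Description.Height - 1);
box.Data.Seek(lastRowOffset, System.IO.SeekOrigin.Begin);
box.Data.Write(stereo_data, 0, stereo_data.Length);
context.UnmapSubresource(stereoTexture, 0);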
I am getting an issue with the SlimDX March SDK (for DXSDK11 June 2010, I believe). The problem is that whenever I attach the depth view to the output merger state, I don't get any output on the screen. I have compared my code with the DX11 samples and it does seem to be correct. I have tried all sorts of flags and formats for the depth test (including always passing, etc.) but nothing seems to work. I'd appreciate it if anyone can spot a mistake. Here are the steps:
Initialize the back buffer:
D3DDevice device;
SwapChain swapChain;
/// Create the swap chain
SwapChainDescription desc = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription = new ModeDescription
{
Width = ContextSettings.Width,
Height = ContextSettings.Height,
RefreshRate = new SlimDX.Rational(ContextSettings.RefreshRate, 1),
Format = ContextSettings.BufferFormat,
},
IsWindowed = !ContextSettings.FullScreen,
OutputHandle = WindowHandle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput,
};
FeatureLevel[] featureLevels = new FeatureLevel[] { FeatureLevel.Level_11_0, FeatureLevel.Level_10_1 };
DriverType driverType = DriverType.Hardware;
D3DDevice.CreateWithSwapChain(driverType, DeviceCreationFlags.Debug, featureLevels, desc, out device, out swapChain);
Device = device;
SwapChain = swapChain;
/// Setup window association
Factory factory = swapChain.GetParent<Factory>();
factory.SetWindowAssociation(WindowHandle, WindowAssociationFlags.IgnoreAll);
/// Setup back buffers and render target views
RenderBuffer = DXTexture2D.FromSwapChain<DXTexture2D>(swapChain, 0);
RenderView = new RenderTargetView(Device, RenderBuffer);
Then initialize the depth buffer:
Format depthFormat = Format.D32_Float;
Texture2DDescription depthBufferDesc = new Texture2DDescription
{
ArraySize = 1,
BindFlags = BindFlags.DepthStencil,
CpuAccessFlags = CpuAccessFlags.None,
Format = depthFormat,
Height = width,
Width = height,
MipLevels = 1,
OptionFlags = ResourceOptionFlags.None,
SampleDescription = new SampleDescription( 1, 0 ),
Usage = ResourceUsage.Default
};
DepthBuffer = new DXTexture2D(Device, depthBufferDesc);
DepthStencilViewDescription dsViewDesc = new DepthStencilViewDescription
{
ArraySize = 0,
Format = depthFormat,
Dimension = DepthStencilViewDimension.Texture2D,
MipSlice = 0,
Flags = 0,
FirstArraySlice = 0
};
DepthView = new DepthStencilView(Device, DepthBuffer, dsViewDesc);
DepthStencilStateDescription dsStateDesc = new DepthStencilStateDescription()
{
IsDepthEnabled = true,
IsStencilEnabled = false,
DepthWriteMask = DepthWriteMask.All,
DepthComparison = Comparison.Less,
};
DepthState = DepthStencilState.FromDescription(Device, dsStateDesc);
Setup the render targets:
DeviceContext.OutputMerger.DepthStencilState = DepthState;
DeviceContext.OutputMerger.SetTargets(DepthView, RenderView);
DeviceContext.Rasterizer.SetViewports(new Viewport(0, 0, ContextSettings.Width, ContextSettings.Height, 0.0f, 1.0f));
Clear();
As soon as I remove DepthView from OutputMerger.SetTargets I start seeing images on the screen (without the depth test of course) and vice-versa otherwise.
It turned out that in the depthBufferDesc I was passing width to the Height field and height to the Width field. That gave the depth buffer and the render target different dimensions, which is what broke it.
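For clarity, the corrected description just swaps the two assignments back:
Texture2DDescription depthBufferDesc = new Texture2DDescription
{
    ArraySize = 1,
    BindFlags = BindFlags.DepthStencil,
    CpuAccessFlags = CpuAccessFlags.None,
    Format = depthFormat,
    Width = width,    // was mistakenly 'height'
    Height = height,  // was mistakenly 'width'
    MipLevels = 1,
    OptionFlags = ResourceOptionFlags.None,
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Default
};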