Nvidia 3D Video using DirectX11 and SlimDX in C#

Good day,
I am trying to display a real-time stereo video using nvidia 3DVision and two IP cameras. I am totally new to DirectX, but have tried to work through some tutorials and other questions on this and other sites. For now, I am displaying two static bitmaps for left and right eyes. These will be replaced by bitmaps from my cameras once I have got this part of my program working.
This question, NV_STEREO_IMAGE_SIGNATURE and DirectX 10/11 (nVidia 3D Vision), has helped me quite a bit, but I am still struggling to get my program working as it should. What I am finding is that my shutter glasses start working as they should, but only the image for the right eye gets displayed, while the left eye remains blank (except for the mouse cursor).
Here is my code for generating the stereo images:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Windows.Forms;
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
using SlimDX;
using SlimDX.Direct3D11;
using SlimDX.Windows;
using SlimDX.DXGI;
using Device = SlimDX.Direct3D11.Device; // Make sure we use DX11
using Resource = SlimDX.Direct3D11.Resource;
namespace SlimDxTest2
{
static class Program
{
private static Device device; // DirectX11 Device
private static int Count; // Just to make sure things are being updated
// The NVSTEREO header.
static byte[] stereo_data = new byte[] {0x4e, 0x56, 0x33, 0x44, //NVSTEREO_IMAGE_SIGNATURE = 0x4433564e;
0x00, 0x0F, 0x00, 0x00, //Screen width * 2 = 1920*2 = 3840 = 0x00000F00;
0x38, 0x04, 0x00, 0x00, //Screen height = 1080 = 0x00000438;
0x20, 0x00, 0x00, 0x00, //dwBPP = 32 = 0x00000020;
0x02, 0x00, 0x00, 0x00}; //dwFlags = SIH_SCALE_TO_FIT = 0x00000002
[STAThread]
static void Main()
{
Bitmap left_im = new Bitmap("Blue.png"); // Read in Bitmaps
Bitmap right_im = new Bitmap("Red.png");
// Device creation
var form = new RenderForm("Stereo test") { ClientSize = new Size(1920, 1080) };
var desc = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription = new ModeDescription(1920, 1080, new Rational(120, 1), Format.R8G8B8A8_UNorm),
IsWindowed = false, //true,
OutputHandle = form.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
SwapChain swapChain;
Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.Debug, desc, out device, out swapChain);
RenderTargetView renderTarget; // create a view of our render target, which is the backbuffer of the swap chain we just created
using (var resource = Resource.FromSwapChain<Texture2D>(swapChain, 0))
renderTarget = new RenderTargetView(device, resource);
var context = device.ImmediateContext; // set up a viewport
var viewport = new Viewport(0.0f, 0.0f, form.ClientSize.Width, form.ClientSize.Height);
context.OutputMerger.SetTargets(renderTarget);
context.Rasterizer.SetViewports(viewport);
// prevent DXGI handling of alt+enter, which doesn't work properly with Winforms
using (var factory = swapChain.GetParent<Factory>())
factory.SetWindowAssociation(form.Handle, WindowAssociationFlags.IgnoreAll);
form.KeyDown += (o, e) => // handle alt+enter ourselves
{
if (e.Alt && e.KeyCode == Keys.Enter)
swapChain.IsFullScreen = !swapChain.IsFullScreen;
};
form.KeyDown += (o, e) => // Alt + X -> Exit Program
{
if (e.Alt && e.KeyCode == Keys.X)
{
form.Close();
}
};
context.ClearRenderTargetView(renderTarget, Color.Green); // Fill Screen with specified colour
Texture2DDescription stereoDesc = new Texture2DDescription()
{
ArraySize = 1,
Width = 3840,
Height = 1081,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
// Main Loop
MessagePump.Run(form, () =>
{
Texture2D texture_stereo = Make3D(left_im, right_im); // Create Texture from two bitmaps in memory
ResourceRegion stereoSrcBox = new ResourceRegion { Front = 0, Back = 1, Top = 0, Bottom = 1080, Left = 0, Right = 1920 };
context.CopySubresourceRegion(texture_stereo, 0, stereoSrcBox, renderTarget.Resource, 0, 0, 0, 0);
texture_stereo.Dispose();
swapChain.Present(0, PresentFlags.None);
});
// Dispose resources
swapChain.IsFullScreen = false; // Required before swapchain dispose
device.Dispose();
swapChain.Dispose();
renderTarget.Dispose();
}
static Texture2D Make3D(Bitmap leftBmp, Bitmap rightBmp)
{
var context = device.ImmediateContext;
Bitmap left2 = leftBmp.Clone(new RectangleF(0, 0, leftBmp.Width, leftBmp.Height), PixelFormat.Format32bppArgb); // Change bmp to 32bit ARGB
Bitmap right2 = rightBmp.Clone(new RectangleF(0, 0, rightBmp.Width, rightBmp.Height), PixelFormat.Format32bppArgb);
// Show FrameCount on screen: (To test)
Graphics left_graph = Graphics.FromImage(left2);
left_graph.DrawString("Frame: " + Count.ToString(), new System.Drawing.Font("Arial", 16), Brushes.Black, new PointF(100, 100));
left_graph.Dispose();
Graphics right_graph = Graphics.FromImage(right2);
right_graph.DrawString("Frame: " + Count.ToString(), new System.Drawing.Font("Arial", 16), Brushes.Black, new PointF(200, 200));
right_graph.Dispose();
Count++;
Texture2DDescription desc2d = new Texture2DDescription()
{
ArraySize = 1,
Width = 1920,
Height = 1080,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
Texture2D leftText2 = new Texture2D(device, desc2d); // Texture2D for each bmp
Texture2D rightText2 = new Texture2D(device, desc2d);
Rectangle rect = new Rectangle(0, 0, left2.Width, left2.Height);
BitmapData leftData = left2.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
IntPtr left_ptr = leftData.Scan0;
int left_num_bytes = Math.Abs(leftData.Stride) * leftData.Height;
byte[] left_bytes = new byte[left_num_bytes];
byte[] left_bytes2 = new byte[left_num_bytes];
System.Runtime.InteropServices.Marshal.Copy(left_ptr, left_bytes, 0, left_num_bytes); // Get Byte array from bitmap
left2.UnlockBits(leftData);
DataBox box1 = context.MapSubresource(leftText2, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box1.Data.Write(left_bytes, 0, left_bytes.Length);
context.UnmapSubresource(leftText2, 0);
BitmapData rightData = right2.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
IntPtr right_ptr = rightData.Scan0;
int right_num_bytes = Math.Abs(rightData.Stride) * rightData.Height;
byte[] right_bytes = new byte[right_num_bytes];
System.Runtime.InteropServices.Marshal.Copy(right_ptr, right_bytes, 0, right_num_bytes); // Get Byte array from bitmap
right2.UnlockBits(rightData);
DataBox box2 = context.MapSubresource(rightText2, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box2.Data.Write(right_bytes, 0, right_bytes.Length);
context.UnmapSubresource(rightText2, 0);
Texture2DDescription stereoDesc = new Texture2DDescription()
{
ArraySize = 1,
Width = 3840,
Height = 1081,
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
Format = SlimDX.DXGI.Format.R8G8B8A8_UNorm,
OptionFlags = ResourceOptionFlags.None,
Usage = ResourceUsage.Staging,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0)
};
Texture2D stereoTexture = new Texture2D(device, stereoDesc); // Texture2D to contain stereo images and Nvidia 3DVision Signature
// Identify the source texture region to copy (all of it)
ResourceRegion stereoSrcBox = new ResourceRegion { Front = 0, Back = 1, Top = 0, Bottom = 1080, Left = 0, Right = 1920 };
// Copy it to the stereo texture
context.CopySubresourceRegion(leftText2, 0, stereoSrcBox, stereoTexture, 0, 0, 0, 0);
context.CopySubresourceRegion(rightText2, 0, stereoSrcBox, stereoTexture, 0, 1920, 0, 0); // Offset by 1920 pixels
// Open the staging texture for reading and go to last row
DataBox box = context.MapSubresource(stereoTexture, 0, MapMode.Write, SlimDX.Direct3D11.MapFlags.None);
box.Data.Seek(stereoTexture.Description.Width * (stereoTexture.Description.Height - 1) * 4, System.IO.SeekOrigin.Begin);
box.Data.Write(stereo_data, 0, stereo_data.Length); // Write the NVSTEREO header
context.UnmapSubresource(stereoTexture, 0);
left2.Dispose();
leftText2.Dispose();
right2.Dispose();
rightText2.Dispose();
return stereoTexture;
}
}
}
I have tried various methods of copying the stereo Texture2D (3840x1081, including the signature row) to the backbuffer, but none of the methods I have tried display both images...
Any help or comments will be much appreciated,
Ryan

If using DirectX 11.1 is an option, there is a much easier way to enable stereoscopic features, without having to rely on nVidia's byte wizardry. Basically, you create a SwapChain1 instead of a regular SwapChain, and then it is as simple as setting Stereo to true.
Have a look at this post I made; it shows you how to create a stereo swap chain. The code is a port to C# of Microsoft's own stereo sample. You then have two render targets, and it is much simpler. Before rendering you have to:
void RenderEye(bool rightEye, ITarget target)
{
// pick the render target view for the eye being drawn
RenderTargetView currentTarget = rightEye ? target.RenderTargetViewRight : target.RenderTargetView;
context.OutputMerger.SetTargets(target.DepthStencilView, currentTarget);
// [clear color/depth]
// [render scene]
}
// ...called once per eye, every frame
where ITarget is an interface for a class providing access to the backbuffer, render targets, and so on.
That's it, DirectX will take care of everything. Hope this helps.
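For reference, here is a minimal sketch of that stereo swap-chain creation. It assumes SharpDX type names (SlimDX, as used in the question, does not expose the DXGI 1.2 SwapChain1 interface) and reuses the 1920x1080 form handle from the question; treat it as an outline, not drop-in code.
// Sketch (SharpDX): create a DXGI 1.2 stereo swap chain by setting Stereo = true.
var device = new SharpDX.Direct3D11.Device(SharpDX.Direct3D.DriverType.Hardware,
                                           SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport);
var desc1 = new SharpDX.DXGI.SwapChainDescription1
{
    Width = 1920,
    Height = 1080,
    Format = SharpDX.DXGI.Format.R8G8B8A8_UNorm,
    Stereo = true,                                        // the key flag
    SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
    Usage = SharpDX.DXGI.Usage.RenderTargetOutput,
    BufferCount = 2,
    Scaling = SharpDX.DXGI.Scaling.Stretch,
    SwapEffect = SharpDX.DXGI.SwapEffect.FlipSequential   // stereo requires a flip-model swap effect
};
using (var dxgiDevice = device.QueryInterface<SharpDX.DXGI.Device2>())
using (var adapter = dxgiDevice.Adapter)
using (var factory = adapter.GetParent<SharpDX.DXGI.Factory2>())
{
    var swapChain = new SharpDX.DXGI.SwapChain1(factory, device, form.Handle, ref desc1);
    // With Stereo = true, back buffer 0 is a two-slice texture array:
    // create one RenderTargetView per slice (slice 0 = left eye, slice 1 = right eye).
}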

Try creating the backbuffer with width = 1920 and not 3840.
Stretch each image to half its width and put them side by side.
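If you go that route, a minimal System.Drawing sketch of the squeeze-and-pack step (assuming the left_im/right_im bitmaps from the question) could look like:
// Squeeze each eye to half width and pack them side by side in one 1920x1080 frame.
using (var sideBySide = new Bitmap(1920, 1080))
using (var g = Graphics.FromImage(sideBySide))
{
    g.DrawImage(left_im, new Rectangle(0, 0, 960, 1080));    // left eye, half width
    g.DrawImage(right_im, new Rectangle(960, 0, 960, 1080)); // right eye, half width
    // upload sideBySide to the staging texture in place of the two separate copies
}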

I remember seeing this exact same question a couple of days ago while searching the Nvidia Developer forums. Unfortunately the forums are down due to a recent hacker attack. I remember that the OP on that thread was able to get it working with DX11 and SlimDX using the signature hack. You do not use the StretchRectangle method; it was something like CreateResourceRegion(), but not exactly that, I can't remember. It might be the CopyResource() or CopySubresourceRegion() methods found in this similar thread on Stack Overflow:
Copy Texture to Texture

Also, are you rendering the image continuously, or at least a few times? I was doing the same thing in DX9 and had to tell DX to render 3 frames before the driver recognized it as 3D Vision. Did your glasses kick on? Is your backbuffer (width * 2) x (height + 1), and are you writing to it like so:
 _____________________________
|              |              |
|     img1     |     img2     |
|______________|______________|
|__________signature__________|  <- this last row is 1 pixel tall
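For what it's worth, the 20-byte signature row can also be built from the actual dimensions instead of hand-assembled hex, which makes the little-endian layout easier to check. A sketch, following the field order already used in the question's byte array (signature, width, height, bits per pixel, flags):
// Builds the NVSTEREO_IMAGE_SIGNATURE header that goes into the extra last row.
static byte[] BuildNvStereoHeader(uint stereoWidth, uint height, uint bpp = 32, uint flags = 0x00000002 /* SIH_SCALE_TO_FIT */)
{
    const uint NVSTEREO_IMAGE_SIGNATURE = 0x4433564e;
    using (var ms = new System.IO.MemoryStream(20))
    using (var writer = new System.IO.BinaryWriter(ms)) // BinaryWriter writes little-endian
    {
        writer.Write(NVSTEREO_IMAGE_SIGNATURE); // dwSignature
        writer.Write(stereoWidth);              // dwWidth  (full stereo width, e.g. 3840)
        writer.Write(height);                   // dwHeight (e.g. 1080)
        writer.Write(bpp);                      // dwBPP
        writer.Write(flags);                    // dwFlags
        return ms.ToArray();
    }
}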

Related

Use vtkUnsignedCharArray to import a byte[]

I don't understand why a column of pixels on the right is moved to the left, and there is also a problem with the imported colors.
This is my code:
private void RendervtkImageImportBmp2()
{
var myBitMap = new Bitmap(bmpPath);
byte[] bmpBytes = BmpHelper.BitmapToByte(myBitMap);
GCHandle hObject = GCHandle.Alloc(bmpBytes, GCHandleType.Pinned);
var bmpInptr = hObject.AddrOfPinnedObject();
vtkImageImport imageImport = new vtkImageImport();
imageImport.SetDataSpacing(1, 1, 0);
imageImport.SetDataOrigin(0, 0, 0);
imageImport.SetWholeExtent(0, myBitMap.Width - 1, 0, myBitMap.Height - 1, 0, 0);
imageImport.SetDataExtentToWholeExtent();
imageImport.SetDataScalarTypeToUnsignedChar();
imageImport.SetNumberOfScalarComponents(3);
imageImport.SetImportVoidPointer(bmpInptr);
imageImport.Update();
var actor = new vtkImageActor();
actor.SetInput(imageImport.GetOutput());
var ren = new vtkRenderer();
ren.AddActor(actor);
var renWin = new vtkRenderWindow();
renWin.SetParentId(this.Handle);
renWin.SetSize((this.Width - (this.Width / 2)), this.Height);
renWin.SetPosition(0, 0);
renWin.AddRenderer(ren);
renWin.Render();
}
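(BmpHelper.BitmapToByte is not shown above; a helper like that typically copies the raw pixel data out with LockBits, roughly as in the sketch below. Note that Stride includes per-row padding to a 4-byte boundary, and GDI+ stores channels as BGR, both of which matter when the buffer is handed to vtkImageImport as tightly packed RGB.)
static class BmpHelper
{
    // Sketch of a LockBits-based helper that returns the raw 24bpp data of a Bitmap.
    public static byte[] BitmapToByte(System.Drawing.Bitmap bmp)
    {
        var rect = new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height);
        var data = bmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadOnly,
                                System.Drawing.Imaging.PixelFormat.Format24bppRgb);
        try
        {
            var bytes = new byte[System.Math.Abs(data.Stride) * bmp.Height]; // Stride includes row padding
            System.Runtime.InteropServices.Marshal.Copy(data.Scan0, bytes, 0, bytes.Length);
            return bytes;
        }
        finally
        {
            bmp.UnlockBits(data);
        }
    }
}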
[Pictures show the result: the image after import, and the original image for comparison.]
I'm just getting started with VTK; please help me see where I made a mistake. Thank you.

Replacing colour of an Image

I am trying to replace the black colours of a picture with white, and vice versa. This is so my OCR code can read it better on white backgrounds. It currently gets the image from the clipboard:
Image img = Clipboard.GetImage();
pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
pictureBox1.Image = img;
I've seen some other questions where they're working with an actual bitmap, but how do I approach it directly from the Clipboard?
Another solution using the ColorMatrix class.
You can use the Graphics.DrawImage overload that accepts an ImageAttributes argument.
ImageAttributes.SetColorMatrix() sets the color-adjustment matrix, optionally targeting a specific category (Bitmap, Pen, Brush, etc.), and can be instructed to skip the gray colors, modify only the gray colors, or modify all colors.
The ImageAttributes.SetThreshold() method lets you regulate the color cutoff point (threshold) to fine-tune the brightness.
It accepts values from 0 to 1: when set to 0 the image is all white, when set to 1 all black (see the docs about it).
Also consider that the "inversion" depends on the original bitmap's color pattern, so try different approaches. Sometimes inverting the brightness gives a better result, sometimes it doesn't.
Your OCR must be "trained" to verify which values suit it best.
Take a look at these articles:
Recoloring (MSDN)
ASCII Art Generator (CodeProject)
Brightness Matrix:
R=Red  G=Green  B=Blue  A=Alpha Channel  W=White (Brightness)
Modify the Brightness component to obtain an "Inversion":
      R  G  B  A  W
  R [ 1  0  0  0  0 ]
  G [ 0  1  0  0  0 ]
  B [ 0  0  1  0  0 ]
  A [ 0  0  0  1  0 ]
  W [ b  b  b  0  1 ]  <= Brightness
using System.Drawing;
using System.Drawing.Imaging;
// ...
Image colorImage = Clipboard.GetImage();
// Default values: no inversion, no threshold adjustment
var bmpBlackWhite = BitmapToBlackAndWhite(colorImage);
// Inverted, with the threshold adjustment set to .75f
var bmpBlackWhiteInverted = BitmapToBlackAndWhite(colorImage, true, true, .75f);
// ...
private Bitmap BitmapToBlackAndWhite(Image image, bool invert = false, bool useThreshold = false, float threshold = .5f)
{
var mxBlackWhiteInverted = new float[][]
{
new float[] { -1, -1, -1, 0, 0},
new float[] { -1, -1, -1, 0, 0},
new float[] { -1, -1, -1, 0, 0},
new float[] { 0, 0, 0, 1, 0},
new float[] { 1, 1, 1, 0, 1}
};
var mxBlackWhite = new float[][]
{
new float[] { 1, 1, 1, 0, 0},
new float[] { 1, 1, 1, 0, 0},
new float[] { 1, 1, 1, 0, 0},
new float[] { 0, 0, 0, 1, 0},
new float[] {-1, -1, -1, 0, 1}
};
var bitmap = new Bitmap(image.Width, image.Height);
using (var g = Graphics.FromImage(bitmap))
using (var attributes = new ImageAttributes()) {
attributes.SetColorMatrix(new ColorMatrix(invert ? mxBlackWhiteInverted : mxBlackWhite));
// Adjust the threshold as needed
if (useThreshold) attributes.SetThreshold(threshold);
var rect = new Rectangle(Point.Empty, image.Size);
g.DrawImage(image, rect, 0, 0, image.Width, image.Height, GraphicsUnit.Pixel, attributes);
return bitmap;
}
}
You can use the ColorMap and ImageAttributes classes from the System.Drawing.Imaging namespace to directly replace the pixels in your image:
Image img = Clipboard.GetImage();
if (img != null) {
ColorMap[] cm = new ColorMap[1];
cm[0] = new ColorMap();
cm[0].OldColor = Color.Black;
cm[0].NewColor = Color.White;
ImageAttributes ia = new ImageAttributes();
ia.SetRemapTable(cm);
using (Graphics g = Graphics.FromImage(img)) {
g.DrawImage(img, new Rectangle(Point.Empty, img.Size), 0, 0, img.Width, img.Height,
GraphicsUnit.Pixel, ia);
}
pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
pictureBox1.Image = img;
}
If your image is B&W, it is very easy in OpenCV (OpenCvSharp):
// bitmap is your System.Drawing.Bitmap
var inverted = (255 - bitmap.ToMat()).ToMat()
    .ToBitmap();              // or .ToWriteableBitmap(), or .ToBitmapSource()
OpenCV can be a little overkill if this is your only image manipulation in the whole app.

Convert grayscale partially transparent image to a single color in c#

I am trying to create a function that takes a grayscale image and a color, and colors the grayscale image with that shade while keeping the shading levels of the grayscale image. The function also should not color the transparent parts of the image. I have multiple layers (multiple PNGs) that I will be combining later, and I only need to color certain layers. I have looked around and found similar things, but not quite what I need. I know how to do it in HTML5 on the front end using Canvas, but I need a way to achieve the same thing on the backend, using, I am guessing, either a manual method with locked bitmap memory or a ColorMatrix class. Can anyone help me? Graphics aren't my strongest area, but I am slowly learning. See the function below for what I did in JavaScript and now need in C#. The hidden-canvas part isn't important, because I am doing this server side and saving to a PNG file...
function drawImage(imageObj, color) {
var hidden_canvas = document.createElement("canvas");
hidden_canvas.width = imageObj.width;
hidden_canvas.height = imageObj.height;
var hidden_context = hidden_canvas.getContext("2d");
// draw the image on the hidden canvas
hidden_context.drawImage(imageObj, 0, 0);
if (color !== undefined) {
var imageData = hidden_context.getImageData(0, 0, imageObj.width, imageObj.height);
var data = imageData.data;
for (var i = 0; i < data.length; i += 4) {
var brightness = 0.34 * data[i] + 0.5 * data[i + 1] + 0.16 * data[i + 2];
//red
data[i] = brightness + color.R;
//green
data[i + 1] = brightness + color.G;
//blue
data[i + 2] = brightness + color.B;
}
//overwrite original image
hidden_context.putImageData(imageData, 0, 0);
}
var canvas = document.getElementById('card');
var context = canvas.getContext('2d');
context.drawImage(hidden_canvas, 0, 0);
};
This should do the job:
public static Bitmap MakeChromaChange(Bitmap bmp0, Color tCol, float gamma)
{
Bitmap bmp1 = new Bitmap(bmp0.Width, bmp0.Height);
using (Graphics g = Graphics.FromImage(bmp1))
{
float f = (tCol.R + tCol.G + tCol.B) / 765f;
float tr = tCol.R / 255f - f;
float tg = tCol.G / 255f - f;
float tb = tCol.B / 255f - f;
ColorMatrix colorMatrix = new ColorMatrix(new float[][]
{ new float[] {1f + tr, 0, 0, 0, 0},
new float[] {0, 1f + tg, 0, 0, 0},
new float[] {0, 0, 1f + tb, 0, 0},
new float[] {0, 0, 0, 1, 0},
new float[] {0, 0, 0, 0, 1} });
ImageAttributes attributes = new ImageAttributes();
attributes.SetGamma(gamma);
attributes.SetColorMatrix(colorMatrix);
g.DrawImage(bmp0, new Rectangle(0, 0, bmp0.Width, bmp0.Height),
0, 0, bmp0.Width, bmp0.Height, GraphicsUnit.Pixel, attributes);
}
return bmp1;
}
Note that I kept a gamma parameter; if you don't need it, keep the value at 1f.
Here it is at work, adding first red, then more red and some blue:
Transparent pixels are not affected.
For more on ColorMatrix here is a really nice intro!
As a fun project, I applied the known colors to a known face.
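For completeness, the "manual" locked-bitmap route mentioned in the question could look roughly like the sketch below. It mirrors the JavaScript loop (same 0.34/0.5/0.16 brightness weights), skips fully transparent pixels, and assumes a 32bppArgb source; it is an illustration, not a drop-in replacement for the ColorMatrix version above.
// Assumes: using System; using System.Drawing; using System.Drawing.Imaging;
//          using System.Runtime.InteropServices;
public static Bitmap TintGrayscale(Bitmap source, Color color)
{
    var rect = new Rectangle(0, 0, source.Width, source.Height);
    var result = new Bitmap(source.Width, source.Height, PixelFormat.Format32bppArgb);
    using (var src = source.Clone(rect, PixelFormat.Format32bppArgb))
    {
        BitmapData srcData = src.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
        BitmapData dstData = result.LockBits(rect, ImageLockMode.WriteOnly, PixelFormat.Format32bppArgb);
        try
        {
            int count = Math.Abs(srcData.Stride) * src.Height;
            byte[] px = new byte[count];
            Marshal.Copy(srcData.Scan0, px, 0, count);
            for (int i = 0; i < count; i += 4)          // GDI+ byte order is B, G, R, A
            {
                if (px[i + 3] == 0) continue;           // leave fully transparent pixels alone
                float brightness = 0.34f * px[i + 2] + 0.5f * px[i + 1] + 0.16f * px[i];
                px[i + 2] = (byte)Math.Min(255f, brightness + color.R); // red
                px[i + 1] = (byte)Math.Min(255f, brightness + color.G); // green
                px[i]     = (byte)Math.Min(255f, brightness + color.B); // blue
            }
            Marshal.Copy(px, 0, dstData.Scan0, count);
        }
        finally
        {
            src.UnlockBits(srcData);
            result.UnlockBits(dstData);
        }
    }
    return result;
}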

SlimDX 11 Depth Buffer issues

I am getting an issue with the SlimDX March SDK (for the June 2010 DirectX SDK, I believe). The problem is that whenever I attach the depth view to the output merger state, I don't get any output on the screen. I have compared my code with the DX11 samples and it does seem to be correct. I have tried all sorts of flags and formats for the depth test (including always passing, etc.), but nothing seems to work. I'd appreciate it if anyone can spot a mistake. Here is the code, in steps:
Initialize the back buffer:
D3DDevice device;
SwapChain swapChain;
/// Create the swap chain
SwapChainDescription desc = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription = new ModeDescription
{
Width = ContextSettings.Width,
Height = ContextSettings.Height,
RefreshRate = new SlimDX.Rational(ContextSettings.RefreshRate, 1),
Format = ContextSettings.BufferFormat,
},
IsWindowed = !ContextSettings.FullScreen,
OutputHandle = WindowHandle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput,
};
FeatureLevel[] featureLevels = new FeatureLevel[] { FeatureLevel.Level_11_0, FeatureLevel.Level_10_1 };
DriverType driverType = DriverType.Hardware;
D3DDevice.CreateWithSwapChain(driverType, DeviceCreationFlags.Debug, featureLevels, desc, out device, out swapChain);
Device = device;
SwapChain = swapChain;
/// Setup window association
Factory factory = swapChain.GetParent<Factory>();
factory.SetWindowAssociation(WindowHandle, WindowAssociationFlags.IgnoreAll);
/// Setup back buffers and render target views
RenderBuffer = DXTexture2D.FromSwapChain<DXTexture2D>(swapChain, 0);
RenderView = new RenderTargetView(Device, RenderBuffer);
Then initialize the depth buffer:
Format depthFormat = Format.D32_Float;
Texture2DDescription depthBufferDesc = new Texture2DDescription
{
ArraySize = 1,
BindFlags = BindFlags.DepthStencil,
CpuAccessFlags = CpuAccessFlags.None,
Format = depthFormat,
Height = width,
Width = height,
MipLevels = 1,
OptionFlags = ResourceOptionFlags.None,
SampleDescription = new SampleDescription( 1, 0 ),
Usage = ResourceUsage.Default
};
DepthBuffer = new DXTexture2D(Device, depthBufferDesc);
DepthStencilViewDescription dsViewDesc = new DepthStencilViewDescription
{
ArraySize = 0,
Format = depthFormat,
Dimension = DepthStencilViewDimension.Texture2D,
MipSlice = 0,
Flags = 0,
FirstArraySlice = 0
};
DepthView = new DepthStencilView(Device, DepthBuffer, dsViewDesc);
DepthStencilStateDescription dsStateDesc = new DepthStencilStateDescription()
{
IsDepthEnabled = true,
IsStencilEnabled = false,
DepthWriteMask = DepthWriteMask.All,
DepthComparison = Comparison.Less,
};
DepthState = DepthStencilState.FromDescription(Device, dsStateDesc);
Set up the render targets:
DeviceContext.OutputMerger.DepthStencilState = DepthState;
DeviceContext.OutputMerger.SetTargets(DepthView, RenderView);
DeviceContext.Rasterizer.SetViewports(new Viewport(0, 0, ContextSettings.Width, ContextSettings.Height, 0.0f, 1.0f));
Clear();
As soon as I remove DepthView from OutputMerger.SetTargets I start seeing images on the screen (without the depth test of course) and vice-versa otherwise.
It turned out that in depthBufferDesc I was passing width to the Height field and height to the Width field. This gave the render target and the depth target different dimensions, which is what broke it.
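In other words, the fix is simply to swap those two assignments in the description; everything else above stays the same:
Texture2DDescription depthBufferDesc = new Texture2DDescription
{
    ArraySize = 1,
    BindFlags = BindFlags.DepthStencil,
    CpuAccessFlags = CpuAccessFlags.None,
    Format = depthFormat,
    Width = width,    // was: Height = width
    Height = height,  // was: Width = height
    MipLevels = 1,
    OptionFlags = ResourceOptionFlags.None,
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Default
};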

How to do color balancing using a gray card in C#

I need to color balance an image that has an 18% gray card in it. The user loads this image into the application, then clicks on the gray card. From here is where I need help with an algorithm to color balance the image. I've found a few articles that mention doing a matrix transform, which I've tried, but without success (the image washes out or turns one color or another). The code I have now is:
int sampleSize = 20; // The square around the user's click on the gray card
int rVal = 0, gVal = 0, bVal = 0;
int count = 0;
for (int x = 0; x < sampleSize - 1; x++)
{
for (int y = 0; y < sampleSize - 1; y++)
{
System.Drawing.Color c = grayCardArea.GetPixel(x, y);
if (c.R > 0)
{
rVal += c.R;
gVal += c.G;
bVal += c.B;
rs.Add(c.R);
count++;
}
}
}
grayCardGraphics.Dispose();
int rAvg = 0, gAvg = 0, bAvg = 0;
rAvg = (int)Math.Round((decimal)rVal / (count));
gAvg = (int)Math.Round((decimal)gVal / (count));
bAvg = (int)Math.Round((decimal)bVal / (count));
// 117 is a value I found online for the neutral gray color of the gray card
float rDiff = (117 / (float)rAvg);
float gDiff = (117 / (float)gAvg);
float bDiff = (117 / (float)bAvg);
float[][] ptsArray =
{
new float[] {rDiff, 0, 0, 0, 0},
new float[] {0, gDiff, 0, 0, 0},
new float[] {0, 0, bDiff, 0, 0},
new float[] {0, 0, 0, 1, 0},
new float[] {0, 0, 0, .0f, 1}
};
// Create a ColorMatrix
ColorMatrix clrMatrix = new ColorMatrix(ptsArray);
// Create ImageAttributes
ImageAttributes imgAttribs = new ImageAttributes();
// Set color matrix
imgAttribs.SetColorMatrix(clrMatrix, ColorMatrixFlag.Default, ColorAdjustType.Default);
// Draw image with ImageAttributes
outputImageGraphics.DrawImage(srcImage, new System.Drawing.Rectangle(0, 0, srcImage.Width, srcImage.Height),
0, 0, srcImage.Width, srcImage.Height,
GraphicsUnit.Pixel, imgAttribs);
Viewing a saved copy of the outputImage shows an odd transformation of the image.
Any help is greatly appreciated!
My company, Atalasoft, has a free .NET Imaging SDK, with a class called LevelsCommand, that I think will do what you want.
http://atalasoft.com/photofree
Code is something like
AtalaImage img = new AtalaImage("filename");
LevelsCommand cmd = new LevelsCommand(/* ... */ ); // need to pass in leveling colors
img = cmd.Apply(img).Image;
img.Save("filename-new", new PngEncoder(), null); // or could be new JpegEncoder() or something else
You should use proper extensions on filenames to indicate the format.
Your first assumption appears to be that the image was properly exposed in the first place and that making the gray card read 117, 117, 117 will solve the problem. My advice is to leave the exposure alone and adjust just the color cast. You might find a different color model useful -- e.g., HSL. The saturation of a gray card should always be zero.
Alternatively, I have an example gray target reading 71, 72, 60. This is a bit warm. It would stand to reason that a more correct reading would be 67,67,67 or (R+G+B)/3. Because the image is a bit underexposed, I left it that way, but achieved a true neutral without altering the density of the image.
I hope this provides some help along your path toward getting the color right.
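A sketch of that approach, reusing the rAvg/gAvg/bAvg values already sampled in the question's loop: scale each channel toward the sample's own mean rather than a fixed 117, so the cast is removed without changing overall density.
// Neutralize the color cast: target the gray sample's own mean, not a fixed 117.
float target = (rAvg + gAvg + bAvg) / 3f;
float rGain = target / rAvg;
float gGain = target / gAvg;
float bGain = target / bAvg;
float[][] neutralizeMatrix =
{
    new float[] { rGain, 0,     0,     0, 0 },
    new float[] { 0,     gGain, 0,     0, 0 },
    new float[] { 0,     0,     bGain, 0, 0 },
    new float[] { 0,     0,     0,     1, 0 },
    new float[] { 0,     0,     0,     0, 1 }
};
// Use neutralizeMatrix with ImageAttributes.SetColorMatrix exactly as in the question's code.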
