I have created my point cloud using the code from librealsense:
var points = pc.Process(depthFrame).As<Points>();
//float depth = depthFrame.GetDistance(x, y);
//bbox = (287, 23, 86, 320);
// We colorize the depth frame for visualization purposes
var colorizedDepth = colorizer.Process<VideoFrame>(depthFrame).DisposeWith(frames);
//var org = Cv2.ImRead(colorFrame);
// CopyVertices is extensible, any of these will do:
//var vertices = new float[points.Count * 3];
var vertices = new Intel.RealSense.Math.Vertex[points.Count];
// var vertices = new UnityEngine.Vector3[points.Count];
// var vertices = new System.Numerics.Vector3[points.Count]; // SIMD
// var vertices = new GlmSharp.vec3[points.Count];
//var vertices = new byte[points.Count * 3 * sizeof(float)];
points.CopyVertices(vertices);
And I have converted the point cloud to a Point3DCollection from Media3D:
Point3DCollection pointss = new Point3DCollection();
foreach (var vertex in vertices)
{
var point3D = new Point3D(vertex.x, vertex.y, vertex.z);
pointss.Add(point3D);
}
I want to display those points using this markup in the XAML file:
<h:HelixViewport3D Grid.ColumnSpan="1" Grid.Column="1" Margin="2.4,1,0,-0.4" >
<h:DefaultLights/>
<h:PointsVisual3D Points="{Binding pointss}" Color="Red" Size="2"/>
</h:HelixViewport3D>
But I don't see my point cloud. Is there something wrong with my code?
The code that I am using right now looks like this. I have added what was given in the answer, but I get the error "Object reference not set to an instance of an object". The code is below:
namespace Intel.RealSense
{
/// <summary>
/// Interaction logic for Window.xaml
/// </summary>
public partial class CaptureWindow : System.Windows.Window
{
private Pipeline pipeline;
private Colorizer colorizer;
private CancellationTokenSource tokenSource = new CancellationTokenSource();
private Pipeline pipe = new Pipeline();
private PointCloud pc = new PointCloud();
private ThresholdFilter threshold;
private Point3DCollection _pointss;
public Point3DCollection pointss
{
get => _pointss;
set
{
if (_pointss == value)
return;
_pointss = value;
OnPropertyChanged();
}
}
public event PropertyChangedEventHandler PropertyChanged;
protected virtual void OnPropertyChanged(string propertyName = null)
{
PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(propertyName));
}
//static CvTrackbar Track;
//static OpenCvSharp.Point[][] contours;
//static HierarchyIndex[] hierarchy;
static Action<VideoFrame> UpdateImage(Image img)
{
var wbmp = img.Source as WriteableBitmap;
return new Action<VideoFrame>(frame =>
{
var rect = new Int32Rect(0, 0, frame.Width, frame.Height);
wbmp.WritePixels(rect, frame.Data, frame.Stride * frame.Height, frame.Stride);
});
}
public CaptureWindow()
{
InitializeComponent();
ModelImporter import = new ModelImporter();
try
{
Action<VideoFrame> updateDepth;
Action<VideoFrame> updateColor;
// The colorizer processing block will be used to visualize the depth frames.
colorizer = new Colorizer();
// Create and configure the pipeline to stream color and depth frames.
pipeline = new Pipeline();
var cfg = new Config();
cfg.EnableStream(Stream.Depth, 640, 480);
cfg.EnableStream(Stream.Color, Format.Rgb8);
var pp = pipeline.Start(cfg);
PipelineProfile selection = pp;
var depth_stream = selection.GetStream<VideoStreamProfile>(Stream.Depth);
Intrinsics i = depth_stream.GetIntrinsics();
float[] fov = i.FOV;
SetupWindow(pp, out updateDepth, out updateColor);
Task.Factory.StartNew(() =>
{
while (!tokenSource.Token.IsCancellationRequested)
{
threshold = new ThresholdFilter();
threshold.Options[Option.MinDistance].Value = 0.0F;
threshold.Options[Option.MaxDistance].Value = 0.1F;
using (var releaser = new FramesReleaser())
{
using (var frames = pipeline.WaitForFrames().DisposeWith(releaser))
{
var pframes = frames
.ApplyFilter(threshold).DisposeWith(releaser);
}
}
// We wait for the next available FrameSet and use it as a releaser object that tracks
// all newly allocated .NET frames and ensures deterministic finalization
// at the end of scope.
using (var frames = pipeline.WaitForFrames())
{
var colorFrame = frames.ColorFrame.DisposeWith(frames);
var depthFrame = frames.DepthFrame.DisposeWith(frames);
var points = pc.Process(depthFrame).As<Points>();
//float depth = depthFrame.GetDistance(x, y);
//bbox = (287, 23, 86, 320);
// We colorize the depth frame for visualization purposes
var colorizedDepth = colorizer.Process<VideoFrame>(depthFrame).DisposeWith(frames);
//var org = Cv2.ImRead(colorFrame);
// CopyVertices is extensible, any of these will do:
//var vertices = new float[points.Count * 3];
var vertices = new Intel.RealSense.Math.Vertex[points.Count];
// var vertices = new UnityEngine.Vector3[points.Count];
// var vertices = new System.Numerics.Vector3[points.Count]; // SIMD
// var vertices = new GlmSharp.vec3[points.Count];
//var vertices = new byte[points.Count * 3 * sizeof(float)];
points.CopyVertices(vertices);
//Point3DCollection pointss = new Point3DCollection();
foreach (var vertex in vertices)
{
var point3D = new Point3D(vertex.x, vertex.y, vertex.z);
pointss.Add(point3D);
}
// Render the frames.
Dispatcher.Invoke(DispatcherPriority.Render, updateDepth, colorizedDepth);
Dispatcher.Invoke(DispatcherPriority.Render, updateColor, colorFrame);
Dispatcher.Invoke(new Action(() =>
{
String depth_dev_sn = depthFrame.Sensor.Info[CameraInfo.SerialNumber];
txtTimeStamp.Text = depth_dev_sn + " : " + String.Format("{0,-20:0.00}", depthFrame.Timestamp) + "(" + depthFrame.TimestampDomain.ToString() + ")";
}));
//HelixToolkit.Wpf.
}
}
}, tokenSource.Token);
}
catch (Exception ex)
{
System.Windows.MessageBox.Show(ex.Message);
System.Windows.Application.Current.Shutdown();
}
}
private void control_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
tokenSource.Cancel();
}
private void SetupWindow(PipelineProfile pipelineProfile, out Action<VideoFrame> depth, out Action<VideoFrame> color)
{
using (var p = pipelineProfile.GetStream(Stream.Depth).As<VideoStreamProfile>())
imgDepth.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
depth = UpdateImage(imgDepth);
using (var p = pipelineProfile.GetStream(Stream.Color).As<VideoStreamProfile>())
imgColor.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
color = UpdateImage(imgColor);
}
}
}
You can only bind to public properties, not to fields, so you have to define it like this:
public Point3DCollection pointss { get; } = new Point3DCollection();
If you want to reassign the collection at runtime, you should also implement INotifyPropertyChanged, otherwise assigning a new collection will not trigger a binding update and the change will not be reflected in the UI.
public class YourViewModel : INotifyPropertyChanged
{
private Point3DCollection _pointss;
public Point3DCollection pointss
{
get => _pointss;
set
{
if (_pointss == value)
return;
_pointss = value;
OnPropertyChanged();
}
}
public event PropertyChangedEventHandler PropertyChanged;
protected virtual void OnPropertyChanged([CallerMemberName] string propertyName = null) // requires using System.Runtime.CompilerServices;
{
PropertyChanged?.Invoke(this, new PropertyChangedEventArgs(propertyName));
}
}
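Two notes on wiring this up, since the capture loop in the question runs on a worker thread. The sketch below assumes the window sets DataContext = this; in its constructor (otherwise {Binding pointss} has no source) and replaces the foreach loop at the end of the capture loop. Point3DCollection is a Freezable, so a frozen copy can be built on the worker thread and safely read by the UI thread:
// Build the collection on the worker thread...
var newPoints = new Point3DCollection(vertices.Length);
foreach (var vertex in vertices)
{
    newPoints.Add(new Point3D(vertex.x, vertex.y, vertex.z));
}
newPoints.Freeze(); // frozen Freezables may be read from any thread

// ...then hand it to the UI thread through the property setter,
// which raises PropertyChanged and refreshes the binding.
Dispatcher.Invoke(() => pointss = newPoints);
Replacing the whole collection once per frame is also much cheaper than calling Add on a live, bound collection thousands of times, since each Add would otherwise invalidate the visual.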
I was trying to do face recognition in C# with "EigenFaceRecognizer", but the problem is that the recognizer identifies an unknown face as a known one. Once the recognizer is trained on that unknown face, it recognizes it correctly, but it never shows "Unknown" as written in the code below.
This is the full code to recognize, capture, save, and train faces:
using System;
using System.Collections.Generic;
using System.Windows.Forms;
using System.IO;
using System.Threading;
using System.Drawing;
using System.ComponentModel;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Face;
using Emgu.CV.Structure;
namespace FaceRecognition
{
class FaceRecognition : Form
{
private double distance = 50;
private CascadeClassifier CascadeClassifier = new CascadeClassifier(Environment.CurrentDirectory + "/Resources/Haarcascade/haarcascade_frontalface_alt2.xml");
private Image<Bgr, byte> Frame = (Image<Bgr, byte>)null;
private Capture camera;
private Mat mat = new Mat();
private List<Image<Gray, byte>> trainedFaces = new List<Image<Gray, byte>>();
private List<int> PersonLabs = new List<int>();
private bool isEnable_SaveImage = false;
private string ImageName;
private PictureBox PictureBox_Frame;
private PictureBox PictureBox_smallFrame;
private string setPersonName;
public bool isTrained = false;
private List<string> Names = new List<string>();
private EigenFaceRecognizer eigenFaceRecognizer;
private IContainer components = (IContainer)null;
private List<String> retNames = new List<string>();
public FaceRecognition()
{
this.InitializeComponent();
if (Directory.Exists(Environment.CurrentDirectory + "\\Training_Data\\Faces\\Image"))
return;
Directory.CreateDirectory(Environment.CurrentDirectory + "\\Training_Data\\Faces\\Image");
}
public void getPersonName(Control control)
{
System.Windows.Forms.Timer timer = new System.Windows.Forms.Timer();
timer.Tick += new EventHandler(timer_getPersonName_Tick);
timer.Interval = 100;
timer.Start();
void timer_getPersonName_Tick(object sender, EventArgs e) => control.Text = this.setPersonName;
}
public void openCamera(PictureBox pictureBox_Camera, PictureBox pictureBox_Trained)
{
this.PictureBox_Frame = pictureBox_Camera;
this.PictureBox_smallFrame = pictureBox_Trained;
this.camera = new Capture();
this.camera.ImageGrabbed += new EventHandler(this.Camera_ImageGrabbed);
this.camera.Start();
}
public void Save_IMAGE(string imageName)
{
this.ImageName = imageName;
this.isEnable_SaveImage = true;
}
private void Camera_ImageGrabbed(object sender, EventArgs e)
{
this.camera.Retrieve((IOutputArray)this.mat, 0);
this.Frame = this.mat.ToImage<Bgr, byte>(false).Resize(this.PictureBox_Frame.Width, this.PictureBox_Frame.Height, (Inter)2);
this.detectFace();
this.PictureBox_Frame.Image = (Image)this.Frame.Bitmap;
}
private void detectFace()
{
Image<Bgr, byte> resultImage = this.Frame.Convert<Bgr, byte>();
Mat mat = new Mat();
CvInvoke.CvtColor((IInputArray)this.Frame, (IOutputArray)mat, (ColorConversion)6, 0);
CvInvoke.EqualizeHist((IInputArray)mat, (IOutputArray)mat);
Rectangle[] rectangleArray = this.CascadeClassifier.DetectMultiScale((IInputArray)mat, 1.1, 4, new Size(), new Size());
if ((uint)rectangleArray.Length > 0U)
{
foreach (Rectangle face in rectangleArray)
{
Image<Bgr, byte> frame = this.Frame;
Rectangle rectangle = face;
Bgr bgr = new Bgr(Color.SpringGreen);
MCvScalar mcvScalar = ((Bgr)bgr).MCvScalar;
CvInvoke.Rectangle((IInputOutputArray)frame, rectangle, mcvScalar, 2, (LineType)8, 0);
this.SaveImage(face);
resultImage.ROI = face;
this.trainedImage();
String name = this.CheckName(resultImage, face);
if (!retNames.Contains(name))
{
retNames.Add(name);
}
}
}
else
{
this.setPersonName = "";
retNames.Clear();
}
}
private void SaveImage(Rectangle face)
{
if (!this.isEnable_SaveImage)
return;
Image<Bgr, byte> image = this.Frame.Convert<Bgr, byte>();
image.ROI = face;
Task.Factory.StartNew(() =>
{
for(int i = 0; i < 40; i++)
{
((CvArray<byte>)image.Resize(100, 100, (Inter)2)).Save(Environment.CurrentDirectory + "\\Training_Data\\Faces\\Image\\" + this.ImageName + "_" + DateTime.Now.ToString("dd-MM-yyyy-hh-mm-ss") + ".jpg"); // "MM" = month; lowercase "mm" is minutes
Thread.Sleep(1000);
}
});
this.isEnable_SaveImage = false;
this.trainedImage();
}
private void trainedImage()
{
try
{
int num = 0;
this.trainedFaces.Clear();
this.PersonLabs.Clear();
this.Names.Clear();
foreach (string file in Directory.GetFiles(Directory.GetCurrentDirectory() + "\\Training_Data\\Faces\\Image", "*.jpg", SearchOption.AllDirectories))
{
this.trainedFaces.Add(new Image<Gray, byte>(file));
this.PersonLabs.Add(num);
String name = file.Split('\\').Last().Split('_')[0];
this.Names.Add(name);
++num;
}
this.eigenFaceRecognizer = new EigenFaceRecognizer(num, this.distance);
((FaceRecognizer)this.eigenFaceRecognizer).Train<Gray, byte>(this.trainedFaces.ToArray(), this.PersonLabs.ToArray());
}
catch
{
}
}
private string CheckName(Image<Bgr, byte> resultImage, Rectangle face)
{
retNames.Clear();
try
{
if (!this.isTrained)
return null;
Image<Gray, byte> image = resultImage.Convert<Gray, byte>().Resize(100, 100, (Inter)2);
CvInvoke.EqualizeHist((IInputArray)image, (IOutputArray)image);
FaceRecognizer.PredictionResult predictionResult = ((FaceRecognizer)this.eigenFaceRecognizer).Predict(image);
if (predictionResult.Label != -1 && predictionResult.Distance < 5000)
{
this.PictureBox_smallFrame.Image = (Image)this.trainedFaces[(int)predictionResult.Label].Bitmap;
this.setPersonName = this.Names[(int)predictionResult.Label].Replace(Environment.CurrentDirectory + "\\Training_Data\\Faces\\Image\\", "").Replace(".jpg", "");
Image<Bgr, byte> frame = this.Frame;
string setPersonName = this.setPersonName;
Point point = new Point(face.X - 2, face.Y - 2);
Bgr bgr = new Bgr(Color.Gold);
MCvScalar mcvScalar = ((Bgr)bgr).MCvScalar;
CvInvoke.PutText((IInputOutputArray)frame, setPersonName, point, (FontFace)1, 1.0, mcvScalar, 1, (LineType)8, false);
return setPersonName;
}
else
{
Image<Bgr, byte> frame = this.Frame;
Point point = new Point(face.X - 2, face.Y - 2);
Bgr bgr = new Bgr(Color.OrangeRed);
MCvScalar mcvScalar = ((Bgr)bgr).MCvScalar;
CvInvoke.PutText((IInputOutputArray)frame, "Unknown", point, (FontFace)1, 1.0, mcvScalar, 1, (LineType)8, false);
return "Unknown";
}
}
catch
{
return null;
}
}
protected override void Dispose(bool disposing)
{
if (disposing && this.components != null)
this.components.Dispose();
base.Dispose(disposing);
}
private void InitializeComponent()
{
this.SuspendLayout();
this.AutoScaleDimensions = new SizeF(8f, 16f);
this.AutoScaleMode = AutoScaleMode.Font;
this.ClientSize = new Size(800, 450);
this.Name = nameof(FaceRecognition);
this.Text = nameof(FaceRecognition);
this.ResumeLayout(false);
}
public List<String> getRetNames { get => retNames; }
private String setRetNames { set => retNames.Add(value); }
}
}
This is the main piece of the code (if you are in a hurry), where it recognizes the face:
private string CheckName(Image<Bgr, byte> resultImage, Rectangle face)
{
retNames.Clear();
try
{
if (!this.isTrained)
return null;
Image<Gray, byte> image = resultImage.Convert<Gray, byte>().Resize(100, 100, (Inter)2);
CvInvoke.EqualizeHist((IInputArray)image, (IOutputArray)image);
FaceRecognizer.PredictionResult predictionResult = ((FaceRecognizer)this.eigenFaceRecognizer).Predict((IInputArray)image);
if (predictionResult.Label != -1 && predictionResult.Distance < 5000)
{
this.PictureBox_smallFrame.Image = (Image)this.trainedFaces[(int)predictionResult.Label].Bitmap;
this.setPersonName = this.Names[(int)predictionResult.Label].Replace(Environment.CurrentDirectory + "\\Training_Data\\Faces\\Image\\", "").Replace(".jpg", "");
Image<Bgr, byte> frame = this.Frame;
string setPersonName = this.setPersonName;
Point point = new Point(face.X - 2, face.Y - 2);
Bgr bgr = new Bgr(Color.Gold);
MCvScalar mcvScalar = ((Bgr)bgr).MCvScalar;
CvInvoke.PutText((IInputOutputArray)frame, setPersonName, point, (FontFace)1, 1.0, mcvScalar, 1, (LineType)8, false);
return setPersonName;
}
else
{
Image<Bgr, byte> frame = this.Frame;
Point point = new Point(face.X - 2, face.Y - 2);
Bgr bgr = new Bgr(Color.OrangeRed);
MCvScalar mcvScalar = ((Bgr)bgr).MCvScalar;
CvInvoke.PutText((IInputOutputArray)frame, "Unknown", point, (FontFace)1, 1.0, mcvScalar, 1, (LineType)8, false);
return "Unknown";
}
}
catch
{
return null;
}
}
Now, no matter which face it is, FaceRecognizer.PredictionResult predictionResult = ((FaceRecognizer)this.eigenFaceRecognizer).Predict((IInputArray)image); always returns predictionResult.Label = 0 and predictionResult.Distance = 0.
What I tried:
Changing private double distance = 50;. Initially it was 1E+19, then I made it 5000, then 2000, and tweaked it with many other values.
Using all the CascadeClassifier xml files.
But in every one of these attempts, the values of predictionResult.Label and predictionResult.Distance were always 0.
P.S.: This question may look like a duplicate of one or two other questions, but those questions neither contain sufficient information from the asker nor have an answer.
I had a similar problem. What fixed it for me was to make sure that
your labels do not contain a zero; 0 is reserved for errors and similar cases.
Get a public image dataset and train your "unknown" class on that.
This is what I used: http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
Then Eigen will recognize unknown faces as unknown most of the time.
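A minimal sketch of that idea, using the same Emgu CV calls as in the question (unknownDir and knownDir are placeholder paths you would adapt): give the public "unknown" images their own label, start real people at 1 or higher, and never hand label 0 to the recognizer. Note that the question's training loop increments the label per image file, so every photo becomes its own "person" and the first one gets label 0:
// Sketch: label 0 is never used; 1 = "unknown", 2+ = real people.
var faces = new List<Image<Gray, byte>>();
var labels = new List<int>();

const int UnknownLabel = 1;
foreach (string file in Directory.GetFiles(unknownDir, "*.jpg")) // e.g. CelebA samples
{
    faces.Add(new Image<Gray, byte>(file).Resize(100, 100, Inter.Cubic));
    labels.Add(UnknownLabel);
}

int personLabel = 2; // in real code: one label per person, shared by all images of that person
foreach (string file in Directory.GetFiles(knownDir, "*.jpg"))
{
    faces.Add(new Image<Gray, byte>(file).Resize(100, 100, Inter.Cubic));
    labels.Add(personLabel); // this sketch treats all files here as one person
}

// 80 components is an arbitrary starting point; tune it for your data.
var recognizer = new EigenFaceRecognizer(80, double.PositiveInfinity);
((FaceRecognizer)recognizer).Train<Gray, byte>(faces.ToArray(), labels.ToArray());
At prediction time, treat result.Label == UnknownLabel, or a distance above your threshold, as "Unknown".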
I have a CustomMapRenderer in an iOS project and I want to add two more fields to the marker callout.
In CustomMKAnnotationView.cs I created two more properties, CodeNum and AlertLevel:
using MapKit;
namespace MaritsaTundzhaForecast.iOS
{
public class CustomMKAnnotationView : MKAnnotationView
{
public string Name { get; set; }
public string Url { get; set; }
public int AlertLevel { get; set; }
public int CodeNum { get; set; }
public CustomMKAnnotationView(IMKAnnotation annotation, string id)
: base(annotation, id)
{
}
}
}
In CustomMapRenderer.cs I use these lines of code to set them, but when I click on the pin they do not appear:
((CustomMKAnnotationView)annotationView).AlertLevel = customPin.AlertLevel;
((CustomMKAnnotationView)annotationView).CodeNum = customPin.CodeNum;
This is the full code of GetViewForAnnotation:
protected override MKAnnotationView GetViewForAnnotation(MKMapView mapView, IMKAnnotation annotation)
{
MKAnnotationView annotationView = null;
if (annotation is MKUserLocation)
return null;
var customPin = GetCustomPin(annotation as MKPointAnnotation);
if (customPin == null)
{
throw new Exception("Custom pin not found");
}
annotationView = mapView.DequeueReusableAnnotation(customPin.Name);
if (annotationView == null)
{
annotationView = new CustomMKAnnotationView(annotation, customPin.Name);
annotationView.Image = UIImage.FromFile("pin.png");
annotationView.CalloutOffset = new CGPoint(0, 0);
annotationView.LeftCalloutAccessoryView = new UIImageView(UIImage.FromFile("green.png"));
((CustomMKAnnotationView)annotationView).Name = customPin.Name;
((CustomMKAnnotationView)annotationView).Url = customPin.Url;
((CustomMKAnnotationView)annotationView).AlertLevel = customPin.AlertLevel;
((CustomMKAnnotationView)annotationView).CodeNum = customPin.CodeNum;
}
annotationView.CanShowCallout = true;
return annotationView;
}
I have the OnDidSelectAnnotationView and OnDidDeselectAnnotationView methods, but I don't know what to write inside to display CodeNum and AlertLevel:
void OnDidSelectAnnotationView(object sender, MKAnnotationViewEventArgs e)
{
CustomMKAnnotationView customView = e.View as CustomMKAnnotationView;
customPinView = new UIView();
if (customView.Name.Equals("Xamarin"))
{
customPinView.Frame = new CGRect(0, 0, 200, 84);
/*
var image = new UIImageView(new CGRect(0, 0, 200, 84));
image.Image = UIImage.FromFile("green.png");
customPinView.AddSubview(image);
*/
customPinView.Center = new CGPoint(0, -(e.View.Frame.Height + 75));
e.View.AddSubview(customPinView);
}
}
void OnDidDeselectAnnotationView(object sender, MKAnnotationViewEventArgs e)
{
if (!e.View.Selected)
{
customPinView.RemoveFromSuperview();
customPinView.Dispose();
customPinView = null;
}
}
I use this code to create a new label, but I want to put the value from customView.AlertLevel.ToString() inside the info window.
void OnDidSelectAnnotationView(object sender, MKAnnotationViewEventArgs e)
{
CustomMKAnnotationView customView = e.View as CustomMKAnnotationView;
customPinView = new UIView();
if (customView.Name.Equals("Xamarin"))
{
customPinView.Frame = new CGRect(0, 0, 200, 84);
/*
var image = new UIImageView(new CGRect(0, 0, 200, 84));
image.Image = UIImage.FromFile("green.png");
customPinView.AddSubview(image);
*/
customPinView.Center = new CGPoint(0, -(e.View.Frame.Height + 75));
e.View.AddSubview(customPinView);
var label = new UILabel(new CGRect(0, 0, 200, 84));
label.Text = customView.AlertLevel.ToString();
customPinView.AddSubview(label);
}
}
This is a screenshot of how it looks now:
You need to modify the UI to display the additional data:
// you will need to experiment with the Bounds to fit your UI
var label = new UILabel(new CGRect(0, 0, 100, 50));
label.Text = customView.AlertLevel.ToString(); // AlertLevel is an int, so convert it
customPinView.AddSubview(label);
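If you also want CodeNum in the same callout, the simplest extension of that idea is one label per line. A sketch; the frames are rough guesses to tune against your layout:
var alertLabel = new UILabel(new CGRect(0, 0, 200, 40));
alertLabel.Text = "Alert level: " + customView.AlertLevel;
customPinView.AddSubview(alertLabel);

var codeLabel = new UILabel(new CGRect(0, 40, 200, 40));
codeLabel.Text = "Code: " + customView.CodeNum;
customPinView.AddSubview(codeLabel);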
I wrote some code to draw something in an iOS app when the server sends new coordinates to it.
I have a callback function that draws the coordinates, but when I create a new instance of a class in this function, the callback exits without any error...
Has anyone else had this problem before?
Here is my code, in case it helps:
CGPath pathtotal;
List<CGPath> path;
CGPoint initialPoint;
CGPoint latestPoint;
DrawDrawerDraw drawDrawerDraw;
public DrawGuessView(IntPtr handle) : base(handle)
{
BackgroundColor = UIColor.White;
pathtotal = new CGPath();
SocketEventHandler.Add("draw:drawer:draw", onDrawDrawerDraw);
}
public void onDrawDrawerDraw(dynamic obj)
{
drawDrawerDraw = (DrawDrawerDraw)obj;
for (int i = 0; i <= drawDrawerDraw.coords.Count; i++)
{
if (initialPoint.X != (nfloat)drawDrawerDraw.coords[i].x0 && initialPoint.Y != (nfloat)drawDrawerDraw.coords[i].y0)
{
path[i] = new CGPath();
}
initialPoint.X = (nfloat)drawDrawerDraw.coords[i].x0;
initialPoint.Y = (nfloat)drawDrawerDraw.coords[i].y0;
latestPoint.X = (nfloat)drawDrawerDraw.coords[i].x1;
latestPoint.Y = (nfloat)drawDrawerDraw.coords[i].y1;
//add lines to the touch points
if (path[i].IsEmpty)
{
path[i].AddLines(new CGPoint[] { initialPoint, latestPoint });
}
else
{
path[i].AddLineToPoint(latestPoint);
}
}
SetNeedsDisplay();
}
public override void Draw(CGRect rect)
{
base.Draw(rect);
try
{
foreach (var item in path)
{
if (!initialPoint.IsEmpty)
{
//get graphics context
using (CGContext g = UIGraphics.GetCurrentContext())
{
//set up drawing attributes
g.SetLineWidth(2);
UIColor.Black.SetStroke();
//add geometry to graphics context and draw it
pathtotal.AddPath(item);
g.AddPath(pathtotal);
g.DrawPath(CGPathDrawingMode.Stroke);
}
}
}
}
catch (Exception e) { }
}
}
There are two points you need to modify.
Initialize path in the DrawGuessView constructor:
public DrawGuessView(IntPtr handle) : base(handle)
{
BackgroundColor = UIColor.White;
pathtotal = new CGPath();
path = new List<CGPath>(); // assign the field; a local declaration here would shadow it
SocketEventHandler.Add("draw:drawer:draw", onDrawDrawerDraw);
}
path[i] = new CGPath() will cause an ArgumentOutOfRangeException; you can't set an item in a List through an index that doesn't exist yet. Use Add instead.
Modify the loop:
CGPath pathItem = null;
for (int i = 0; i < drawDrawerDraw.coords.Count; i++) // "<", not "<=": Count is one past the last valid index
{
// guard against the first iteration, where pathItem is still null
if (pathItem == null || (initialPoint.X != (nfloat)drawDrawerDraw.coords[i].x0 && initialPoint.Y != (nfloat)drawDrawerDraw.coords[i].y0))
{
pathItem = new CGPath();
}
initialPoint.X = (nfloat)drawDrawerDraw.coords[i].x0;
initialPoint.Y = (nfloat)drawDrawerDraw.coords[i].y0;
latestPoint.X = (nfloat)drawDrawerDraw.coords[i].x1;
latestPoint.Y = (nfloat)drawDrawerDraw.coords[i].y1;
//add lines to the touch points
if (pathItem.IsEmpty)
{
pathItem.AddLines(new CGPoint[] { initialPoint, latestPoint });
}
else
{
pathItem.AddLineToPoint(latestPoint);
}
path.Add(pathItem);
}
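One extra caveat, stated as an assumption because it depends on how SocketEventHandler dispatches its events: if onDrawDrawerDraw runs on a background thread, the UIKit call at the end must be marshalled back to the main thread, or the view will not redraw reliably:
// UIKit is not thread-safe; hop to the main thread before touching the view.
InvokeOnMainThread(() => SetNeedsDisplay());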
I'm attempting to create a model viewer for a game to try and learn SharpDX, but the game uses .DDS files and the viewer can only read .BMPs. I've looked far and wide on the web; the loaders I can find don't seem to work with SharpDX (I don't know, I'm a noob :D).
using SharpDX.Direct3D11;
using SharpDX.WIC;
namespace ModelViewer.Programming.GraphicClasses
{
public class TextureClass
{
public ShaderResourceView TextureResource { get; private set; }
public bool Init(Device device, string fileName)
{
try
{
using (var texture = LoadFromFile(device, new ImagingFactory(), fileName))
{
ShaderResourceViewDescription srvDesc = new ShaderResourceViewDescription()
{
Format = texture.Description.Format,
Dimension = SharpDX.Direct3D.ShaderResourceViewDimension.Texture2D,
};
srvDesc.Texture2D.MostDetailedMip = 0;
srvDesc.Texture2D.MipLevels = -1;
TextureResource = new ShaderResourceView(device, texture, srvDesc);
device.ImmediateContext.GenerateMips(TextureResource);
}
return true;
}
catch
{
return false;
}
}
public void Shutdown()
{
TextureResource?.Dispose();
TextureResource = null;
}
public Texture2D LoadFromFile(Device device, ImagingFactory factory, string fileName)
{
using (var bs = LoadBitmap(factory, fileName))
return CreateTextureFromBitmap(device, bs);
}
public BitmapSource LoadBitmap(ImagingFactory factory, string filename)
{
var bitmapDecoder = new BitmapDecoder(factory, filename, DecodeOptions.CacheOnDemand);
var result = new FormatConverter(factory);
result.Initialize(bitmapDecoder.GetFrame(0), SharpDX.WIC.PixelFormat.Format32bppPRGBA, BitmapDitherType.None, null, 0.0, BitmapPaletteType.Custom);
return result;
}
public Texture2D CreateTextureFromBitmap(Device device, BitmapSource bitmapSource)
{
int stride = bitmapSource.Size.Width * 4;
using (var buffer = new SharpDX.DataStream(bitmapSource.Size.Height * stride, true, true))
{
bitmapSource.CopyPixels(stride, buffer);
return new Texture2D(device, new Texture2DDescription()
{
Width = bitmapSource.Size.Width,
Height = bitmapSource.Size.Height,
ArraySize = 1,
BindFlags = BindFlags.ShaderResource | BindFlags.RenderTarget,
Usage = ResourceUsage.Default,
CpuAccessFlags = CpuAccessFlags.None,
Format = SharpDX.DXGI.Format.R8G8B8A8_UNorm,
MipLevels = 1,
OptionFlags = ResourceOptionFlags.GenerateMipMaps,
SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
},
new SharpDX.DataRectangle(buffer.DataPointer, stride));
}
}
}
}
I have a feeling this will need to be completely redone to utilize the DDS format. Is it easier to simply read one and then convert it to a bitmap?
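WIC's BitmapDecoder has no general DDS codec (newer Windows builds ship a limited one, but it does not cover most game textures), so the LoadBitmap path above cannot open these files. You do not have to go through BMP, though: for the simplest DDS variant the header can be parsed by hand. Below is a minimal sketch under explicit assumptions: a legacy DDS header with no DX10 extension chunk, uncompressed 32-bit BGRA pixels, and mip level 0 only. Compressed DXT/BCn textures need a real loader, for example one of the C# ports of DDSTextureLoader.
// DDS layout: 4-byte magic "DDS " (0x20534444), then a 124-byte header.
// Height is at byte offset 12, width at offset 16; for non-DX10 files
// the pixel data starts at offset 128.
public Texture2D LoadDdsFromFile(Device device, string fileName)
{
    byte[] file = System.IO.File.ReadAllBytes(fileName);
    if (BitConverter.ToUInt32(file, 0) != 0x20534444) // "DDS "
        throw new System.IO.InvalidDataException("Not a DDS file: " + fileName);

    int height = BitConverter.ToInt32(file, 12);
    int width = BitConverter.ToInt32(file, 16);
    int stride = width * 4;     // assumes uncompressed 32 bpp
    const int dataOffset = 128; // magic + header, no DX10 chunk

    var handle = System.Runtime.InteropServices.GCHandle.Alloc(
        file, System.Runtime.InteropServices.GCHandleType.Pinned);
    try
    {
        IntPtr pixels = IntPtr.Add(handle.AddrOfPinnedObject(), dataOffset);
        return new Texture2D(device, new Texture2DDescription
        {
            Width = width,
            Height = height,
            ArraySize = 1,
            MipLevels = 1,
            BindFlags = BindFlags.ShaderResource,
            Usage = ResourceUsage.Immutable,
            CpuAccessFlags = CpuAccessFlags.None,
            // Check the pixel-format masks in the header if colors look swapped;
            // many uncompressed DDS files are BGRA, some are RGBA.
            Format = SharpDX.DXGI.Format.B8G8R8A8_UNorm,
            SampleDescription = new SharpDX.DXGI.SampleDescription(1, 0),
        }, new SharpDX.DataRectangle(pixels, stride));
    }
    finally
    {
        handle.Free();
    }
}
Note that Usage = ResourceUsage.Immutable here means no mip generation; if you want GenerateMips as in your BMP path, keep Default usage with the RenderTarget binding and the GenerateMipMaps option flag instead.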
I have a framework for drawing made in C#. Recently I've been trying to do something and noticed the following problem:
When I draw a Geometry manually on a DrawingVisual using its RenderOpen, and then move it using a TranslateTransform, I sometimes lose the fill.
To see what happens, you can insert the following framework element into a Window and run it:
class MyVisual : FrameworkElement {
private readonly DrawingVisual _visual = new DrawingVisual();
private readonly Geometry _geom;
private readonly Random _r = new Random();
public MyVisual()
{
AddVisualChild(_visual);
_geom = Geometry.Parse("M 95 100 L 130 130 95 160 Z").Clone();
_geom.Transform = new TranslateTransform();
UpdateVisual();
}
public void MoveGeom() {
var transform = _geom.Transform as TranslateTransform;
var x = _r.Next(-60, 200);
var y = _r.Next(-60, 200);
transform.X = x;
transform.Y = y;
}
void UpdateVisual()
{
using (var dc = _visual.RenderOpen())
{
UpdateVisual(dc);
}
}
private void UpdateVisual(DrawingContext dc)
{
var color = Brushes.Red;
var pen = new Pen(Brushes.Blue, 1);
dc.DrawGeometry(color, pen, _geom);
}
protected override int VisualChildrenCount => 1;
protected override Visual GetVisualChild(int index) => _visual;
}
public partial class MainWindow : Window
{
Timer _t = new Timer(500) { AutoReset = true };
public MainWindow()
{
InitializeComponent();
_t.Elapsed += (x, y) => Dispatcher.Invoke(() => _vis.MoveGeom());
_t.Start();
}
}
Is this a known issue, is there some simple workaround for it, or some other solution?
This solution seems to solve this issue:
public void MoveGeom()
{
var x = _r.Next(-60, 200);
var y = _r.Next(-60, 200);
_geom.Transform = new TranslateTransform(x, y);
}
Evidently there is a problem when the two coordinates are set separately: each single-coordinate assignment triggers its own update, and the rendering process gets confused somewhere between the two.
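An alternative sketch that keeps a single transform instance: a MatrixTransform carries both offsets in its one Matrix property, so one assignment produces exactly one change notification. This assumes the constructor is changed to assign _geom.Transform = new MatrixTransform(); instead of a TranslateTransform:
public void MoveGeom()
{
    var transform = (MatrixTransform)_geom.Transform;
    var x = _r.Next(-60, 200);
    var y = _r.Next(-60, 200);
    // One property set -> one invalidation; the renderer never sees
    // a state where only one of the two offsets has been applied.
    transform.Matrix = new Matrix(1, 0, 0, 1, x, y);
}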