I'm writing an app that calculates covered distance in real time. I'm doing this by comparing my actual position to the previous one, computing the distance between them (using the Haversine formula), and summing everything up. This gives me some result; the distance is growing and everything seems to be working. BUT the problem is with the accuracy. I tested this app on a route that is ~15 km long many times, and the covered distance is always bigger than what my car's odometer says. The difference is ALWAYS different — from 500 m to even 2 km.
This is my Geolocator object:
// Geolocator configured for high-accuracy tracking.
// MovementThreshold is in metres; ReportInterval is a hint in milliseconds,
// so 50 is extremely aggressive — TODO confirm the intended units/values.
Geolocator gl = new Geolocator() {
DesiredAccuracy = PositionAccuracy.High, MovementThreshold = 20, ReportInterval = 50
};
In the constructor I declare that when the position is changed, "OnPositionChanged" method should be fired and also the method to find my actual location:
gl.PositionChanged += OnPositionChanged;
setMyLocation();
This is the "OnPositionChanged()" method:
// Raised by the Geolocator whenever the position changes; marshals the
// refresh onto the UI thread, since setMyLocation touches UI elements.
async private void OnPositionChanged(Geolocator sender, PositionChangedEventArgs e)
{
    // Dispatcher.RunAsync hops from the sensor callback thread to the UI thread.
    await Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () => setMyLocation());
}
This is the setMyLocation() method:
// Fetches the current position, updates the map marker, and accumulates
// the travelled distance into routeKM (shown in Distance_txt).
//
// NOTE(review): this re-queries GetGeopositionAsync instead of using the
// coordinates already delivered by the PositionChanged event, so the point
// sampled here can lag or differ from the event that triggered it —
// consider passing e.Position.Coordinate through instead.
private async void setMyLocation()
{
    try
    {
        p = new Position();
        location = await gl.GetGeopositionAsync(TimeSpan.FromMinutes(5), TimeSpan.FromSeconds(5));
        p.Latitude = location.Coordinate.Point.Position.Latitude;
        p.Longitude = location.Coordinate.Point.Position.Longitude;
        obla = location.Coordinate.Point.Position.Latitude;
        oblo = location.Coordinate.Point.Position.Longitude;

        // First fix: seed prev_location so the first distance sample is zero.
        if (prev_location.Latitude == 0)
        {
            prev_location = p;
        }

        // CONSISTENCY FIX: read from Coordinate.Point.Position as above
        // (Geocoordinate.Latitude/Longitude are the deprecated accessors).
        positions.Add(new BasicGeoposition() {
            Latitude = location.Coordinate.Point.Position.Latitude,
            Longitude = location.Coordinate.Point.Position.Longitude
        });

        // Redraw the position marker at the new location.
        myMap.Children.Remove(myCircle);
        myCircle = new Ellipse();
        myCircle.Fill = new SolidColorBrush(Colors.Green);
        myCircle.Height = 17;
        myCircle.Width = 17;
        // BUG FIX: Opacity is a 0.0–1.0 value; the original 50 was clamped
        // to fully opaque. 0.5 gives the semi-transparency presumably intended.
        myCircle.Opacity = 0.5;
        myMap.Children.Add(myCircle);
        MapControl.SetLocation(myCircle, location.Coordinate.Point);
        MapControl.SetNormalizedAnchorPoint(myCircle, new Point(0, 0));

        // Accumulate Haversine distance from the previous fix, then advance it.
        routeKM += (Distance(p, prev_location));
        Distance_txt.Text = routeKM.ToString("f2") + " km";
        prev_location = p;
    }
    catch (Exception ex)
    {
        // BUG FIX: the original empty catch silently swallowed every failure
        // (timeouts, denied location access, UI errors), making the app
        // impossible to debug. Keep the best-effort behaviour but log it.
        System.Diagnostics.Debug.WriteLine(ex);
    }
}
This is my double (routeKM) counted with the Haversine formula:
// Great-circle distance between two positions in kilometres,
// computed with the Haversine formula.
public double Distance(Position pos1, Position pos2)
{
    const double earthRadiusKm = 6371d;

    double latDelta = Deg2Rad(pos2.Latitude - pos1.Latitude);
    double lonDelta = Deg2Rad(pos2.Longitude - pos1.Longitude);

    double sinHalfLat = Math.Sin(latDelta / 2d);
    double sinHalfLon = Math.Sin(lonDelta / 2d);

    // Haversine term: sin²(Δφ/2) + cos(φ1)·cos(φ2)·sin²(Δλ/2)
    double h = sinHalfLat * sinHalfLat +
               Math.Cos(Deg2Rad(pos1.Latitude)) * Math.Cos(Deg2Rad(pos2.Latitude)) *
               sinHalfLon * sinHalfLon;

    // Central angle, then arc length on the sphere.
    double centralAngle = 2d * Math.Atan2(Math.Sqrt(h), Math.Sqrt(1d - h));
    return earthRadiusKm * centralAngle;
}
// Converts an angle from degrees to radians.
double Deg2Rad(double deg)
{
    const double factor = Math.PI / 180d;
    return deg * factor;
}
So my question is: how to improve the accuracy? I'm not happy that my car's counter shows even 2km less than my app.
By doing DesiredAccuracy = PositionAccuracy.High you already suggested Windows Phone OS to look for highest accuracy.
I think you should change your distance finding logic and see if it works.
Use GeoCoordinate.GetDistanceTo as suggested in following answer.
https://stackoverflow.com/a/6366657/744616
Related
I am using the GMap NuGet package to load the map in my project. I have loaded the map, and after that I want to draw circles around the selected latitude/longitude.
I have drawn the circles, and those circles are at the correct distance as well (I have verified this).
The issue is: as I change the latitude and longitude to view another location on the map, the map zooms automatically.
As the latitude number increases the map zooms in, and as the latitude number decreases it zooms out.
I am not using any zoom in/out function — only changing the latitude causes this behavior.
Eg : Bangalore Latitude:13.095170176351234 and longitude : 77.59453327374854
For above lat long assume its in Default Zoom.
Now I am going to switch the lat/long to the Delhi location to view the Delhi map.
Latitude: 28.55635085552201 and Longitude: 77.09994706900083
While loading the Delhi map we can see the zoom-in taking place.
Observe the circle width/height and the map while loading the Delhi location: we can see the zoom-in because Delhi's latitude number is bigger than Bangalore's latitude number.
// Default map centre: Bangalore, in decimal degrees.
private static double localLatitude = 13.095170176351234;
private static double localLongitude = 77.59453327374854;
// One-time map setup when the control loads: tile provider, zoom limits,
// interaction settings, centre position, and the concentric range circles.
private void mapView_Loaded(object sender, RoutedEventArgs e)
{
// Fetch tiles from the server, falling back to the local cache.
GMaps.Instance.Mode = AccessMode.ServerAndCache;
mapView.MapProvider=GMap.NET.MapProviders.OpenStreetMapProvider.Instance;
// Zoom is clamped to [12, 17]; the initial zoom is 14.
mapView.MinZoom = 12;
mapView.MaxZoom = 17;
mapView.Zoom = 14;
mapView.MouseWheelZoomType = MouseWheelZoomType.ViewCenter;
mapView.CanDragMap = false;
mapView.DragButton = MouseButton.Left;
mapView.Position = new PointLatLng(localLatitude, localLongitude);
mapView.IsHitTestVisible = true;
// NOTE(review): keeping the centre marker visible is suspected of driving
// the unwanted automatic zoom described above — confirm by toggling this.
mapView.ShowCenter = true;
// Radii are in metres (converted to km inside CreateCircle). The innermost
// circle uses ColorIndex 4 (red); the rest use the default index 1 (gray).
CreateCircle(localLatitude, localLongitude, 30, 0.4, 4);
CreateCircle(localLatitude, localLongitude, 500, 0.4);
CreateCircle(localLatitude, localLongitude, 1000, 0.4);
CreateCircle(localLatitude, localLongitude, 1500, 0.4);
CreateCircle(localLatitude, localLongitude, 2000, 0.4);
CreateCircle(localLatitude, localLongitude, 2150, 0.4);
}
// Draws a circle of rangeRadius metres around (rLat, rLon) as a polygon of
// short chords and adds it to the map's marker collection.
//   rangeRadius - radius in metres (divided by 1000 because
//                 FindPointAtDistanceFrom works in kilometres)
//   opacity     - opacity applied to the rendered shape
//   ColorIndex  - 1..4 selects the fill brush; any other value pops a message box
private void CreateCircle(double rLat, double rLon, double rangeRadius, double opacity, double ColorIndex = 1)
{
//double radius = 2150; // in mtr //todo get from erc
double radius = rangeRadius;
//int ColorIndex = 1;
PointLatLng point = new PointLatLng(rLat, rLon);
//GMapMarker markers = new GMapMarker(point);
int segments = 1080;
List<PointLatLng> pointillist = new List<PointLatLng>();
// NOTE(review): the bearing is i degrees converted to radians, so a full
// circle is completed at i == 360; with 1080 segments this loop traces the
// same circle three times over. 360 segments (or a step of 2*PI/segments)
// would draw it once — confirm whether the overdraw is intentional.
for (int i = 0; i < segments; i++)
{
pointillist.Add(FindPointAtDistanceFrom(point, i * (Math.PI / 180), radius / 1000));
}
var rangeCircles = new GMapPolygon(pointillist)
{
Tag = "RangeCircle"
};
// RegenerateShape materializes rangeCircles.Shape as a WPF Path we can style.
mapView.RegenerateShape(rangeCircles);
// Switching on a double with integer constant patterns; the visible callers
// only pass whole-number indices, so the comparisons are exact.
switch (ColorIndex)
{
case 1:
((Path)rangeCircles.Shape).Fill = Brushes.LightGray;
break;
case 2:
((Path)rangeCircles.Shape).Fill = Brushes.Orange;
break;
case 3:
((Path)rangeCircles.Shape).Fill = Brushes.Aqua;
break;
case 4:
((Path)rangeCircles.Shape).Fill = Brushes.Red;
break;
default:
System.Windows.MessageBox.Show("No search zone found!");
break;
}
((Path)rangeCircles.Shape).Stroke = Brushes.Black;
((Path)rangeCircles.Shape).StrokeThickness = 1;
((Path)rangeCircles.Shape).Opacity = opacity;
mapView.Markers.Add(rangeCircles);
}
// Computes the destination reached by travelling distanceKilometres from
// startPoint along the given initial great-circle bearing (in radians),
// using the spherical forward ("direct") formula on an Earth-radius sphere.
private static GMap.NET.PointLatLng FindPointAtDistanceFrom(GMap.NET.PointLatLng startPoint, double initialBearingRadians, double distanceKilometres)
{
    const double radiusEarthKilometres = 6371.01;

    // Angular distance travelled on the unit sphere.
    double angular = distanceKilometres / radiusEarthKilometres;
    double sinAngular = Math.Sin(angular);
    double cosAngular = Math.Cos(angular);

    double lat1 = DegreesToRadians(startPoint.Lat);
    double lon1 = DegreesToRadians(startPoint.Lng);
    double sinLat1 = Math.Sin(lat1);
    double cosLat1 = Math.Cos(lat1);

    // Destination latitude.
    double lat2 = Math.Asin((sinLat1 * cosAngular) + (cosLat1 * sinAngular * Math.Cos(initialBearingRadians)));
    // Destination longitude, as an offset from the start longitude.
    double lon2 = lon1 + Math.Atan2(
        Math.Sin(initialBearingRadians) * sinAngular * cosLat1,
        cosAngular - sinLat1 * Math.Sin(lat2));

    return new GMap.NET.PointLatLng(RadiansToDegrees(lat2), RadiansToDegrees(lon2));
}
// Degrees → radians.
private static double DegreesToRadians(double degrees)
{
    return degrees * (Math.PI / 180);
}
// Radians → degrees.
private static double RadiansToDegrees(double radians)
{
    return radians * (180 / Math.PI);
}
It might be the mapView.ShowCenter = true; property that is causing the automatic zoom adjustments. Requiring the center point to always be visible probably causes your map to zoom to try to show both the new marker and the center in the same visible map window.
So I have an acceleration sensor that gives me acceleration data. The device is currently resting at a certain position, so the data looks like this (with some noise per axis):
ACCELX = 264
ACCELY = -43
ACCELZ = 964
Then there's a 3D model represting the device, and "all I want" is this 3D model to represent the real device's orientation. In my attempts to understand the usage of quaternions in .NET, here's the code I've gobbled up:
/* globals */
Vector3D PrevVector = new Vector3D(0, 0, 0); // previous accelerometer sample
ModelVisual3D model; // initialized and model file loaded
// Timer callback: reads the latest accelerometer sample and applies a
// rotation to the 3D model.
// NOTE(review): the quaternion built here uses the *current* acceleration
// vector as the rotation axis and the angle to the previous sample as the
// rotation angle. That is not a valid orientation estimate, which matches
// the "random twitching" described — see the corrected version later on.
private async void TimerEvent()
{
RotateTransform3D rot = new RotateTransform3D();
QuaternionRotation3D q = new QuaternionRotation3D();
double x = 0, y = 0, z = 0;
// Pull all pending readings; the last value per axis wins.
List<Reading> results = await Device.ReadSensor();
foreach (Reading r in results)
{
switch (r.Type)
{
case "RPF_SEN_ACCELX":
x = r.Value;
break;
case "RPF_SEN_ACCELY":
y = r.Value;
break;
case "RPF_SEN_ACCELZ":
z = r.Value;
break;
}
}
// Angle (in degrees) between this sample and the previous one.
double angle = Vector3D.AngleBetween(new Vector3D(x, y, z), PrevVector);
q.Quaternion = new Quaternion(new Vector3D(x, y, z), angle);
rot.Rotation = q;
model.Transform = rot;
PrevVector = new Vector3D(x, y, z);
}
Moving my real device does yield changes in the reported values, but the model on the screen just twitches in what seems to me random directions, little more far than from the noise and seemingly unrelated to how I rotate the real device. I'm fairly sure I'm constructing and using quaternions incorrectly. How would I do it right?
This is .NET with WPF. There's also HelixToolkit.WPF available, but I haven't seen any function to create quaternions from acceleration data in there. Higher level frameworks such as Unreal Engine or Unity are NOT available for this project.
Is your sensor's output rotation value cumulative, or is it a difference? Sometimes the output rotation is a difference, and you need the previous rotation value plus the difference to calculate the new current one.
You can try saving the previous quaternion and combining the current quaternion with the previous one to get the new accumulated rotation.
Turns out my issue was of a completely different nature: I had to utilize a class called Transform3DGroup. This is how the code must be altered to enable a rotation around the Z axis:
/* globals */
ModelVisual3D model; // initialized and model file loaded
Transform3DGroup tg = new Transform3DGroup();
// Corrected timer callback: routes the rotation through a Transform3DGroup
// so the model actually updates.
// NOTE(review): `angle` is not declared in this snippet — presumably a
// field on the class; confirm. Only the Z-axis case rebuilds the
// quaternion, so the model rotates about +Z using the heading derived
// from the X/Y acceleration components.
private async void TimerEvent()
{
RotateTransform3D rot = new RotateTransform3D();
QuaternionRotation3D q = new QuaternionRotation3D();
double x = 0, y = 0, z = 0;
List<Reading> results = await Device.ReadSensor();
foreach (Reading r in results)
{
switch (r.Type)
{
case "RPF_SEN_ACCELX":
x = r.Value;
break;
case "RPF_SEN_ACCELY":
y = r.Value;
break;
case "RPF_SEN_ACCELZ":
z = r.Value;
// Heading in the X/Y plane, converted to degrees, applied about +Z.
angle = GetAngle(x, y).ToDegrees();
q.Quaternion = new Quaternion(new Vector3D(0, 0, 1), angle);
break;
}
// Replace the group's content each pass so rotations don't accumulate.
rot.Rotation = q;
tg.Children.Clear();
tg.Children.Add(rot);
model.Transform = tg; // Use Transform3DGroup!
}
}
I haven't found any documentation on the obligatory use of Transform3DGroup.
For the sake of completion, here are the internals of GetAngle(), as derived by Bill Wallis on Math Overflow:
// Maps a 2D vector (x, y) to an angle in radians using the piecewise
// formula derived by Bill Wallis on Math Overflow. Note the result is in
// the range (π/2, 5π/2], not the usual [0, 2π).
private double GetAngle(double x, double y)
{
    if (x == 0)
    {
        // On the Y axis: a quarter turn whose direction follows the sign of y.
        return 2 * Math.PI - Math.Sign(y) * Math.PI / 2;
    }

    double atan = Math.Atan(y / x);
    return x > 0
        ? 2 * Math.PI - atan   // right half-plane
        : Math.PI - atan;      // left half-plane
}
And the extensions to the double type to transform doubles between radians and degrees (outside of class, within namespace):
/// <summary>Angle-unit conversion helpers for <see cref="double"/>.</summary>
public static class NumericExtensions
{
    /// <summary>Converts a value in degrees to radians.</summary>
    public static double ToRadians(this double val) => (Math.PI / 180) * val;

    /// <summary>Converts a value in radians to degrees.</summary>
    public static double ToDegrees(this double val) => (180 / Math.PI) * val;
}
I tried making this Mandelbrot fractal generator, but when I run this, I get an output like a circle.
Not sure exactly why this happens. I think something may be wrong with my coloring, but even if so the shape is also incorrect.
/// <summary>
/// Renders the Mandelbrot set over the given rectangle of the complex plane
/// into a bitmap of the given pixel size, with smooth HSV coloring outside
/// the set and black inside it.
/// </summary>
/// <param name="width">Output bitmap width in pixels.</param>
/// <param name="height">Output bitmap height in pixels.</param>
/// <param name="realMin">Left edge on the real axis.</param>
/// <param name="realMax">Right edge on the real axis.</param>
/// <param name="imaginaryMin">Bottom edge on the imaginary axis.</param>
/// <param name="imaginaryMax">Top edge on the imaginary axis.</param>
/// <param name="maxIterations">Iteration budget per point.</param>
/// <param name="bound">Escape radius; a point escapes when |z| exceeds it.</param>
public static Bitmap Generate(
    int width,
    int height,
    double realMin,
    double realMax,
    double imaginaryMin,
    double imaginaryMax,
    int maxIterations,
    int bound)
{
    var bitmap = new FastBitmap(width, height);
    // NOTE(review): |min| + |max| equals (max - min) only when the range
    // straddles zero (min <= 0 <= max), which holds for the usual Mandelbrot
    // window but not for arbitrary zooms — confirm intended use.
    var planeWidth = Math.Abs(realMin) + Math.Abs(realMax); // Total width of the plane.
    var planeHeight = Math.Abs(imaginaryMin) + Math.Abs(imaginaryMax); // Total height of the plane.
    var realStep = planeWidth / width; // Amount to step by on the real axis per pixel.
    var imaginaryStep = planeHeight / height; // Amount to step by on the imaginary axis per pixel.
    var realScaling = width / planeWidth;
    var imaginaryScaling = height / planeHeight;

    // BUG FIX: `bound ^ 2` is bitwise XOR in C#, not exponentiation
    // (2 ^ 2 == 0), which broke the escape test and produced a circle.
    var boundSquared = bound * bound;

    for (var real = realMin; real <= realMax; real += realStep) // Loop through the real axis.
    {
        for (var imaginary = imaginaryMin; imaginary <= imaginaryMax; imaginary += imaginaryStep) // Loop through the imaginary axis.
        {
            var z = Complex.Zero;
            var c = new Complex(real, imaginary);
            var iterations = 0;
            for (; iterations < maxIterations; iterations++)
            {
                z = z * z + c;
                // Compare |z|^2 against bound^2 to avoid a square root.
                if (z.Real * z.Real + z.Imaginary * z.Imaginary > boundSquared)
                {
                    break;
                }
            }

            // ROBUSTNESS FIX: clamp pixel indices — the loop can land exactly
            // on realMax/imaginaryMax, which would map to width/height and
            // write one pixel out of bounds.
            var px = Math.Min(width - 1, (int)((real + Math.Abs(realMin)) * realScaling));
            var py = Math.Min(height - 1, (int)((imaginary + Math.Abs(imaginaryMin)) * imaginaryScaling));

            if (iterations == maxIterations)
            {
                // Never escaped: treat the point as inside the set.
                bitmap.SetPixel(px, py, Color.Black);
            }
            else
            {
                // Smooth (fractional) iteration count for continuous coloring.
                var nsmooth = iterations + 1 - Math.Log(Math.Log(Complex.Abs(z))) / Math.Log(2);
                var color = MathHelper.HsvToRgb(0.95f + 10 * nsmooth, 0.6, 1.0);
                bitmap.SetPixel(px, py, color);
            }
        }
    }
    return bitmap.Bitmap;
}
Here's one error:
var boundSquared = bound ^ 2;
This should be:
var boundSquared = bound * bound;
The ^ operator means xor.
I'm creating a program in wpf that draws polyline as well as a line an offset away which is parallel. It works perfectly for the first set of parallel lines, but on each following line the right line is off angled(Shown in red) .
Code so far:
// Mouse-up handler: commits the next segment of the polyline being drawn
// (once it is at least 20px long) and appends the matching offset point to
// the parallel polyline; a short click ends/cancels the current polyline.
// NOTE(review): the offset points are recomputed per segment from the
// segment's own normal, so at direction changes the parallel polyline's
// corners are not mitred — suspected cause of the skewed red line; also
// only rightSegment.Points[0] is advanced here while its Points[1] is
// never visibly updated in this snippet — confirm against the full class.
private void DrawingCanvas_MouseLeftButtonUp(object sender, MouseButtonEventArgs e) {
if (polylineLeft != null) {
var canvas = (Canvas)sender;
// Current segment endpoint follows the mouse-release position.
leftSegment.Points[1] = e.GetPosition(canvas);
var distance = (leftSegment.Points[0] - leftSegment.Points[1]).Length;
if (distance >= 20) {
polylineLeft.Points.Add(leftSegment.Points[1]);
//calculate second line
// Segment length, used to normalize the perpendicular offset below.
var L = Math.Sqrt((leftSegment.Points[0].X - leftSegment.Points[1].X) *
(leftSegment.Points[0].X - leftSegment.Points[1].X) +
(leftSegment.Points[0].Y - leftSegment.Points[1].Y) *
(leftSegment.Points[0].Y - leftSegment.Points[1].Y));
// Offset both segment endpoints by `width` along the segment's normal
// (swap dx/dy with one sign flipped = perpendicular direction).
var x1p = leftSegment.Points[0].X + width * (leftSegment.Points[1].Y-leftSegment.Points[0].Y) / L;
var x2p = leftSegment.Points[1].X + width * (leftSegment.Points[1].Y-leftSegment.Points[0].Y) / L;
var y1p = leftSegment.Points[0].Y + width * (leftSegment.Points[0].X-leftSegment.Points[1].X) / L;
var y2p = leftSegment.Points[1].Y + width * (leftSegment.Points[0].X-leftSegment.Points[1].X) / L;
if (!initialLeftPoint) {
// First committed segment: seed the parallel polyline's start point.
polylineRight.Points.Clear();
polylineRight.Points.Add(new Point(x1p, y1p));
initialLeftPoint = true;
}
polylineRight.Points.Add(new Point(x2p, y2p));
// Advance the working segments so the next segment starts where this ended.
leftSegment.Points[0] = leftSegment.Points[1];
rightSegment.Points[0] = rightSegment.Points[1];
} else {
// Segment too short: finish the polyline and clean up helper shapes.
if (polylineLeft.Points.Count < 2) {
canvas.Children.Remove(polylineLeft);
}
polylineLeft = null;
polylineRight = null;
leftSegment.Points.Clear();
rightSegment.Points.Clear();
canvas.Children.Remove(leftSegment);
canvas.Children.Remove(rightSegment);
}
}
}
How do I ensure that on my second line, (Red) it is parallel with the main line (Green)?
One part of the problem is quite easy to solve with the help of the WPF Vector structure. Given a line segment between the two Points p1 and p2, you could calculate the normal vector like this:
Point p1 = ...
Point p2 = ...
var v = p2 - p1;
var n = new Vector(v.Y, -v.X);
n.Normalize();
// now n is a Vector of length 1, perpendicular to the line p1-p2
You could now create a parallel line segment (given by Points p3 and p4) like this:
var distance = 20d;
var p3 = p1 + n * distance;
var p4 = p3 + v;
However, the above code creates a parallel line segment of the same length as the original one. This may not be exactly what you want, as I guess you want to create a "parallel polyline". If that is the case, things get a bit more complicated because you would also have to calculate the intersections between adjacent segments of the parallel polyline. It may even happen that some of these segments disappear during these calculations.
Everyone,
here is a link to a small python app:
http://en.wikipedia.org/wiki/File:Beta-skeleton.svg
I think I've correctly converted it. (Source at bottom of post)
But, the Math.Acos always returns NaN. Is there a difference between the python version of acos and Math.Acos?
private Random rnd = new Random();      // random source for the point cloud
private double scale = 5;               // spread of the random points
private double radius = 10;             // marker size / drawing offset
private double beta1 = 1.1;             // beta-skeleton parameter (> 1)
private double beta2 = 0.9;             // beta-skeleton parameter (< 1)
private double theta1;                  // angle threshold derived from beta1
private double theta2;                  // angle threshold derived from beta2
// NOTE(review): the array size must match the fill loop in the constructor.
private Point[] points = new Point[10];
// Builds the random point cloud and precomputes the beta-skeleton angle
// thresholds from beta1/beta2.
public MainWindow()
{
    InitializeComponent();

    // BUG FIX: the original loop ran to a hard-coded 100 while `points`
    // holds only 10 elements, throwing IndexOutOfRangeException at startup.
    // Iterating over the array's actual length fills exactly what exists.
    for (int i = 0; i < points.Length; i++)
    {
        points[i] = new Point((rnd.NextDouble() * scale),
                              (rnd.NextDouble() * scale));
    }

    theta1 = Math.Asin(1 / beta1);
    // theta2 = pi - asin(beta2): circle-based criterion for beta < 1,
    // mirroring the Python original.
    theta2 = Math.PI - Math.Asin(beta2);
}
// Dot product of the difference vectors (p - r) and (q - r), i.e. both
// vectors anchored at r — mirrors the Python helper dot(p, q, r).
private double Dot(Point p, Point q, Point r)
{
    double prX = p.X - r.X;
    double prY = p.Y - r.Y;
    double qrX = q.X - r.X;
    double qrY = q.Y - r.Y;
    return prX * qrX + prY * qrY;
}
// Returns the largest angle p-r-q (radians) subtended at any third point r,
// used to decide beta-skeleton membership of the candidate edge p-q.
//
// BUG FIX: the original fed the raw dot product straight into Math.Acos,
// which returns NaN whenever its argument falls outside [-1, 1]. Matching
// the Python original, the dot product must be normalized by the product
// of the vector lengths *inside* the acos:
//   acos(dot(p,q,r) / sqrt(dot(p,p,r) * dot(q,q,r)))
private double Sharp(Point p,Point q)
{
    double theta = 0;
    foreach(var pnt in points)
    {
        if(pnt!=p && pnt!=q)
        {
            var pqr = Dot(p, q, pnt);
            var ppr = Dot(p, p, pnt);  // squared length of (p - r)
            var qqr = Dot(q, q, pnt);  // squared length of (q - r)
            var angle = Math.Acos(pqr / Math.Sqrt(ppr * qqr));
            theta = Math.Max(theta, angle);
        }
    }
    return theta;
}
// Adds a small red circular marker for point p to the canvas.
private void DrawPoint(Point p)
{
    var marker = new Ellipse();
    marker.Width = radius / 2;
    marker.Height = radius / 2;
    marker.Stroke = Brushes.Red;
    marker.Visibility = Visibility.Visible;

    // Shift by `radius` so the marker sits offset from the raw coordinate.
    Canvas.SetTop(marker, p.Y + radius);
    Canvas.SetLeft(marker, p.X + radius);
    MyCanvas.Children.Add(marker);
}
// Draws a black edge from p to q (edges passing the theta1 test).
private void DrawEdge1(Point p,Point q)
{
    var l = new Line
    {
        X1 = p.X,
        Y1 = p.Y,
        X2 = q.X,
        Y2 = q.Y,
        Stroke = Brushes.Black,
        // BUG FIX: `Width = 1` set the element's layout width (clipping the
        // line to a 1px column); the stroke weight is StrokeThickness.
        StrokeThickness = 1,
        Visibility = Visibility.Visible
    };
    MyCanvas.Children.Add(l);
}
// Draws a blue edge from p to q (edges passing only the theta2 test).
private void DrawEdge2(Point p,Point q)
{
    var l = new Line
    {
        X1 = p.X,
        Y1 = p.Y,
        X2 = q.X,
        Y2 = q.Y,
        Stroke = Brushes.Blue,
        // BUG FIX: `Width = 1` set the element's layout width (clipping the
        // line to a 1px column); the stroke weight is StrokeThickness.
        StrokeThickness = 1,
        Visibility = Visibility.Visible
    };
    MyCanvas.Children.Add(l);
}
// Classifies every pair of points and draws the skeleton edges:
// black (DrawEdge1) when the sharp angle < theta1, blue (DrawEdge2) when
// it is only < theta2.
// NOTE(review): the nested foreach visits each unordered pair twice and
// also pairs every point with itself (Sharp(p, p) excludes nothing extra),
// so each edge is drawn in both directions plus a degenerate p-p edge —
// the Python original likely iterates distinct pairs; confirm.
private void Window_Loaded(object sender, RoutedEventArgs e)
{
foreach (var p in points)
{
foreach (var q in points)
{
var theta = Sharp(p, q);
if(theta < theta1) DrawEdge1(p, q);
else if(theta < theta2) DrawEdge2(p, q);
}
}
}
What you need to do to get the angle from the dot product is to divide by the product of the vector lengths before you call acos.
What python has:
prq = acos(dot(p,q,r) / (dot(p,p,r)*dot(q,q,r))**0.5)
What you're doing is not dividing in the Acos, but dividing after.
so:
int r = pnt;
int ppr = Dot(p,p,r);
int qqr = Dot(q,q,r);
int pqr = Dot(p,q,r);
double u = Math.Acos(pqr / Math.Sqrt(ppr * qqr));
Of course change the variables, I was just trying to keep it similar to the python to help you understand :)
I think it's due to your translation of the Python expression (dot(p,q,r) / (dot(p,p,r) * dot(q,q,r)) **0.5). Exponentiation in Python has one of the lowest operators precedency-wise, so the square-root is being taken of the subterm dot(p,q,r) / (dot(p,p,r) * dot(q,q,r)). In your C# version, when calculating the value of the double 'u', you're only taking the square-root of the product of the last two terms, i.e. the (dotpq * dotpq).
The question really is what is the value of dotpq when the function gets called. It has to be a double value between -1 and 1 as stated in the docs.