I purchased the book Numerical Methods, Algorithms and Tools in C# by Waldemar Dos Passos.
On page 463, there is the method:
public static double FactorialLn(int n)
{
    if (n < 0)
    {
        throw new Exception("Input value must be > 0");
    }
    else
    {
        return GammaLn(n + 1.0);
    }
}
I found the namespace where the function GammaLn resides, MicrosoftResearch.Infer.Maths, but Visual Studio 2010 does not recognize it and I was unable to find it in the .NET reference.
Please help me get the program to compile.
Thank you in advance.
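If the goal is just to get something that compiles, one alternative (not the book's own GammaLn and not the MicrosoftResearch assembly) is the log-gamma implementation in Math.NET Numerics. A minimal sketch, assuming you can add the MathNet.Numerics NuGet package to the project:
using System;
using MathNet.Numerics;   // NuGet package "MathNet.Numerics"

public static class Factorials
{
    // Same shape as the book's FactorialLn, but delegating the log-gamma
    // computation to MathNet.Numerics.SpecialFunctions.GammaLn.
    public static double FactorialLn(int n)
    {
        if (n < 0)
            throw new ArgumentOutOfRangeException("n", "Input value must be >= 0");
        return SpecialFunctions.GammaLn(n + 1.0);
    }
}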
I finally found the source code of the GammaLn method itself here: http://seungwon.tistory.com/9
As Andrey correctly pointed out, you need nothing else once you have the real thing, but until then...
I reproduce the method here for future reference, in case the site above goes offline.
/// <summary>
/// Natural logarithm of the Gamma function (GammaLn).
/// Source: http://seungwon.tistory.com/9 (it appears to be a port of MATLAB's gammaln).
/// </summary>
/// <param name="x">Argument; must be positive.</param>
/// <returns>ln(Gamma(x))</returns>
public static double GammaLn(double x)
{
double result = 0.0;
double d1 = -5.772156649015328605195174e-1;
double[,] p1 = {{4.945235359296727046734888e0},{ 2.018112620856775083915565e2},
{2.290838373831346393026739e3},{ 1.131967205903380828685045e4},
{2.855724635671635335736389e4},{ 3.848496228443793359990269e4},
{2.637748787624195437963534e4},{ 7.225813979700288197698961e3}};
double[,] q1 = {{6.748212550303777196073036e1},{ 1.113332393857199323513008e3},
{7.738757056935398733233834e3},{ 2.763987074403340708898585e4},
{5.499310206226157329794414e4},{ 6.161122180066002127833352e4},
{3.635127591501940507276287e4},{ 8.785536302431013170870835e3}};
double d2 = 4.227843350984671393993777e-1;
double[,] p2 = {{4.974607845568932035012064e0},{ 5.424138599891070494101986e2},
{1.550693864978364947665077e4},{ 1.847932904445632425417223e5},
{1.088204769468828767498470e6},{ 3.338152967987029735917223e6},
{5.106661678927352456275255e6},{ 3.074109054850539556250927e6}};
double[,] q2 = {{1.830328399370592604055942e2},{ 7.765049321445005871323047e3},
{1.331903827966074194402448e5},{ 1.136705821321969608938755e6},
{5.267964117437946917577538e6},{ 1.346701454311101692290052e7},
{1.782736530353274213975932e7},{ 9.533095591844353613395747e6}};
double d4 = 1.791759469228055000094023e0;
double[,] p4 = {{1.474502166059939948905062e4},{ 2.426813369486704502836312e6},
{1.214755574045093227939592e8},{ 2.663432449630976949898078e9},
{2.940378956634553899906876e10},{ 1.702665737765398868392998e11},
{4.926125793377430887588120e11},{5.606251856223951465078242e11}};
double[,] q4 = {{2.690530175870899333379843e3},{ 6.393885654300092398984238e5},
{4.135599930241388052042842e7},{ 1.120872109616147941376570e9},
{1.488613728678813811542398e10},{1.016803586272438228077304e11},
{3.417476345507377132798597e11},{ 4.463158187419713286462081e11}};
double[,] c = {{-1.910444077728e-03},{ 8.4171387781295e-04},
{-5.952379913043012e-04},{ 7.93650793500350248e-04},
{-2.777777777777681622553e-03},{ 8.333333333333333331554247e-02},
{ 5.7083835261e-03}};
double eps = 2.2204e-016;
if (x <= 0)
{
// Error! x must be positive (this branch is left empty in the original).
}
else
{
double xden = 0.0;
double xnum = 0.0;
result = x;
if (x > 0 && x <= eps)
{
result = -Math.Log(x);
}
else if ((x > eps) && (x <= 0.5))
{
double y = x;
xden = 1;
xnum = 0;
for (int i = 0; i < 8; i++)
{
xnum = xnum * y + p1[i, 0];
xden = xden * y + q1[i, 0];
}
result = -Math.Log(y) + (y * (d1 + y * (xnum / xden)));
}
else if ((x > 0.5) && (x <= 0.6796875))
{
double xm1 = (x - 0.5) - 0.5;
xden = 1;
xnum = 0;
for (int i = 0; i < 8; i++)
{
xnum = xnum * xm1 + p2[i, 0];
xden = xden * xm1 + q2[i, 0];
}
result = -Math.Log(x) + xm1 * (d2 + xm1 * (xnum / xden));
}
else if ((x > 0.6796875) && (x <= 1.5))
{
double xm1 = (x - 0.5) - 0.5;
xden = 1;
xnum = 0;
for (int i = 0; i < 8; i++)
{
xnum = xnum * xm1 + p1[i, 0];
xden = xden * xm1 + q1[i, 0];
}
result = xm1 * (d1 + xm1 * (xnum / xden));
}
else if ((x > 1.5) && (x <= 4))
{
double xm2 = x - 2;
xden = 1;
xnum = 0;
for (int i = 0; i < 8; i++)
{
xnum = xnum * xm2 + p2[i, 0];
xden = xden * xm2 + q2[i, 0];
}
result = xm2 * (d2 + xm2 * (xnum / xden));
}
else if ((x > 4) && (x <= 12))
{
double xm4 = x - 4;
xden = -1;
xnum = 0;
for (int i = 0; i < 8; i++)
{
xnum = xnum * xm4 + p4[i, 0];
xden = xden * xm4 + q4[i, 0];
}
result = d4 + xm4 * (xnum / xden);
}
else if (x > 12)
{
double y = x;
double r = c[6, 0]; // equivalent to: double r = repmat(c[6, 0], 1)[0,0];
double ysq = y * y;
for (int i = 0; i < 6; i++)
{
r = r / ysq + c[i, 0];
}
r = r / y;
double corr = Math.Log(y);
double spi = 0.9189385332046727417803297;
result = r + spi - 0.5 * corr + y * (corr - 1);
}
}
return result;
}
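A quick sanity check of the routine above (a tiny sketch, called from the same class): ln(5!) = ln(120) ≈ 4.7875.
// FactorialLn(5) = GammaLn(6) should equal ln(120)
Console.WriteLine(GammaLn(6.0));      // 4.787491742782046...
Console.WriteLine(Math.Log(120.0));   // same value, for comparison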
Long story short, the following is based on mesh generation and terrain heights.
In my code everything seems to be fine. I have redone all of it and the same issue comes up, and I don't know why.
All I know is that the last iteration of the Z for loop isn't giving the right height.
I could just use z-1 and have the game run on the rest of the map,
but I feel that this will ruin my next phase of development, so I would appreciate hints on where the issue might be.
The code for the Perlin noise is as follows:
public static class PerlinFilter
{
public static float[] Filter(List<int> keyX, List<int> keyZ, int indexX = 0, int indexZ = 0, int sizeX = 100, int sizeZ = 100, int nOctaves = 3, float fBias = .6f)
{
float[] filtered = new float[(sizeX + 1) * (sizeZ + 1)];
for (int z = indexZ; z < sizeZ; z++)
{
for (int x = indexX; x < sizeX; x++)
{
float fNoise = 0.0f;
float fScale = 1.0f;
float fScaleAcc = 0.0f;
for (int o = 0; o < nOctaves; o++)
{
int nPitch = sizeZ >> o;
int nSampleX1 = x / nPitch * nPitch;
int nSampleZ1 = z / nPitch * nPitch;
int nSampleX2 = (nSampleX1 + nPitch) % sizeX;
int nSampleZ2 = (nSampleZ1 + nPitch) % sizeX;
float fBlendX = (float)(x - nSampleX1) / nPitch;
float fBlendZ = (float)(z - nSampleZ1) / nPitch;
float fSampleT = (1.0f - fBlendX) * keyX[nSampleZ1 * sizeX + nSampleX1] + fBlendX * keyZ[nSampleZ1 * sizeZ + nSampleX2];
float fSampleB = (1.0f - fBlendX) * keyX[nSampleZ2 * sizeX + nSampleX1] + fBlendX * keyZ[nSampleZ2 * sizeZ + nSampleX2];
fNoise += (fBlendZ * (fSampleB - fSampleT) + fSampleT) * fScale;
fScaleAcc += fScale;
fScale /= fBias;
}
filtered[z * sizeX + x] = fNoise / fScaleAcc;
}
}
return filtered;
}
}
And the code for the mesh is as follows:
void Generate(bool flat)
{
vertices = new Vector3[(x + 1) * (z + 1)];
for (int i = 0, iz = 0; iz <= z; iz++)
{
for (int ix = 0; ix <= x; ix++, i++)
{
if (flat)
vertices[i] = new Vector3(ix, 0, iz);
else
vertices[i] = new Vector3(ix, PerlinHeight(ix, iz), iz);
}
}
triangles = new int[x * z * 6];
for (int tris = 0, vert = 0, iz = 0; iz < z; iz++, vert++)
{
for (int ix = 0; ix < x; ix++, vert++, tris += 6)
{
triangles[tris + 0] = vert + 0;
triangles[tris + 1] = vert + x + 1;
triangles[tris + 2] = vert + 1;
triangles[tris + 3] = vert + 1;
triangles[tris + 4] = vert + x + 1;
triangles[tris + 5] = vert + x + 2;
}
}
}
public int PerlinHeight(int _x, int _z) => (int)(keyPerlin[_z * x + _x] * 1f);
keyPerlin is the result of the PerlinFilter.Filter.
If it becomes obvious that I need to add more code here, let me know. This was done a few months ago, and I have been working on other parts of the game while accepting the z-1 workaround, but at this point I really need to sort this issue out.
In your noise filter generation you do
float[] filtered = new float[(sizeX + 1) * (sizeZ + 1)];
for (int z = indexZ; z < sizeZ; z++)
{
for (int x = indexX; x < sizeX; x++)
{
...
=> You don't fill the complete array with valid values! The last row (z == sizeZ) and the last column (x == sizeX) keep the default value 0.
While in the mesh you do
vertices = new Vector3[(x + 1) * (z + 1)];
for (int i = 0, iz = 0; iz <= z; iz++)
{
for (int ix = 0; ix <= x; ix++, i++)
{
...
=> You do include that last row and column here, so those vertices all end up with a height of 0 (the unfilled default from the filter).
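One way to make the two agree is to let the filter produce the extra row and column the mesh expects. Below is a sketch only, not a tested drop-in fix: it assumes sizeX/sizeZ match the mesh's x/z, keeps the (sizeX + 1) * (sizeZ + 1) output size, drops the indexX/indexZ parameters, and clamps the sample coordinates so the inclusive bounds never read past the end of keyX/keyZ.
// Sketch: place inside PerlinFilter (needs using System; and using System.Collections.Generic;)
public static float[] FilterInclusive(List<int> keyX, List<int> keyZ,
    int sizeX = 100, int sizeZ = 100, int nOctaves = 3, float fBias = .6f)
{
    float[] filtered = new float[(sizeX + 1) * (sizeZ + 1)];
    for (int z = 0; z <= sizeZ; z++)                    // note: <= instead of <
    {
        for (int x = 0; x <= sizeX; x++)                // note: <= instead of <
        {
            float fNoise = 0.0f;
            float fScale = 1.0f;
            float fScaleAcc = 0.0f;
            for (int o = 0; o < nOctaves; o++)
            {
                int nPitch = sizeZ >> o;
                // clamp so the extra row/column reuses the last valid sample
                int nSampleX1 = Math.Min(x / nPitch * nPitch, sizeX - 1);
                int nSampleZ1 = Math.Min(z / nPitch * nPitch, sizeZ - 1);
                int nSampleX2 = (nSampleX1 + nPitch) % sizeX;
                int nSampleZ2 = (nSampleZ1 + nPitch) % sizeX;
                float fBlendX = (float)(x - nSampleX1) / nPitch;
                float fBlendZ = (float)(z - nSampleZ1) / nPitch;
                float fSampleT = (1.0f - fBlendX) * keyX[nSampleZ1 * sizeX + nSampleX1] + fBlendX * keyZ[nSampleZ1 * sizeZ + nSampleX2];
                float fSampleB = (1.0f - fBlendX) * keyX[nSampleZ2 * sizeX + nSampleX1] + fBlendX * keyZ[nSampleZ2 * sizeZ + nSampleX2];
                fNoise += (fBlendZ * (fSampleB - fSampleT) + fSampleT) * fScale;
                fScaleAcc += fScale;
                fScale /= fBias;
            }
            filtered[z * (sizeX + 1) + x] = fNoise / fScaleAcc;   // stride is sizeX + 1 now
        }
    }
    return filtered;
}
The mesh lookup then needs the matching stride as well, e.g. keyPerlin[_z * (x + 1) + _x] instead of keyPerlin[_z * x + _x], assuming sizeX here equals the mesh's x.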
I am calculating values using weights and biases from a MATLAB-trained ANN, trying to code the sigmoid simulation equation, but for some reason the C# calculations vary too much from those of MATLAB, i.e. the error is too high. I tried to check each step of the equation and found the specific part that is creating the problem (emphasized below), but I don't know how to solve this issue. If someone could help, it would be a huge favour.
(1 + purelin(net.LW{2} * tansig(net.IW{1} * (1 - abs(2 * ([inputs] - 1))) + net.b{1}) + net.b{2})) / 2
//Normalization of Data
public double Normalization(double x, double xMAx, double xMin)
{
double xNorm = 0.0;
xNorm = (x - xMin) / (xMAx - xMin);
if (xNorm < 0)
xNorm = 0;
if (xNorm > 1)
xNorm = 1;
xNorm = Math.Round(xNorm, 4);
return xNorm;
}
// Equation to calculate ANN based Output Values
public double MetrixCalc(double[] Pn, double[,] W1, double[] W2, double[] b1, double b2, double maxValue, double minValue)
{
double FinalValue = 0;
double[] PnCalc1 = new double[Pn.Length];
double[] PnCalc2 = new double[W1.Length / Pn.Length];
for (int i = 0; i < Pn.Length; i++)
{
PnCalc1[i] = 1 - Math.Abs(2 * (Pn[i] - 1));
}
for (int i = 0; i < (W1.Length / Pn.Length); i++)
{
double PnCalc = 0.0;
for (int j = 0; j < Pn.Length; j++)
{
PnCalc = PnCalc + (W1[i, j] * PnCalc1[j]);
}
PnCalc2[i] = PnCalc;
}
for (int i = 0; i < PnCalc2.Length; i++)
{
//PnCalc2[i] = Math.Tanh(PnCalc2[i] + b1[i]);
PnCalc2[i] = PnCalc2[i] + b1[i];
PnCalc2[i] = 2.0 / (1 + Math.Exp(-2 * (PnCalc2[i]))) - 1;
PnCalc2[i] = Math.Round(PnCalc2[i], 4);
}
double FinalCalc = 0.0;
for (int i = 0; i < PnCalc2.Length; i++)
{
FinalCalc = FinalCalc + (W2[i] * (PnCalc2[i])); // <-- the emphasized part that seems to cause the problem
//FinalValue = FinalCalc;
}
FinalValue = FinalCalc + b2;
FinalValue = 1 + FinalValue;
FinalValue = (1 + FinalValue) / 2.0;
FinalValue = (FinalValue * (maxValue - minValue)) + minValue;
FinalValue = Math.Round(FinalValue, 4);
FinalValue = Math.Abs(FinalValue);
return FinalValue;
}
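For what it's worth, MATLAB's tansig(n) is defined as 2/(1 + exp(-2n)) - 1, which is mathematically the same function as tanh(n), so the commented-out Math.Tanh line should give the same result up to floating-point noise; the intermediate Math.Round(..., 4) calls are one obvious place where the C# and MATLAB numbers can drift apart. A quick check (a standalone sketch, not part of the original code):
using System;

class TansigCheck
{
    static void Main()
    {
        for (double n = -3.0; n <= 3.0; n += 0.5)
        {
            double tansig = 2.0 / (1 + Math.Exp(-2 * n)) - 1;   // MATLAB tansig
            double tanh = Math.Tanh(n);                          // built-in tanh
            Console.WriteLine("{0,5:F1}  {1:R}  {2:R}  diff={3:E2}", n, tansig, tanh, tansig - tanh);
        }
    }
}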
The problem is solved.
It was with the weights matrix copied from MATLAB. Debugging mode saved my life. :)
I've got an array of voltage data and I want to get the RMS value from the FFT that has already been applied to that data. I've seen that the RMS in the time domain should equal RMS(fft) / sqrt(nFFT) by Parseval's theorem, but it gives me different results. I'm using these functions:
1) FFT
public static VectorDPoint FFT(double[] trama, double samplingFreq)
{
double fs = samplingFreq; // Sampling frequency
double t1 = 1 / fs; // Sample time
int l = trama.Length; // Length of signal
// Time vector
//Vector t = Normal(0, l, 1) * t1;
//// Values vector
//Vector y = new Vector(trama);
// We just use half of the data as the other half is symmetric. The middle is found at NFFT/2 + 1
int nFFT = (int)Math.Pow(2, NextPow2(l));
if (nFFT > 655600)
{ }
// Create complex array for FFT transformation. Use 0s for imaginary part
Complex[] samples = new Complex[nFFT];
for (int i = 0; i < nFFT; i++)
{
if (i >= trama.Length)
{
samples[i] = new MathNet.Numerics.Complex(0, 0);
}
else
{
samples[i] = new MathNet.Numerics.Complex(trama[i], 0);
}
}
ComplexFourierTransformation fft = new ComplexFourierTransformation(TransformationConvention.Matlab);
fft.TransformForward(samples);
ComplexVector s = new ComplexVector(samples);
s = s / l;
Vector f = (fs / 2.0) * Linspace(0, 1, (nFFT / 2) + 1);
VectorDPoint result = new VectorDPoint();
for (int i = 0; i < (nFFT / 2) + 1; i++)
{
result.Add(new DPoint(f[i], 2 * s[i].Modulus));
}
s = null;
f = null;
samples = null;
return result;
}
2) RMS
public static double RMSCalculate(double[] channelValues, int samplesNumber, double sampleRate, DateTime currentDate)
{
double[] times = new double[channelValues.Length];
double sampleTime = 0.0;
double period = 0;
times[0] = currentDate.Second + currentDate.Millisecond / 1000.0;
sampleTime = 1 / sampleRate; //s
// Limited samples
for (int i = 1; i < channelValues.Length; i++)
{
times[i] = times[i - 1] + sampleTime;
}
DPoint RMSValues = new DPoint();
RMSValues.Y = 0;
if (channelValues.Length == 1)
{
double x = channelValues[0];
double y = channelValues[0];
RMSValues = new DPoint(x, Math.Abs(y));
}
else
{
for (int i = 0; i < times.Length - 1; i++)
{
period = 0;
if (i + 1 < times.Length)
{
RMSValues.Y += channelValues[i + 1] * channelValues[i + 1] * (times[i + 1] - times[i]);
}
}
period = times[times.Length - 1] - times[0];
RMSValues.Y = RMSValues.Y / period;
RMSValues.Y = Math.Sqrt(RMSValues.Y);
}
return RMSValues.Y;
}
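For reference, Parseval's theorem for the DFT says sum_n x[n]^2 = (1/N) * sum_k |X[k]|^2, which gives RMS_time = sqrt(sum_k |X[k]|^2) / N, i.e. RMS_time = RMS(X) / sqrt(N) as stated above. Two things to watch in the code above: N must be the length actually transformed (the zero-padded nFFT), and the relation holds for the raw DFT values, whereas the FFT method returns a one-sided spectrum that has already been divided by l and doubled. A small self-contained check with a naive DFT (deliberately avoiding any FFT library so the scaling is unambiguous):
using System;
using System.Numerics;

class ParsevalCheck
{
    static void Main()
    {
        var rng = new Random(0);
        int n = 64;
        double[] x = new double[n];
        for (int i = 0; i < n; i++) x[i] = rng.NextDouble() * 2 - 1;

        // Time-domain RMS (uniform sampling assumed).
        double rmsTime = 0;
        foreach (double v in x) rmsTime += v * v;
        rmsTime = Math.Sqrt(rmsTime / n);

        // Naive DFT with no scaling applied.
        double sumSq = 0;
        for (int k = 0; k < n; k++)
        {
            Complex xk = Complex.Zero;
            for (int i = 0; i < n; i++)
                xk += x[i] * Complex.Exp(new Complex(0, -2 * Math.PI * k * i / n));
            sumSq += xk.Magnitude * xk.Magnitude;
        }
        double rmsFreq = Math.Sqrt(sumSq) / n;   // = sqrt(sum |X[k]|^2) / N

        Console.WriteLine("time-domain RMS: " + rmsTime);
        Console.WriteLine("freq-domain RMS: " + rmsFreq);   // should match the line above
    }
}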
For example, I have set up formulas to find my Xnew[k+1], Ynew[k+1] and Anew[k+1].
How do I pass the values to a 3-by-1 matrix if I want
index 1,1 to be Xnew[k+1],
index 1,2 to be Ynew[k+1],
index 1,3 to be Anew[k+1]?
Here's what I got so far.
for (k = 0; k < 5; k++)
{
Xnew[k+1] = cX + (T * MPCV[k]) * Math.Cos(cA);
Ynew[k+1] = cY + (T * MPCV[k]) * Math.Sin(cA);
Anew[k+1] = cA + (T * MPCW[k]);
double[,] qK = new double[3, 1];
int i, j;
for (i = 0; i < 3; i++)
{
for (j = 0; j < 1; j++)
{
qK[i, j] = 1;
}
}
}
Thank you for the help.
Supposing Xnew[k+1], Ynew[k+1] and Anew[k+1] are doubles:
for ( k = 0; k < 5; k++ ) {
Xnew[k + 1] = cX + (T * MPCV[k]) * Math.Cos(cA);
Ynew[k + 1] = cY + (T * MPCV[k]) * Math.Sin(cA);
Anew[k + 1] = cA + (T * MPCW[k]);
double[,] qk = { { Xnew[k + 1] , Ynew[k + 1] , Anew[k + 1] } };
}
This will give you a 1×3 matrix (array indices start at 0) with:
qk[0,0] = Xnew[k+1]
qk[0,1] = Ynew[k+1]
qk[0,2] = Anew[k+1]
But if you want a 3×1 matrix, use this instead:
double[,] qk = { { Xnew[k + 1] }, {Ynew[k + 1] }, {Anew[k + 1] } };
That will give you:
qk[0,0] = Xnew[k+1]
qk[1,0] = Ynew[k+1]
qk[2,0] = Anew[k+1]
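Note that qk is declared inside the loop, so each iteration's matrix is discarded. If you want to keep all five results, you could collect them; a sketch, assuming Xnew, Ynew and Anew are double arrays and the other variables are as in the question:
// needs using System.Collections.Generic;
var qList = new List<double[,]>();   // one 3x1 matrix per iteration
for (int k = 0; k < 5; k++)
{
    Xnew[k + 1] = cX + (T * MPCV[k]) * Math.Cos(cA);
    Ynew[k + 1] = cY + (T * MPCV[k]) * Math.Sin(cA);
    Anew[k + 1] = cA + (T * MPCW[k]);
    qList.Add(new double[,] { { Xnew[k + 1] }, { Ynew[k + 1] }, { Anew[k + 1] } });
}
// qList[k][0,0] == Xnew[k+1], qList[k][1,0] == Ynew[k+1], qList[k][2,0] == Anew[k+1]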
I'm trying to add a new adaptive thresholding method to AForge.Imaging.Filters.
I have added the new .cs file under
Sources\Imaging\Filters\Adaptive Binarization
Below is the code for the thresholding method:
namespace AForge.Imaging.Filters
{
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using AForge.Imaging;
class SauvolaAdaptiveThresholding : BaseInPlacePartialFilter
{
private const float DEFAULT_WeightingFactor = 0.3f;
private const short DEFAULT_WindowSize = 40;
private float weightingFactor ;
private short windowSize ;
// private format translation dictionary
private Dictionary<PixelFormat, PixelFormat> formatTranslations = new Dictionary<PixelFormat, PixelFormat>();
/// <summary>
/// Format translations dictionary.
/// </summary>
public override Dictionary<PixelFormat,PixelFormat> FormatTranslations
{
get { return formatTranslations; }
}
public float WeightingFactor
{
get{return this.weightingFactor;}
set{this.weightingFactor = value;}
}
public short WindowSize
{
get{return this.windowSize;}
set{this.windowSize = value;}
}
public SauvolaAdaptiveThresholding() :
this(DEFAULT_WeightingFactor,DEFAULT_WindowSize) { }
public SauvolaAdaptiveThresholding(float _weightFact , short _wsize)
{
this.WeightingFactor = _weightFact;
this.WindowSize =_wsize;
// initialize format translation dictionary
formatTranslations[PixelFormat.Format8bppIndexed] = PixelFormat.Format8bppIndexed;
formatTranslations[PixelFormat.Format16bppGrayScale] = PixelFormat.Format16bppGrayScale;
}
protected override unsafe void ProcessFilter(UnmanagedImage image, Rectangle rect)
{
int whalf = windowSize >> 1; // half of the window size
byte* ptr = (byte*)image.ImageData.ToPointer(); //input
// Calculate the integral image, and integral of the squared image
ulong[,] integral_image = new ulong[image.Width , image.Height];
ulong[,] rowsum_image = new ulong[image.Width , image.Height];
ulong[,] integral_sqimg = new ulong[image.Width , image.Height];
ulong[,] rowsum_sqimg = new ulong[image.Width , image.Height];
int xmin, ymin, xmax, ymax, index;
double diagsum, idiagsum, diff, sqdiagsum, sqidiagsum, sqdiff, area;
double mean, std, threshold;
for (int j = 0; j < image.Height; j++)
{
rowsum_image[0, j] = (ulong)*(ptr + j);
rowsum_sqimg[0, j] = rowsum_image[0, j] * rowsum_image[0, j];
}
for (int i = 1; i < image.Width; i++)
{
for (int j = 0; j < image.Height; j++)
{
index = j * image.Width + i;
rowsum_image[i, j] = rowsum_image[i - 1, j] + (ulong)*(ptr + index);
rowsum_sqimg[i, j] = rowsum_sqimg[i - 1, j] + (ulong)(*(ptr + index) * *(ptr + index));
}
}
for (int i = 0; i < image.Width; i++)
{
integral_image[i, 0] = rowsum_image[i, 0];
integral_sqimg[i, 0] = rowsum_sqimg[i, 0];
}
for (int i = 0; i < image.Width; i++)
{
for (int j = 1; j < image.Height; j++)
{
integral_image[i, j] = integral_image[i, j - 1] + rowsum_image[i, j];
integral_sqimg[i, j] = integral_sqimg[i, j - 1] + rowsum_sqimg[i, j];
}
}
//Calculate the mean and standard deviation using the integral image
for (int i = 0; i < image.Width; i++)
{
for (int j = 0; j < image.Height; j++)
{
xmin = Math.Max(0, i - whalf);//max(0, i - whalf);
ymin = Math.Max(0, j - whalf);
xmax = Math.Min(image.Width - 1, i + whalf);
ymax = Math.Min(image.Height - 1, j + whalf);
area = (xmax - xmin + 1) * (ymax - ymin + 1);
if (xmin == 0 && ymin == 0)
{ // Point at origin
diff = integral_image[xmax, ymax];
sqdiff = integral_sqimg[xmax, ymax];
}
else if (xmin == 0 && ymin != 0)
{ // first column
diff = integral_image[xmax, ymax] - integral_image[xmax, ymin - 1];
sqdiff = integral_sqimg[xmax, ymax] - integral_sqimg[xmax, ymin - 1];
}
else if (xmin != 0 && ymin == 0)
{ // first row
diff = integral_image[xmax, ymax] - integral_image[xmin - 1, ymax];
sqdiff = integral_sqimg[xmax, ymax] - integral_sqimg[xmin - 1, ymax];
}
else
{ // rest of the image
diagsum = integral_image[xmax, ymax] + integral_image[xmin - 1, ymin - 1];
idiagsum = integral_image[xmax, ymin - 1] + integral_image[xmin - 1, ymax];
diff = diagsum - idiagsum;
sqdiagsum = integral_sqimg[xmax, ymax] + integral_sqimg[xmin - 1, ymin - 1];
sqidiagsum = integral_sqimg[xmax, ymin - 1] + integral_sqimg[xmin - 1, ymax];
sqdiff = sqdiagsum - sqidiagsum;
}
mean = diff / area;
std = Math.Sqrt((sqdiff - diff * diff / area) / (area - 1));
threshold = mean * (1 + WeightingFactor * ((std / 128) - 1));
if ((double)*(ptr + (j * image.Width + i)) < threshold)//if (gray_image[i, j] < threshold)
*(ptr + (j * image.Width + i)) = 0;
else
*(ptr + (j * image.Width + i)) = 255;
}
}
}
}
}
But when I build the project and use the resulting AForge.Imaging.dll, the above class isn't available. When I look in the Object Browser I see all the other classes except the one I just added.
Can someone please tell me what I am doing wrong?
It needs to be public:
public class SauvolaAdaptiveThresholding : BaseInPlacePartialFilter
You can find more info about the C# access modifiers here: http://msdn.microsoft.com/en-us/library/ms173121.aspx
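Once the class is declared public and the library is rebuilt, it should show up in the Object Browser like the other filters and can be applied like any other in-place AForge filter. A small usage sketch (the bitmap variable is a placeholder; it must be an 8bpp grayscale image to match the format translations declared above):
// k = 0.3, window size = 40, matching the defaults in the class above
SauvolaAdaptiveThresholding sauvola = new SauvolaAdaptiveThresholding(0.3f, 40);

// in-place on an 8bpp grayscale Bitmap
sauvola.ApplyInPlace(grayscaleBitmap);

// or produce a new image and keep the original:
// Bitmap result = sauvola.Apply(grayscaleBitmap);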