I'm a hobby programmer.
I tried to ask this question earlier in a very unstructured way (sorry again); now I am trying to ask it the proper way.
I wrote the following code that seems to work unreliably.
The code was written like this for several reasons. I know it's messy, but it should still work. Explaining why I wrote it this way would mean explaining several weeks of quite extensive work. Please accept that this is at least the least bad option I could figure out. In the sample below I removed all sections of the code that are not needed to reproduce the error.
What this program does in a nutshell:
The purpose is to check a large number of parameter combinations for a program that receives streaming data. I simulate the original process to test parameter combinations.
First, data is read from files that represent recorded streaming data.
Then the data is aggregated.
Then I build a list of parameter combinations to test.
Finally I run the code for each parameter combination in parallel.
Inside the parallel part I calculate a financial indicator called the Bollinger bands. This is a moving average plus/minus a multiple of the standard deviation. This means the upper line and the lower line should only be equal when the variable bBandDelta = 0. However, sometimes CandleList[slot, w][ctr].bollingerUp is equal to CandleList[slot, w][ctr].bollingerDown even when bBandDelta is not 0.
As a result I don't understand how the equality check (the line that prints the "?!" diagnostic below) can ever fire. It seems that sometimes the program fails to write to CandleList[slot, w][ctr]. However, this should not be possible, because (1) I lock the list and (2) I use a ConcurrentBag. Could I have some help please?
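For reference, the relationship being computed boils down to the following minimal sketch (prices is a hypothetical list holding the last bollPar totalPrice values; this is not code from the program below). Since upper - lower = 4 * sigma, the two bands can only coincide when sigma is exactly zero:
// Minimal sketch of the Bollinger relationship (uses System, System.Collections.Generic, System.Linq)
static decimal[] Bollinger(List<decimal> prices)
{
decimal middle = prices.Average(); // the moving average (bollingerMiddle)
double variance = prices.Average(p => Math.Pow((double)(p - middle), 2));
decimal sigma = (decimal)Math.Sqrt(variance); // bBandDelta
return new[] { middle + 2 * sigma, middle - 2 * sigma }; // { bollingerUp, bollingerDown }
}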
Source files are here.
The code is:
using System;
using System.IO;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Collections.Concurrent;
namespace Justfortest
{
class tick : IComparable<tick> //Data element to represent a tick
{
public string disp_name; //ticker ID
public DateTime? trd_date; //trade date
public TimeSpan? trdtim_1; //trade time
public decimal trdprc_1; //price
public int? trdvol_1; //tick volume
public int CompareTo(tick other)
{
if (this.trdprc_1 == other.trdprc_1)
{
return other.trdprc_1.CompareTo(this.trdprc_1); // prices are equal, so this returns 0
}
return this.trdprc_1.CompareTo(other.trdprc_1); // sort by price
}
}
class candle : IComparable<candle> //Data element to represent a candle and all chart data calculated on candle level
{
public int id = 0;
public DateTime? openDate;
public TimeSpan? openTime;
public DateTime? closeDate;
public TimeSpan? closeTime;
public decimal open = 0;
public decimal high = 0;
public decimal low = 0;
public decimal close = 0;
public int? volume = 0;
public decimal totalPrice = 0;
public decimal bollingerUp = 0; //Bollinger upper line
public decimal bollingerDown = 0; //Bollinger below line
public int CompareTo(candle other)
{
if (totalPrice == other.totalPrice)
{
return other.totalPrice.CompareTo(totalPrice); // totals are equal, so this returns 0
}
return totalPrice.CompareTo(other.totalPrice); // sort by total price
}
}
class param : IComparable<param> //Data element represent a trade event signal
{
public int par1;
public int bollPar;
public int par2;
public int par3;
public int par4;
public int par5;
public int par6;
public decimal par7;
public decimal par8;
public decimal par9;
public decimal par10;
int IComparable<param>.CompareTo(param other)
{
throw new NotImplementedException();
}
}
class programCLass
{
void myProgram()
{
Console.WriteLine("Hello");
Console.WindowWidth = 180;
string[] sources = new string[]
{
#"C:\test\source\sourceW1.csv",
#"C:\test\source\sourceW2.csv",
};
List<candle>[] sourceCandleList = new List<candle>[sources.Count()];
List<param> paramList = new List<param>(10000000);
var csvAnalyzer = new StringBuilder();
{
List<tick>[] updatelist = new List<tick>[sources.Count()];
Console.WriteLine("START LOAD");
for (var i = 0; i < sources.Count(); i++)
{
var file = sources[i];
updatelist[i] = new List<tick>();
// ---------- Read CSV file ----------
var reader = new StreamReader(File.OpenRead(file));
while (!reader.EndOfStream)
{
var line = reader.ReadLine();
var values = line.Split(',');
tick update = new tick();
update.disp_name = values[0].ToString();
update.trd_date = Convert.ToDateTime(values[1]);
update.trdtim_1 = TimeSpan.Parse(values[2]);
update.trdprc_1 = Convert.ToDecimal(values[3]);
update.trdvol_1 = Convert.ToInt32(values[4]);
updatelist[i].Add(update);
}
Console.WriteLine(i);
}
Console.WriteLine("END LOAD"); // All files are in the memory
// Aggregate
Console.WriteLine("AGGREGATE START");
int tickAggr = 500;
for (var w = 0; w < sources.Count(); w++)
{
sourceCandleList[w] = new List<candle>();
List<tick> FuturesList = new List<tick>();
foreach (var update in updatelist[w])
{
tick t = new tick();
t.disp_name = update.disp_name.ToString();
t.trd_date = update.trd_date;
t.trdtim_1 = update.trdtim_1;
t.trdprc_1 = Convert.ToDecimal(update.trdprc_1);
t.trdvol_1 = update.trdvol_1;
// Add new tick to the list
FuturesList.Add(t);
if (FuturesList.Count == Math.Truncate(FuturesList.Count / (decimal)tickAggr) * tickAggr)
{
candle c = new candle();
c.openDate = FuturesList[FuturesList.Count - tickAggr].trd_date;
c.openTime = FuturesList[FuturesList.Count - tickAggr].trdtim_1;
c.closeDate = FuturesList.Last().trd_date;
c.closeTime = FuturesList.Last().trdtim_1;
c.open = FuturesList[FuturesList.Count - tickAggr].trdprc_1;
c.high = FuturesList.GetRange(FuturesList.Count - tickAggr, tickAggr).Max().trdprc_1;
c.low = FuturesList.GetRange(FuturesList.Count - tickAggr, tickAggr).Min().trdprc_1;
c.close = FuturesList.Last().trdprc_1;
c.volume = FuturesList.GetRange(FuturesList.Count - tickAggr, tickAggr).Sum(tick => tick.trdvol_1);
c.totalPrice = (c.open + c.high + c.low + c.close) / 4;
sourceCandleList[w].Add(c);
if (sourceCandleList[w].Count == 1)
{
c.id = 0;
}
else
{
c.id = sourceCandleList[w][sourceCandleList[w].Count - 2].id + 1;
}
}
}
FuturesList.Clear();
}
Console.WriteLine("AGGREGATE END");
for (var i = 0; i < sources.Count(); i++)
{
updatelist[i].Clear();
}
}
Console.WriteLine("BUILD PARAMLIST");
for (int par1 = 8; par1 <= 20; par1 += 4) // parameter deployed
{
for (int bollPar = 10; bollPar <= 25; bollPar += 5) // parameter deployed
{
for (int par2 = 6; par2 <= 18; par2 += 4) // parameter deployed
{
for (int par3 = 14; par3 <= 20; par3 += 3) // parameter deployed
{
for (int par4 = 10; par4 <= 20; par4 += 5) // parameter deployed
{
for (int par5 = 4; par5 <= 10; par5 += 2) // parameter deployed
{
for (int par6 = 5; par6 <= 30; par6 += 5)
{
for (decimal par7 = 1.0005M; par7 <= 1.002M; par7 += 0.0005M)
{
for (decimal par8 = 1.002M; par8 <= 1.0048M; par8 += 0.0007M)
{
for (decimal par9 = 0.2M; par9 <= 0.5M; par9 += 0.1M)
{
for (decimal par10 = 0.5M; par10 <= 2; par10 += 0.5M)
{
param p = new param();
p.par1 = par1;
p.bollPar = bollPar;
p.par2 = par2;
p.par3 = par3;
p.par4 = par4;
p.par5 = par5;
p.par6 = par6;
p.par7 = par7;
p.par8 = par8;
p.par9 = par9;
p.par10 = par10;
paramList.Add(p);
}
}
}
}
}
}
}
}
}
}
}
Console.WriteLine("END BUILD PARAMLIST, scenarios to test:{0}", paramList.Count);
var sourceCount = sources.Count();
sources = null;
Console.WriteLine("Start building pools");
int maxThreads = 64;
ConcurrentBag<int> pool = new ConcurrentBag<int>();
List<candle>[,] CandleList = new List<candle>[maxThreads, sourceCount];
for (int i = 0; i <= maxThreads - 1; i++)
{
pool.Add(i);
for (int w = 0; w <= sourceCount - 1; w++)
{
CandleList[i, w] = sourceCandleList[w].ConvertAll(p => p);
}
}
Console.WriteLine("End building pools");
int pItemsProcessed = 0;
Parallel.ForEach(paramList,
new ParallelOptions { MaxDegreeOfParallelism = maxThreads },
p =>
{
int slot = 1000;
while (!pool.TryTake(out slot));
var bollPar = p.bollPar;
decimal bollingerMiddle = 0;
double bBandDeltaX = 0;
for (var w = 0; w < sourceCount; w++)
{
lock (CandleList[slot, w])
{
for (var ctr = 0; ctr < CandleList[slot, w].Count; ctr++)
{
CandleList[slot, w][ctr].bollingerUp = 0; //Bollinger upper line
CandleList[slot, w][ctr].bollingerDown = 0; //Bollinger below line
//Bollinger Bands Calculation
if (ctr + 1 >= bollPar)
{
bollingerMiddle = 0;
bBandDeltaX = 0;
for (int i = 0; i <= bollPar - 1; i++)
{
bollingerMiddle = bollingerMiddle + CandleList[slot, w][ctr - i].totalPrice;
}
bollingerMiddle = bollingerMiddle / bollPar; //average
for (int i = 0; i <= bollPar - 1; i++)
{
bBandDeltaX = bBandDeltaX + (double)Math.Pow(System.Convert.ToDouble(CandleList[slot, w][ctr - i].totalPrice) - System.Convert.ToDouble(bollingerMiddle), 2);
}
bBandDeltaX = bBandDeltaX / bollPar;
decimal bBandDelta = (decimal)Math.Sqrt(System.Convert.ToDouble(bBandDeltaX));
CandleList[slot, w][ctr].bollingerUp = bollingerMiddle + 2 * bBandDelta;
CandleList[slot, w][ctr].bollingerDown = bollingerMiddle - 2 * bBandDelta;
if (CandleList[slot, w][ctr].bollingerUp == CandleList[slot, w][ctr].bollingerDown)
{
Console.WriteLine("?! Items processed=" + pItemsProcessed + " bollPar=" + bollPar + " ctr=" + ctr + " bollingerMiddle=" + bollingerMiddle + " bBandDeltaX=" + bBandDeltaX + " bBandDelta=" + bBandDelta + " bollingerUp=" + CandleList[slot, w][ctr].bollingerUp + " bollingerDown=" + CandleList[slot, w][ctr].bollingerDown);
}
}
// REMOVED Further calculations happen here
}
// REMOVED Some evaluations happen here
}
}
// REMOVED Some more evaluations happen here
Interlocked.Increment(ref pItemsProcessed);
pool.Add(slot);
});
}
static void Main(string[] args)
{
var P = new programCLass();
P.myProgram();
}
}
}
You may think I am crazy, but I decided to write a neural network from scratch in C# for study purposes. Please be patient, I still have little experience. English is not my first language, so I apologize in advance.
I started with a program for handwritten digit recognition using the MNIST database. I've read through a book about the algorithms involved and wrote this code.
using MathNet.Numerics.LinearAlgebra; // Matrix<double>/Vector<double> are Math.NET Numerics types; HProd and ChunkBy are the author's own extensions (Hadamard product, list chunking)
public class NeuralNetwork
{
private List<Matrix<double>> weights = new List<Matrix<double>>();
private List<Vector<double>> biases = new List<Vector<double>>();
private Random random = new Random();
private List<Image> test_data;
private int[] layer_sizes;
public NeuralNetwork(params int[] layers)
{
layer_sizes = layers;
for (int i = 0; i < layers.Length - 1; i++)
{
var weightLayer = Matrix<double>.Build.Dense(layers[i + 1], layers[i], (k, j) => random.NextDouble());
weights.Add(weightLayer);
}
for (int i = 1; i < layers.Length; i++)
{
var biasesLayer = Vector<double>.Build.Dense(layers[i], (k) => random.NextDouble());
biases.Add(biasesLayer);
}
}
public Vector<double> FeedForward(Vector<double> a)
{
for (int i = 0; i < weights.Count; i++)
{
a = Sigmoid(weights[i].Multiply(a) + biases[i]);
}
return Sigmoid(a);
}
public void SGD(ITrainingDataProvider dataProvider, int epochs, int chunk_size, double eta)
{
test_data = new MnistReader().ReadTestData();
Console.WriteLine("SGD algorithm started");
var training_data = dataProvider.ReadTrainingData();
Console.WriteLine("Training data has beeen read");
Console.WriteLine($"Training data test: {Test(training_data)}%");
Console.WriteLine($"Test data test: {Test(test_data)}%");
for (int epoch = 0; epoch < epochs; epoch++)
{
training_data = training_data.OrderBy(item => random.Next()).ToList();
List<List<Image>> chunks = training_data.ChunkBy(chunk_size);
foreach (List<Image> chunk in chunks)
{
ProcessChunk(chunk, eta);
}
Console.WriteLine($"Epoch: {epoch + 1}/{epochs}");
Console.WriteLine($"Training data test: {Test(training_data)}%");
Console.WriteLine($"Test data test: {Test(test_data)}%");
}
Console.WriteLine("Done!");
}
private double Test(List<Image> data)
{
int count = 0;
foreach (Image im in data)
{
var output = FeedForward(im.DataToVector());
int number = output.MaximumIndex();
if (number == (int)im.Label)
{
count++;
}
}
return (double)count / data.Count * 100;
}
private void ProcessChunk(List<Image> chunk, double eta)
{
Delta[] deltas = new Delta[chunk.Count];
for (int i = 0; i < chunk.Count; i++)
{
Image image = chunk[i];
var input = image.DataToVector();
var desired_output = Vector<double>.Build.Dense(layer_sizes[layer_sizes.Length - 1]);
desired_output[(int)image.Label] = 1;
Delta delta = BackPropagation(input, desired_output);
deltas[i] = delta;
}
Delta sum = deltas[0];
for (int i = 1; i < deltas.Length; i++)
{
sum += deltas[i];
}
Delta average_delta = sum / deltas.Length;
for (int i = 0; i < layer_sizes.Length - 1; i++)
{
weights[i] += average_delta.d_weights[i].Multiply(eta);
biases[i] += average_delta.d_biases[i].Multiply(eta);
}
}
private Delta BackPropagation(Vector<double> input, Vector<double> desired_output)
{
List<Vector<double>> activations = new List<Vector<double>>();
List<Vector<double>> zs = new List<Vector<double>>();
Vector<double> a = input;
activations.Add(input);
for (int i = 0; i < layer_sizes.Length - 1; i++)
{
var z = weights[i].Multiply(a) + biases[i];
zs.Add(z);
a = Sigmoid(z);
activations.Add(a);
}
List<Vector<double>> errors = new List<Vector<double>>();
List<Matrix<double>> delta_weights = new List<Matrix<double>>();
List<Vector<double>> delta_biases = new List<Vector<double>>();
var error = CDerivative(activations[activations.Count - 1], desired_output).HProd(SigmoidDerivative(zs[^1]));
errors.Add(error);
int steps = 0;
for (int i = layer_sizes.Length - 2; i >= 1; i--)
{
var layer_error = weights[i].Transpose().Multiply(errors[steps]).HProd(SigmoidDerivative(zs[i - 1]));
errors.Add(layer_error);
steps++;
}
errors.Reverse();
for (int i = layer_sizes.Length - 1; i >= 1; i--)
{
var delta_layer_weights = (errors[i - 1].ToColumnMatrix() * activations[i - 1].ToRowMatrix()).Multiply(-1);
delta_weights.Add(delta_layer_weights);
var delta_layer_biases = errors[i - 1].Multiply(-1);
delta_biases.Add(delta_layer_biases);
}
delta_biases.Reverse();
delta_weights.Reverse();
return new Delta { d_weights = delta_weights, d_biases = delta_biases };
}
private Vector<double> CDerivative(Vector<double> x, Vector<double> y)
{
return x - y;
}
private Vector<double> Sigmoid(Vector<double> x)
{
for (int i = 0; i < x.Count; i++)
{
x[i] = 1.0 / (1.0 + Math.Exp(-x[i]));
}
return x;
}
private Vector<double> SigmoidDerivative(Vector<double> x)
{
for (int i = 0; i < x.Count; i++)
{
x[i] = Math.Exp(-x[i]) / Math.Pow(1.0 + Math.Exp(-x[i]), 2);
}
return x;
}
}
The Delta class is a simple DTO that stores weight and bias changes in a single object.
public class Delta
{
public List<Matrix<double>> d_weights { get; set; }
public List<Vector<double>> d_biases { get; set; }
public static Delta operator +(Delta d1, Delta d2)
{
Delta result = d1;
for (int i = 0; i < d2.d_weights.Count; i++)
{
result.d_weights[i] += d2.d_weights[i];
}
for (int i = 0; i < d2.d_biases.Count; i++)
{
result.d_biases[i] += d2.d_biases[i];
}
return result;
}
public static Delta operator /(Delta d1, double d)
{
Delta result = d1;
for (int i = 0; i < d1.d_weights.Count; i++)
{
result.d_weights[i] /= d;
}
for (int i = 0; i < d1.d_biases.Count; i++)
{
result.d_biases[i] /= d;
}
return result;
}
}
Everything ended up working, but complex networks with one or more hidden layers don't show any significant results. They reach at best 70% accuracy and then the learning curve drops; the accuracy falls back to 20-30%. Typically the learning curve looks like a square-root function, but in my case it is more like an upside-down parabola (see the graph of my attempts with different numbers of neurons in the first hidden layer).
After a few tries, I found out that without any hidden layers the algorithm works just fine. It learns up to 90% accuracy and the curve never falls. Apparently the bug is somewhere in the back-propagation algorithm: it causes no problems with only input and output layers, but it does when I add a hidden layer.
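One standard way to localize this kind of bug is a numerical gradient check: perturb one weight by a small epsilon, recompute the loss, and compare the finite-difference slope against the gradient that back-propagation produces. A minimal standalone sketch (deliberately independent of the classes above; all names are made up for illustration) might look like this:
using System;
class GradientCheck
{
static double Sigmoid(double z) { return 1.0 / (1.0 + Math.Exp(-z)); }
// Loss = 0.5 * (y - t)^2 for one fixed sample; the six weights of a
// tiny 2-2-1 network are packed into w[] (biases omitted for brevity).
static double Loss(double[] w, double x0, double x1, double t)
{
double h0 = Sigmoid(w[0] * x0 + w[1] * x1);
double h1 = Sigmoid(w[2] * x0 + w[3] * x1);
double y = Sigmoid(w[4] * h0 + w[5] * h1);
return 0.5 * (y - t) * (y - t);
}
static void Main()
{
var rand = new Random(1);
double[] w = new double[6];
for (int i = 0; i < w.Length; i++) w[i] = rand.NextDouble() - 0.5;
double x0 = 0.3, x1 = -0.8, t = 0.7, eps = 1e-6;
for (int i = 0; i < w.Length; i++)
{
double saved = w[i];
w[i] = saved + eps; double lossUp = Loss(w, x0, x1, t);
w[i] = saved - eps; double lossDown = Loss(w, x0, x1, t);
w[i] = saved;
double numericGrad = (lossUp - lossDown) / (2 * eps); // central difference
Console.WriteLine("dLoss/dw[" + i + "] = " + numericGrad);
}
}
}
If the numeric slopes match the back-propagated deltas for the output layer but not for the hidden layer, the bug is in the error-propagation loop.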
I have been trying to find the problem for a long time and I hope that someone smarter than me will be able to help.
Thanks in advance!
I have a class library with a bunch of static methods. While trying to call one of them, I get an unhandled StackOverflowException somewhere in the ListDictionaryInternal class.
I tried enabling .NET Framework (v4.5.2) source stepping, surrounding the call with a try/catch block, and executing it step by step. When I place a continue statement after the "Appendix A" comment and then comment it out while debugging, the method works as expected. Otherwise I cannot even hit a breakpoint at the start of the method. I also tried calling the method with all parameters set to null, but that did not help either.
public static List<CalcSector> Split(List<CalcSector> calibration, List<ProfilePoint> profile, List<MeasurementPoint> additionalPoints)
{
double lengthCorridor = 10d;
double lengthEpsilon = 1d;
if (!(calibration?.Any() ?? false)) throw new ArgumentNullException(nameof(calibration), "Empty calibration table");
if (!(profile?.Any() ?? false)) throw new ArgumentNullException(nameof(profile), "Empty profile points collection");
for (int i = 0; i < calibration.Count - 1; i++)
if (Math.Abs(calibration[i].EndDistance - calibration[i + 1].StartDistance) > lengthEpsilon)
throw new ArgumentException("Calibration table integrity is compromised", $"calibration[{i}]");
List<CalcSector> result = new List<CalcSector>();
List<ProfilePoint> SummitPoints = new List<ProfilePoint>();
calibration.ForEach(x => result.Add(x));
profiles = profile.OrderBy(x => x.Distance).ToList();
//
if (additionalPoints?.Any() ?? false)
foreach (MeasurementPoint mp in additionalPoints.Where(x => x.Id != int.MinValue && x.Id != int.MaxValue))
for (int i = 0; i < result.Count; i++)
if (Math.Abs(mp.Distance - result[i].StartDistance) > lengthEpsilon && Math.Abs(mp.Distance - result[i].EndDistance) > lengthEpsilon && mp.Distance > result[i].StartDistance && mp.Distance < result[i].EndDistance)
{
CalcSector c = new CalcSector()
{
StartDistance = mp.Distance,
StartHeight = BinaryHeightSearch(mp.Distance),
StartPointId = mp.Id,
EndDistance = result[i].EndDistance,
EndHeight = result[i].EndHeight,
Length = result[i].EndDistance - mp.Distance,
Thickness = result[i].Thickness,
};
result[i].EndDistance = mp.Distance;
result[i].EndHeight = c.StartHeight;
result[i].EndPointId = mp.Id;
c.Volume = result[i].Volume / result[i].Length * c.Length;
result[i].Length -= c.Length;
result[i].Volume -= c.Volume;
result.Insert(i + 1, c);
break;
}
else if (Math.Abs(mp.Distance - result[i].StartDistance) < lengthEpsilon)
result[i].StartPointId = mp.Id;
else if (Math.Abs(mp.Distance - result[i].EndDistance) < lengthEpsilon)
result[i].EndPointId = mp.Id;
int start = 0;
int end = 0;
bool hasSpikes = true;
while (hasSpikes)
{
hasSpikes = false;
//Appendix A
for (int j = 0; j < result.Count; j++)
{
result[j].z = -1d * (result[j].StartHeight - result[j].EndHeight) / (result[j].EndDistance - result[j].StartDistance);
result[j].sI = start = BinaryProfileSearch(result[j].StartDistance);
result[j].eI = end = BinaryProfileSearch(result[j].EndDistance);
for (int i = start + 1; i < end; i++)
if (Math.Abs(result[j].z * (profiles[i].Distance - result[j].StartDistance) + result[j].StartHeight - profiles[i].Height) > lengthCorridor)
{
int maxIndex = -1;
double maxH = double.MinValue;
int minIndex = -1;
double minH = double.MaxValue;
for (; start < end; start++)
{
if (Math.Abs(result[j].z * (profiles[start].Distance - result[j].StartDistance) + result[j].StartHeight - profiles[start].Height) <= lengthCorridor)
continue;
if (result[j].z * (profiles[i].Distance - result[j].StartDistance) + result[j].StartHeight - profiles[i].Height > maxH)
{
maxH = profiles[start].Height;
maxIndex = start;
}
if (result[j].z * (profiles[i].Distance - result[j].StartDistance) + result[j].StartHeight - profiles[i].Height < minH)
{
minH = profiles[start].Height;
minIndex = start;
}
}
int target = Math.Min(maxIndex, minIndex);
CalcSector c = new CalcSector()
{
StartDistance = profiles[target].Distance,
StartHeight = profiles[target].Height,
sI = target,
EndDistance = result[j].EndDistance,
EndHeight = result[j].EndHeight,
EndPointId = result[j].EndPointId,
eI = result[j].eI,
Length = result[j].EndDistance - profiles[target].Distance,
Thickness = result[j].Thickness,
};
result[j].EndDistance = c.StartDistance;
result[j].EndHeight = c.StartHeight;
result[j].EndPointId = null;
result[j].eI = target;
result[j].z = -1d * (result[j].StartHeight - result[j].EndHeight) / (result[j].EndDistance - result[j].StartDistance);
c.Volume = result[j].Volume / result[j].Length * c.Length;
result[j].Length -= c.Length;
result[j].Volume -= c.Volume;
result.Insert(j + 1, c);
hasSpikes = true;
break;
}
}
}
for (int j = 0; j < result.Count; j++)
{
result[j].Diameter = 1000d * Math.Sqrt(4d * result[j].Volume / Constants["PI"] / result[j].Length);
result[j].OrdNum = j;
}
result.First().StartPointId = int.MinValue;
result.Last().EndPointId = int.MaxValue;
for (int i = 1; i < profiles.Count - 1; i++)
if (profiles[i - 1].Height < profiles[i].Height && profiles[i].Height > profiles[i + 1].Height)
SummitPoints.Add(profiles[i]);
return result;
}
public class CalcSector
{
public int OrdNum;
public double StartDistance;
public double StartHeight;
public int? StartPointId;
public double EndDistance;
public double EndHeight;
public int? EndPointId;
public double Length;
public double Volume;
public double Diameter;
public double Thickness;
public int sI;
public int eI;
public double z;
}
public class ProfilePoint
{
public double Distance;
public double Height;
}
public class MeasurementPoint
{
public int Id;
public double Distance;
}
I expect this method to split some of the original CalcSectors into smaller ones, but all I get is this unhandled fatal exception.
Added:
private static int BinaryProfileSearch(double distance)
{
if (profiles == null || profiles.Count == 0)
return -1;
//assuming that profile points are already ordered by distance
if (distance <= profiles.First().Distance)
return 0;
if (distance >= profiles.Last().Distance)
return profiles.Count - 1;
int first = 0;
int last = profiles.Count - 1;
while (first + 1 < last)
{
int mid = (first + last) / 2;
if (distance <= profiles[mid].Distance)
last = mid;
else
first = mid + 1;
}
if (distance - profiles[first].Distance > profiles[last].Distance - distance)
return last;
else
return first;
}
The solution came quite unexpectedly: an invalid value of the 8087 floating-point control word. The following (native) line changes it back:
_controlfp(0x9001F, 0xFFFFF);
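If the reset ever has to be issued from the C# side instead, a P/Invoke sketch could look like this. Note that msvcrt.dll is an assumption here; which CRT actually applies depends on how the offending native library was built:
using System.Runtime.InteropServices;
static class FpControl
{
// _controlfp is a C runtime export; the exact CRT DLL is an assumption.
[DllImport("msvcrt.dll", CallingConvention = CallingConvention.Cdecl)]
private static extern uint _controlfp(uint newControl, uint mask);
public static void ResetFpu() { _controlfp(0x9001F, 0xFFFFF); }
}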
I have a process for a sort of genetic nesting algorithm that I am trying to multi-thread. The process looks something like the following.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace ConsoleApp1
{
class Program
{
static void Main(string[] args)
{
CurrentNest = new CuttingRun();
for (int i = 0; i < 80; i++)
{
double w = GetRandomNumber(24, 50);
double h = GetRandomNumber(10, 15);
CurrentNest.PartList.Add(new LwCube { Width = w, Height = h, PartID = i });
}
//Task.Run(() =>
//{
// Parallel.For(0, 64, (i) => Parallel_Nest());
//});
while (true)
{
Parallel_Nest();
}
//Console.ReadKey();
}
public static double GetRandomNumber(double minimum, double maximum)
{
Random random = new Random();
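// Note: new Random() here (and in Parallel_Nest) is seeded from the system
// clock, so rapid successive calls can yield identical sequences; the
// ThisThreadsRandom pattern at the bottom of this file avoids that.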
return random.NextDouble() * (maximum - minimum) + minimum;
}
public static CuttingRun CurrentNest { get; set; }
public static void Parallel_Nest()
{
Random random = new Random();
int randomNumber = random.Next(2000, 10000);
var retVal = Nester.Nest_Parts(CurrentNest, randomNumber);
CurrentNest.Iterations++;
if (CurrentNest.SavedList.Count > 0)
{
if (retVal.Count < CurrentNest.SavedList.Count)
{
CurrentNest.SavedList = retVal;
}
}
else
{
CurrentNest.SavedList = retVal;
}
Console.WriteLine(CurrentNest.Iterations.ToString() + " " + CurrentNest.SavedList.Count.ToString());
if (CurrentNest.SavedList != retVal)
{
retVal.Clear();
}
}
}
//Models
public class LwSheet
{
public LwSheet(double width, double height)
{
SheetWidth = width;
SheetHeight = height;
FreeRectangles.Add(new LwCube { Width = width, Height = height, X = 0, Y = 0 });
}
public List<LwCube> UsedRectangles = new List<LwCube>();
public List<LwCube> FreeRectangles = new List<LwCube>();
public double SheetWidth { get; set; }
public double SheetHeight { get; set; }
public double TotalUsed { get; set; }
public bool Place_Part(LwCube prt)
{
bool retVal = false;
LwCube bestNode = FindPositionForBestAreaFit(prt);
//if the bestNode has a height then add our parts to the list
if (bestNode.Height > 0)
{
bestNode.PartID = prt.PartID;
int numRectanglesToProcess = FreeRectangles.Count;
for (int i = 0; i < numRectanglesToProcess; ++i)
{
if (SplitFreeNode(FreeRectangles[i], ref bestNode))
{
FreeRectangles.RemoveAt(i);
--i;
--numRectanglesToProcess;
}
}
PruneFreeList();
UsedRectangles.Add(bestNode);
retVal = true;
}
return retVal;
}
bool SplitFreeNode(LwCube freeNode, ref LwCube usedNode)
{
// Test with SAT if the rectangles even intersect.
if (usedNode.X >= freeNode.X + freeNode.Width || usedNode.X + usedNode.Width <= freeNode.X ||
usedNode.Y >= freeNode.Y + freeNode.Height || usedNode.Y + usedNode.Height <= freeNode.Y)
return false;
if (usedNode.X < freeNode.X + freeNode.Width && usedNode.X + usedNode.Width > freeNode.X)
{
// New node at the top side of the used node.
if (usedNode.Y > freeNode.Y && usedNode.Y < freeNode.Y + freeNode.Height)
{
LwCube newNode = new LwCube { Width = freeNode.Width, X = freeNode.X, Y = freeNode.Y };
newNode.Height = usedNode.Y - newNode.Y;
FreeRectangles.Add(newNode);
}
// New node at the bottom side of the used node.
if (usedNode.Y + usedNode.Height < freeNode.Y + freeNode.Height)
{
LwCube newNode = new LwCube { Width = freeNode.Width, X = freeNode.X };
newNode.Y = usedNode.Y + usedNode.Height;
newNode.Height = freeNode.Y + freeNode.Height - (usedNode.Y + usedNode.Height);
FreeRectangles.Add(newNode);
}
}
if (usedNode.Y < freeNode.Y + freeNode.Height && usedNode.Y + usedNode.Height > freeNode.Y)
{
// New node at the left side of the used node.
if (usedNode.X > freeNode.X && usedNode.X < freeNode.X + freeNode.Width)
{
LwCube newNode = new LwCube { Height = freeNode.Height, X = freeNode.X, Y = freeNode.Y };
newNode.Width = usedNode.X - newNode.X;
FreeRectangles.Add(newNode);
}
// New node at the right side of the used node.
if (usedNode.X + usedNode.Width < freeNode.X + freeNode.Width)
{
LwCube newNode = new LwCube { Height = freeNode.Height, Y = freeNode.Y };
newNode.X = usedNode.X + usedNode.Width;
newNode.Width = freeNode.X + freeNode.Width - (usedNode.X + usedNode.Width);
FreeRectangles.Add(newNode);
}
}
return true;
}
void PruneFreeList()
{
for (int i = 0; i < FreeRectangles.Count; ++i)
for (int j = i + 1; j < FreeRectangles.Count; ++j)
{
if (IsContainedIn(FreeRectangles[i], FreeRectangles[j]))
{
FreeRectangles.RemoveAt(i);
--i;
break;
}
if (IsContainedIn(FreeRectangles[j], FreeRectangles[i]))
{
FreeRectangles.RemoveAt(j);
--j;
}
}
}
bool IsContainedIn(LwCube a, LwCube b)
{
return a.X >= b.X && a.Y >= b.Y
&& a.X + a.Width <= b.X + b.Width
&& a.Y + a.Height <= b.Y + b.Height;
}
LwCube FindPositionForBestAreaFit(LwCube prt)
{
LwCube bestNode = new LwCube();
var bestAreaFit = SheetWidth * SheetHeight;
for (int i = 0; i < FreeRectangles.Count; ++i)
{
double areaFit = FreeRectangles[i].Width * FreeRectangles[i].Height - prt.Width * prt.Height;
// Try to place the rectangle in upright (non-flipped) orientation.
if (FreeRectangles[i].Width >= prt.Width && FreeRectangles[i].Height >= prt.Height)
{
if (areaFit < bestAreaFit)
{
bestNode.X = FreeRectangles[i].X;
bestNode.Y = FreeRectangles[i].Y;
bestNode.Height = prt.Height;
bestNode.Width = prt.Width;
bestAreaFit = areaFit;
}
}
}
return bestNode;
}
}
public class LwCube
{
public int PartID { get; set; }
public double Width { get; set; }
public double Height { get; set; }
public double X { get; set; }
public double Y { get; set; }
}
public class CuttingRun
{
public List<LwCube> PartList = new List<LwCube>();
public List<LwSheet> SavedList = new List<LwSheet>();
public List<LwSheet> Sheets = new List<LwSheet>();
public int Iterations { get; set; }
}
//Actions
public static class Nester
{
public static List<LwSheet> Nest_Parts(CuttingRun cuttingRun, int loopCount)
{
var SheetList = new List<LwSheet>();
List<LwCube> partList = new List<LwCube>();
partList.AddRange(cuttingRun.PartList);
while (partList.Count > 0)
{
LwSheet newScore = new LwSheet(97, 49);
List<LwCube> addingParts = new List<LwCube>();
foreach (var prt in partList)
{
addingParts.Add(new LwCube { Width = prt.Width, Height = prt.Height, PartID = prt.PartID });
}
if (addingParts.Count > 0)
{
var sheets = new ConcurrentBag<LwSheet>();
Parallel.For(0, loopCount, (i) =>
{
var hmr = new LwSheet(97, 49);
Add_Parts_To_Sheet(hmr, addingParts);
sheets.Add(hmr);
});
//for (int i = 0; i < loopCount; i++)
//{
// var hmr = new LwSheet(97, 49);
// Add_Parts_To_Sheet(hmr, addingParts, addToLarge, addToMedium);
// sheets.Add(hmr);
//}
addingParts.Clear();
var bestSheet = sheets.Where(p => p != null).OrderByDescending(p => p.TotalUsed).First();
sheets = null;
newScore = bestSheet;
foreach (var ur in newScore.UsedRectangles)
{
partList.Remove(partList.Single(p => p.PartID == ur.PartID));
}
SheetList.Add(newScore);
}
}
return SheetList;
}
public static void Add_Parts_To_Sheet(LwSheet sh, List<LwCube> parts)
{
var myList = new List<LwCube>();
myList.AddRange(parts);
myList.Shuffle();
foreach (var prt in myList)
{
sh.Place_Part(prt);
}
myList.Clear();
foreach (var ur in sh.UsedRectangles)
{
sh.TotalUsed += ur.Width * ur.Height;
}
}
[ThreadStatic] private static Random Local;
public static Random ThisThreadsRandom
{
get { return Local ?? (Local = new Random(unchecked(Environment.TickCount * 31 + System.Threading.Thread.CurrentThread.ManagedThreadId))); }
}
public static void Shuffle<T>(this IList<T> list)
{
int n = list.Count;
while (n > 1)
{
n--;
int k = ThisThreadsRandom.Next(n + 1);
T value = list[k];
list[k] = list[n];
list[n] = value;
}
}
}
}
I have tried using parallel for loops on each of the loops to try to speed up the process. I have also tried changing them to tasks and using Task.WhenAll. However, I am only able to use around 25% of my CPU. If I start the program 4 separate times, I can use 100%.
I am wondering if anyone has any ideas on how I could use 100% of my CPU without starting the program more than once?
EDIT: After adding a scaled-down working version, I also commented out one of the parallel loops and one of the normal loops to show where I put them in the code.
However I am only able to use around 25% of my CPU. If I start the program 4 different times, I can use 100%. I am wondering if anyone has any ideas on how I could use 100% of my CPU without starting the program more than once?
Your code appears to be a mixture of asynchronous (presumably I/O-bound) and parallel (presumably CPU-bound) portions. I say "appears to be" because we can't say for sure where the problem is since this is not a minimal reproducible example.
But, if that assumption is correct, then the reason your CPU is underutilized is simple: the parallel CPU-bound portions are waiting for their input data from the asynchronous I/O-bound portions. The only way to fix that is to run the I/O-bound portions concurrently. Move your I/O-bound code as early in the pipeline as possible, and then be sure to run the I/O-bound portions as concurrently as possible. E.g., if you have to call a WebApi for each item, call it as soon as you have the item; or if you're reading items from a database, try to read as many in a batch as possible. This is to minimize the amount of time that the CPU-bound portions have to wait for their data.
"Asynchronous Parallel ForEach" is rarely a good tool for this kind of problem. I would either look into TPL Dataflow or build your own pipeline using Channels.
At the end of the day, it is possible that the algorithm as a whole is I/O-bound. In that case, there isn't a whole lot you can do: only one CPU would be used because the I/O couldn't even keep up with that single CPU, and in that case using more CPUs wouldn't provide any benefit.
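For illustration, a minimal producer/consumer pipeline built on System.Threading.Channels might look like the following sketch (generic work items rather than the nesting code above; it needs the System.Threading.Channels package and C# 8 for await foreach):
using System;
using System.Threading.Channels;
using System.Threading.Tasks;
class PipelineSketch
{
static async Task Main()
{
// Bounded so a fast producer cannot outrun slow consumers without limit.
Channel<int> channel = Channel.CreateBounded<int>(100);
// Producer: stands in for the I/O-bound part that yields work items.
Task producer = Task.Run(async () =>
{
for (int i = 0; i < 1000; i++)
await channel.Writer.WriteAsync(i);
channel.Writer.Complete();
});
// Consumers: CPU-bound workers, one per core.
Task[] consumers = new Task[Environment.ProcessorCount];
for (int c = 0; c < consumers.Length; c++)
{
consumers[c] = Task.Run(async () =>
{
await foreach (int item in channel.Reader.ReadAllAsync())
DoCpuBoundWork(item); // placeholder for the real work
});
}
await producer;
await Task.WhenAll(consumers);
}
static void DoCpuBoundWork(int item) { /* placeholder */ }
}
The bounded channel provides back-pressure: the producer awaits when the consumers fall behind, which keeps the CPU-bound workers saturated without unbounded queueing.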
I am using a FileSystemWatcher to monitor file output and then dynamically display the results using D3 (Dynamic Data Display). Here are more details:
The data is a series of TIFF files generated one by one in quick succession (20-25 milliseconds per file);
Once a file comes in, an event is fired and the file is processed (some stats calculation);
The results are sent to a D3 plotter, which dynamically displays them on a chart.
When I close the D3 chart window, the file watcher catches pretty much all the events, but here is my problem:
When I leave the D3 chart window open, even when there is no stats calculation at all, the file watcher drops many events (big gaps occur).
Here is what we have tried:
Increasing the watcher buffer (see the sketch below), but it still drops events as long as the D3 chart window is open.
Using a C++ DLL to calculate the stats, but that turned out to be even slower.
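For reference, the buffer increase looks roughly like this (a sketch; the path and filter are placeholders). A larger buffer only delays overflow when the handler cannot keep up, which is why the handler should do no more than queue the file name:
var watcher = new System.IO.FileSystemWatcher(@"C:\data", "*.tif"); // placeholder path and filter
watcher.InternalBufferSize = 64 * 1024; // default is 8 KB; 64 KB is the documented maximum
watcher.Created += (s, e) => { /* enqueue e.FullPath and return as quickly as possible */ };
watcher.EnableRaisingEvents = true;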
So I am wondering:
Do the D3 plotter and the file watcher conflict/interfere with each other?
Can I use them together for real-time plotting at all?
Is there any way to get around my problem?
Any pointers are appreciated.
Here is the code for the D3 plotter, including the constructor, the plot initialization, and the plot update:
#region Constructor
public SignalStatsDisplay()
{
InitializeComponent();
// timeDomainPlotter.Legend.Remove();
_initialChildrenCount = timeDomainPlotter.Children.Count;
int count = timeDomainPlotter.Children.Count;
//do not remove the initial children
if (count > _initialChildrenCount)
{
for (int i = count - 1; i >= _initialChildrenCount; i--)
{
timeDomainPlotter.Children.RemoveAt(i);
}
}
_nMaxStatsOneChannel = Enum.GetNames(typeof(Window1.ROISignalList)).Length;
_curveBrush = new Brush[_nMaxStatsOneChannel];
_statsEnable = new int[_nMaxStatsOneChannel];
for (int i = 0; i < _nMaxStatsOneChannel; i++)
{
_curveBrush[i] = new SolidColorBrush((Color)ColorConverter.ConvertFromString(_colorList[i]));
_statsEnable[i] = 0;
}
_nActiveStatsOneChannel = 0;
}
#endregion Constructor
public void InitiateSignalAnalysisPlot()
{
_statsName = Enum.GetNames(typeof(Window1.ROISignalList));
int count = 0;
_statsEnableIndex = new int[_nActiveStatsOneChannel];
for (int i = 0; i < _nMaxStatsOneChannel; i++) // assign color
{
if (_statsEnable[i] == 1)
{
_statsEnableIndex[count] = i;
count++;
}
}
if (_nActiveChannel > 0) // timeDomainPlotter init
{
_dataX = new List<double[]>();
_dataY = new List<double[]>();
double[] dataXOneCh = new double[_signalLength];
double[] dataYOneCh = new double[_signalLength];
dataXOneCh[0] = 0;
dataYOneCh[0] = 0;
for (int i = 0; i < _nActiveChannel; i++)
{
for (int j = 0; j < _nActiveStatsOneChannel; j++)
{
_dataX.Add(dataXOneCh); // data x-y mapping init
_dataY.Add(dataYOneCh);
EnumerableDataSource<double> xOneCh = new EnumerableDataSource<double>(dataXOneCh);
EnumerableDataSource<double> yOneCh = new EnumerableDataSource<double>(dataYOneCh);
xOneCh.SetXMapping(xVal => xVal);
yOneCh.SetXMapping(yVal => yVal);
CompositeDataSource dsOneCh = new CompositeDataSource(xOneCh, yOneCh);
LineAndMarker<MarkerPointsGraph> lam = timeDomainPlotter.AddLineGraph(dsOneCh,
new Pen(_curveBrush[_statsEnableIndex[j]], 2),
new CirclePointMarker { Size = 5, Fill = _curveBrush[_statsEnableIndex[j]] },
new PenDescription(_statsName[_statsEnableIndex[j]]));
}
}
Action FitToView = delegate()
{
timeDomainPlotter.FitToView();
};
this.Dispatcher.Invoke(System.Windows.Threading.DispatcherPriority.Normal, FitToView);
}
else
{
return;
}
}
public void RedrawSignalAnalysisPlot()
{
int startIndex = _initialChildrenCount;
if ((_nActiveStatsOneChannel > 0) && (_dataX != null) && (_dataY != null))
{
CompositeDataSource[] dsCh = new CompositeDataSource[_nActiveStatsOneChannel];
int m, n;
int index;
for (int i = 0; i < _nActiveChannel; i++)
{
for (int j = 0; j < _nActiveStatsOneChannel; j++)
{
index = i * _nActiveStatsOneChannel + j;
if (_dataX[index].Length == _dataY[index].Length)
{
EnumerableDataSource<double> xOneCh = new EnumerableDataSource<double>(_dataX[index]);
xOneCh.SetXMapping(xVal => xVal);
EnumerableDataSource<double> yOneCh = new EnumerableDataSource<double>(_dataY[index]);
yOneCh.SetYMapping(yVal => yVal);
CompositeDataSource ds = new CompositeDataSource(xOneCh, yOneCh);
Action UpdateData = delegate()
{
m = i * 2;
n = j * 2;
// ((LineGraph)timeDomainPlotter.Children.ElementAt(startIndex + n + m * _nActiveStatsOneChannel)).DataSource = ds;
// ((LineGraph)timeDomainPlotter.Children.ElementAt(startIndex + n + m * _nActiveStatsOneChannel)).LinePen
// = new Pen(new SolidColorBrush(_curveBrush[j]), 1);
((MarkerPointsGraph)timeDomainPlotter.Children.ElementAt(startIndex + n + 1 + m * _nActiveStatsOneChannel)).DataSource = ds;
// ((MarkerPointsGraph)timeDomainPlotter.Children.ElementAt(startIndex + n + 1 + m * _nActiveStatsOneChannel)).Marker
// = new CirclePointMarker { Size = 5, Fill = Brushes.Green };
};
this.Dispatcher.Invoke(System.Windows.Threading.DispatcherPriority.Normal, UpdateData);
}
}
}
/* Action PlotFitToView = delegate()
{
timeDomainPlotter.FitToView();
};
this.Dispatcher.Invoke(System.Windows.Threading.DispatcherPriority.Normal, PlotFitToView);*/
}
}
Here is how files are monitored:
This is where the (file written) events are handled:
void tiffWatcher_EventChanged(object sender, WatcherExEventArgs e)
{
string fileName = ((FileSystemEventArgs)(e.Arguments)).FullPath;
string fileExt = StringExtension.GetLast(fileName, 4);
if (!IsFileLocked(fileName))
{
Action EventFinished = delegate()
{
CreateListViewItem(fileName, "Finished", DateTime.Now.ToString("HH:mm:ss.fff"));
};
listView1.Dispatcher.Invoke(System.Windows.Threading.DispatcherPriority.Normal, EventFinished);
_tiffName.Add( fileName);
_tiffFilledCount++;
}
}
TIFF data processing and data communication to the D3 plotter:
void tiffTimer_Tick(object sender, EventArgs e)
{
//throw new NotImplementedException();
byte[] image = new byte[_signalManager.ImgWidth * _signalManager.ImgHeight];
if (_tiffFilledCount - _tiffProcessedCount >= 1)
{
string fileName = _tiffName[_tiffProcessedCount++];
char filePre = fileName[49];
int indexBegin = fileName.LastIndexOf("_");
int indexEnd = fileName.LastIndexOf(".");
_signalIndex = Convert.ToInt32(fileName.Substring(indexBegin + 1, indexEnd - indexBegin - 1)) - 1; // 0 based
_deltaT = ExtractTiffDeltaT(fileName, "DeltaT=", 1);
_channelIndex = (int)Enum.Parse(typeof(ChannelList), Convert.ToString(filePre));
TIFFIImageIO.LoadTIFF(fileName, ref image);
_signalManager.Image = image;
for (int i = 0; i < _nActiveStatsOneChannel; i++)
{
_signalManager.GetSignal( _signalDisplay.StatsEnableIndex[i], ref _signal);
UpdateSignal(_channelIndex, i, _tiffProcessedCount-1, _deltaT, _signal);
}
// if( _tiffProcessedCount % 5 == 0)
_signalDisplay.SetData(_XList, _YList, true);
}
}
I use a Timer.Tick to process the files every 50-100 milliseconds; I am still testing.
The way I handled it was to set up a Timer that cyclically handles the calculation and the display. So far it works fine. Of course many events are missed, but we handle a large number of points (thousands of them), so a few hundred are enough to do the plot. Here is the code:
private Timer _tiffTimer;
void Window1_Loaded(object sender, RoutedEventArgs e)
{
//throw new NotImplementedException();
_tiffTimer = new Timer();
_tiffTimer.Interval = 50; // change interval to change performance
_tiffTimer.Tick += new EventHandler(tiffTimer_Tick);
_tiffTimer.Start();
}
void tiffTimer_Tick(object sender, EventArgs e)
{
//do your stuff here
}
I am trying to create a neural network for the function y = e^(-(x-u)^2 / (2*o^2)), where u = 50 and o = 15.
I must train my neural network so I can find the two x's for each y. I have created the following code and it seems to learn nicely, but when I test it the outputs are all around 0.99 to 1 where I should get 25 and 75, and I just can't see why. My best guess is that my error correction is wrong, but I can't find the error. The neural network uses back-propagation.
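For reference, inverting the function gives the two x values that getX below computes:
y = e^(-(x-u)^2 / (2*o^2))  =>  (x-u)^2 = 2*o^2*ln(1/y)  =>  x = u ± sqrt(2*o^2*ln(1/y))
For example, y ≈ 0.2494 with u = 50 and o = 15 gives 2*o^2*ln(1/y) ≈ 625, so x = 50 ± 25, i.e. the 25 and 75 mentioned above.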
The test code and training set
class Program
{
static void Main(string[] args)
{
args = new string[] {
"c:\\testTrain.csv",
"c:\\testValues.csv"
};
// Output File
string fileTrainPath = null;
string fileValuesPath = null;
if (args.Length > 0)
{
fileTrainPath = args[0];
if (File.Exists(fileTrainPath))
File.Delete(fileTrainPath);
fileValuesPath = args[1];
if (File.Exists(fileValuesPath))
File.Delete(fileValuesPath);
}
double learningRate = 0.1;
double u = 50;
double o = 15;
Random rand = new Random();
Network net = new Network(1, 8, 4, 2);
NetworkTrainer netTrainer = new NetworkTrainer(learningRate, net);
List<TrainerSet> TrainerSets = new List<TrainerSet>();
for(int i = 0; i <= 20; i++)
{
double random = rand.NextDouble();
TrainerSets.Add(new TrainerSet(){
Inputs = new double[] { random },
Outputs = getX(random, u, o)
});
}
// Train Network
string fileTrainValue = String.Empty;
for (int i = 0; i <= 10000; i++)
{
if (i == 5000)
{ } // left in deliberately as a debugger breakpoint anchor
double error = netTrainer.RunEpoch(TrainerSets);
Console.WriteLine("Epoch " + i + ": Error = " + error);
if(fileTrainPath != null)
fileTrainValue += i + "," + learningRate + "," + error + "\n";
}
if (fileTrainPath != null)
File.WriteAllText(fileTrainPath, fileTrainValue);
// Test Network
string fileValuesValue = String.Empty;
for (int i = 0; i <= 100; i++)
{
double y = rand.NextDouble();
double[] dOutput = getX(y, u, o);
double[] Output = net.Compute(new double[] { y });
if (fileValuesPath != null)
fileValuesValue += i + "," + y + "," + dOutput[0] + "," + dOutput[1] + "," + Output[0] + "," + Output[1] + "\n";
}
if (fileValuesPath != null)
File.WriteAllText(fileValuesPath, fileValuesValue);
}
public static double getResult(int x, double u, double o)
{
return Math.Exp(-Math.Pow(x-u,2)/(2*Math.Pow(o,2)));
}
public static double[] getX(double y, double u, double o)
{
return new double[] {
u + Math.Sqrt(2 * Math.Pow(o, 2) * Math.Log(1/y)),
u - Math.Sqrt(2 * Math.Pow(o, 2) * Math.Log(1/y)),
};
}
}
The code behind the network
public class Network
{
protected int inputsCount;
protected int layersCount;
protected NetworkLayer[] layers;
protected double[] output;
public int Count
{
get
{
return layers.Count();
}
}
public NetworkLayer this[int index]
{
get { return layers[index]; }
}
public Network(int inputsCount, params int[] neuronsCount)
{
this.inputsCount = Math.Max(1, inputsCount);
this.layersCount = Math.Max(1, neuronsCount.Length);
layers = new NetworkLayer[neuronsCount.Length];
for (int i = 0; i < layersCount; i++)
layers[i] = new NetworkLayer(neuronsCount[i],
(i == 0) ? inputsCount : neuronsCount[i - 1]);
}
public virtual double[] Compute(double[] input)
{
output = input;
foreach (NetworkLayer layer in layers)
output = layer.Compute(output);
return output;
}
}
public class NetworkLayer
{
protected int inputsCount = 0;
protected int neuronsCount = 0;
protected Neuron[] neurons;
protected double[] output;
public Neuron this[int index]
{
get { return neurons[index]; }
}
public int Count
{
get { return neurons.Length; }
}
public int Inputs
{
get { return inputsCount; }
}
public double[] Output
{
get { return output; }
}
public NetworkLayer(int neuronsCount, int inputsCount)
{
this.inputsCount = Math.Max( 1, inputsCount );
this.neuronsCount = Math.Max( 1, neuronsCount );
neurons = new Neuron[this.neuronsCount];
output = new double[this.neuronsCount];
// create each neuron
for (int i = 0; i < neuronsCount; i++)
neurons[i] = new Neuron(inputsCount);
}
public virtual double[] Compute(double[] input)
{
// compute each neuron
for (int i = 0; i < neuronsCount; i++)
output[i] = neurons[i].Compute(input);
return output;
}
}
public class Neuron
{
protected static Random rand = new Random((int)DateTime.Now.Ticks);
public int Inputs;
public double[] Input;
public double[] Weights;
public double Output = 0;
public double Threshold;
public double Error;
public Neuron(int inputs)
{
this.Inputs = inputs;
Weights = new double[inputs];
for (int i = 0; i < inputs; i++)
Weights[i] = rand.NextDouble() * 0.5;
}
public double Compute(double[] inputs)
{
Input = inputs;
double e = 0.0;
for (int i = 0; i < inputs.Length; i++)
e += Weights[i] * inputs[i];
e -= Threshold;
return (Output = sigmoid(e));
}
private double sigmoid(double value)
{
return (1 / (1 + Math.Exp(-1 * value)));
//return 1 / (1 + Math.Exp(-value));
}
}
My Trainer
public class NetworkTrainer
{
private Network network;
private double learningRate = 0.1;
public NetworkTrainer(double a, Network network)
{
this.network = network;
this.learningRate = a;
}
public double Run(double[] input, double[] output)
{
network.Compute(input);
return CorrectErrors(output);
}
public double RunEpoch(List<TrainerSet> sets)
{
double error = 0.0;
for (int i = 0, n = sets.Count; i < n; i++)
error += Run(sets[i].Inputs, sets[i].Outputs);
// return summary error
return error;
}
private double CorrectErrors(double[] desiredOutput)
{
double[] errorLast = new double[desiredOutput.Length];
NetworkLayer lastLayer = network[network.Count - 1];
for (int i = 0; i < desiredOutput.Length; i++)
{
// S(p)=y(p)*[1-y(p)]*(yd(p)-y(p))
lastLayer[i].Error = lastLayer[i].Output * (1-lastLayer[i].Output)*(desiredOutput[i] - lastLayer[i].Output);
errorLast[i] = lastLayer[i].Error;
}
// Calculate errors
for (int l = network.Count - 2; l >= 0; l--)
{
for (int n = 0; n < network[l].Count; n++)
{
double newError = 0;
for (int np = 0; np < network[l + 1].Count; np++)
{
newError += network[l + 1][np].Weights[n] * network[l + 1][np].Error;
}
network[l][n].Error = newError;
}
}
// Update Weights
// w = w + (a * input * error)
for (int l = network.Count - 1; l >= 0; l--)
{
for (int n = 0; n < network[l].Count; n++)
{
for (int i = 0; i < network[l][n].Inputs; i++)
{
// deltaW = a * y(p) * s(p)
double deltaW = learningRate * network[l][n].Output * network[l][n].Error;
network[l][n].Weights[i] += deltaW;
}
}
}
double returnError = 0;
foreach (double e in errorLast)
returnError += e;
return returnError;
}
}
For regression problems your output layer should have the identity (or at least a linear) activation function. That way you don't have to scale your output. The derivative of the identity function is 1, and thus the derivative dE/da_i for the output layer is simply y - t (lastLayer[i].Output - desiredOutput[i]).
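Concretely, keeping the update convention of the trainer above (weights are increased in proportion to Error), the output-layer block in CorrectErrors would reduce to something like this sketch; it assumes Compute is also changed so that the last layer skips the sigmoid:
// Sketch: linear (identity) output layer for regression.
// The sigmoid-derivative factor y*(1-y) disappears; the delta is just (t - y).
for (int i = 0; i < desiredOutput.Length; i++)
{
lastLayer[i].Error = desiredOutput[i] - lastLayer[i].Output;
errorLast[i] = lastLayer[i].Error;
}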