Why does a List<long[]> variable automatically change with another variable? - C#

I have two variables: long[] nextState and List<long[]> TabuList.
I use this code to add items to TabuList:
TabuList.Add(nextState);
or this:
TabuList.Insert(Index, nextState);
The problem is that after either of these operations, all of TabuList's items automatically change to the current value of nextState.
My complete code is:
class TabuSearch
{
private long[] current { get; set; }
private double Delta;
private Random rnd = new Random();
int foundLists = 0;
public TabuSearch()
{
current = new long[Convert.ToInt32(num_list1)];
}
public long[] TabuMOSA3Objectives(long[] c)
{
assign(current, c);
long[] nextState = new long[Convert.ToInt32(num_list1)];
List<long[]> TabuList = new List<long[]>();
double proba;
double alpha = 0.969;
double temperature = 500.0;
double epsilon = 0.0001;
short domination_st;
int iter = 0;
while (temperature > epsilon)
{
iter++;
Delta = 1;
assign(nextState, GenerateNextState(primaryList, current));
domination_st = CheckDomination3Objective(nextState, current);
try { var tmp = TabuList.Find(x => x == nextState); if (tmp == null) foundLists = 0; else foundLists = tmp.Count(); }
catch { }
if (foundLists == 0)
{
if (domination_st > 0)
{
assign(current, nextState);
}
else // domination_st < 0
{
proba = rnd.NextDouble();
if (proba < 1 / (1 + Math.Exp(Delta * temperature)))
{
assign(current, nextState);
}
else
{
if (TabuList.Count == 10)
TabuList.RemoveAt(0);
assign(nextState, TabuList);
}
}
}
//cooling proces on every iteration
temperature *= alpha;
}
return current;
}
static void assign(long[] c, long[] n)
{
for (int i = 0; i < c.Length; i++)
c[i] = n[i];
}
static void assign(long[] item, List<long[]> list)
{
list.Add(item);
}
}
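For reference, here is a minimal, stand-alone sketch (hypothetical values, not the asker's code) of why this kind of behavior can occur: long[] is a reference type, so a List<long[]> stores references rather than copies, and adding the same array instance repeatedly means every entry reflects whatever that array currently contains.

using System;
using System.Collections.Generic;

class ReferenceDemo
{
    static void Main()
    {
        long[] nextState = { 1, 2, 3 };
        var tabuList = new List<long[]>();

        tabuList.Add(nextState);      // stores a reference, not a copy
        nextState[0] = 99;            // mutate the shared array
        tabuList.Add(nextState);      // same reference again

        Console.WriteLine(tabuList[0][0]);   // 99 - both entries point at the same array
        Console.WriteLine(tabuList[1][0]);   // 99

        tabuList.Add((long[])nextState.Clone());   // a copy stays independent of later changes
    }
}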

Related

C# SPOJ time optimization

I'm trying to solve MOHIBPIZ - PIZZA (https://www.spoj.com/problems/MOHIBPIZ/).
I've been sitting on it for two days now and have tried everything I could think of or find on the internet. The last chance before giving up is to ask you guys.
To reduce I/O time I'm using the InputOutput class created by davidsekar (https://github.com/davidsekar/C-sharp-Programming-IO/blob/master/ConsoleInOut/InputOutput.cs),
but I still get "time limit exceeded". :(
I tried with two loops, but the approach with the recursive function seems more optimal to me. Thanks in advance for all the hints, suggestions and answers.
This is the code (link on ideone: https://ideone.com/):
using System;
using System.IO;
public class Test
{
public static void Main()
{
InputOutput reader = new InputOutput();
StreamWriter _output = new StreamWriter(Console.OpenStandardOutput());
int T = reader.ReadInt();
for (int i = 0; i < T; i++)
{
_output.WriteLine(Recursion(reader.ReadInt()));
}
_output.Flush();
}
private static int Recursion(int x)
{
if(x <= 1)
{
return 2;
}
else
{
return Recursion(x - 1) + x;
}
}
#region Input Output Helper
public class InputOutput : System.IDisposable
{
private System.IO.Stream _readStream, _writeStream;
private int _readIdx, _bytesRead, _writeIdx, _inBuffSize, _outBuffSize;
private readonly byte[] _inBuff, _outBuff;
private readonly bool _bThrowErrorOnEof;
public void SetBuffSize(int n)
{
_inBuffSize = _outBuffSize = n;
}
public InputOutput(bool throwEndOfInputsError = false)
{
_readStream = System.Console.OpenStandardInput();
_writeStream = System.Console.OpenStandardOutput();
_readIdx = _bytesRead = _writeIdx = 0;
_inBuffSize = _outBuffSize = 1 << 22;
_inBuff = new byte[_inBuffSize];
_outBuff = new byte[_outBuffSize];
_bThrowErrorOnEof = throwEndOfInputsError;
}
public void SetFilePath(string strPath)
{
strPath = System.IO.Path.GetFullPath(strPath);
_readStream = System.IO.File.Open(strPath, System.IO.FileMode.Open);
}
public T ReadNumber<T>()
{
byte rb;
while ((rb = GetByte()) < '-')
;
var neg = false;
if (rb == '-')
{
neg = true;
rb = GetByte();
}
dynamic m = (T)Convert.ChangeType(rb - '0', typeof(T));
while (true)
{
rb = GetByte();
if (rb < '0')
break;
m = m * 10 + (rb - '0');
}
return neg ? -m : m;
}
public int ReadInt()
{
byte readByte;
while ((readByte = GetByte()) < '-')
;
var neg = false;
if (readByte == '-')
{
neg = true;
readByte = GetByte();
}
var m = readByte - '0';
while (true)
{
readByte = GetByte();
if (readByte < '0')
break;
m = m * 10 + (readByte - '0');
}
return neg ? -m : m;
}
public string ReadString()
{
return ReadString(' ');
}
public string ReadString(string delimiter)
{
return ReadString(delimiter[0]);
}
public string ReadString(char delimiter)
{
byte readByte;
while ((readByte = GetByte()) <= delimiter)
;
System.Text.StringBuilder sb = new System.Text.StringBuilder();
do
{
sb.Append((char)readByte);
} while ((readByte = GetByte()) > delimiter);
return sb.ToString();
}
[System.Runtime.CompilerServices.MethodImpl(System.Runtime.CompilerServices.MethodImplOptions.AggressiveInlining)]
private byte GetByte()
{
if (_readIdx >= _bytesRead)
{
_readIdx = 0;
_bytesRead = _readStream.Read(_inBuff, 0, _inBuffSize);
if (_bytesRead >= 1)
return _inBuff[_readIdx++];
if (_bThrowErrorOnEof)
throw new System.Exception("End Of Input");
_inBuff[_bytesRead++] = 0;
}
return _inBuff[_readIdx++];
}
public void WriteToBuffer(string s)
{
foreach (var b in System.Text.Encoding.ASCII.GetBytes(s))
{
if (_writeIdx == _outBuffSize)
Flush();
_outBuff[_writeIdx++] = b;
}
}
public void WriteLineToBuffer(string s)
{
WriteToBuffer(s);
if (_writeIdx == _outBuffSize)
Flush();
_outBuff[_writeIdx++] = 10;
}
public void WriteToBuffer(int c)
{
byte[] temp = new byte[10];
int tempidx = 0;
if (c < 0)
{
if (_writeIdx == _outBuffSize)
Flush();
_outBuff[_writeIdx++] = (byte)'-';
c = -c;
}
do
{
temp[tempidx++] = (byte)((c % 10) + '0');
c /= 10;
} while (c > 0);
for (int i = tempidx - 1; i >= 0; i--)
{
if (_writeIdx == _outBuffSize)
Flush();
_outBuff[_writeIdx++] = temp[i];
}
}
public void WriteLineToBuffer(int c)
{
WriteToBuffer(c);
if (_writeIdx == _outBuffSize)
Flush();
_outBuff[_writeIdx++] = 10;
}
private void Flush()
{
_writeStream.Write(_outBuff, 0, _writeIdx);
_writeStream.Flush();
_writeIdx = 0;
}
public void Dispose()
{
Flush();
_writeStream.Close();
_readStream.Close();
}
}
#endregion Input Output Helper
}
As far as I can see, you have the well-known Circle Division problem; see also the A000124 sequence:
the number of pieces after n cuts is (n * n + n + 2) / 2
That's why we can achieve O(1) time and space complexity.
Code:
private static int Solution(int n) => (int)(((long)n * n + n + 2) / 2);
Here I've cast to (long) n in case n * n exceeds int.MaxValue even when (n * n + n + 2) / 2 doesn't.
Edit: I've implemented the int Solution(int n) method to match the current int Recursion(int x) signature; but if there are tests for large n we are going to have integer overflow.
In this case
private static long Solution(long n) =>
1 + (n % 2 == 0 ? n / 2 * (n + 1) : (n + 1) / 2 * n);
In the case of arbitrary n we have to use BigInteger:
using System.Numerics;
...
private static BigInteger Solution(BigInteger n) =>
1 + (n * n + n) / 2;
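A sketch of how the question's Main loop might call the closed-form method instead of the recursion (assuming the same InputOutput reader and StreamWriter as in the question):

int T = reader.ReadInt();
for (int i = 0; i < T; i++)
{
    // O(1) per test case instead of a deep recursion
    _output.WriteLine(Solution(reader.ReadInt()));
}
_output.Flush();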

How to convert this recursive function to an iterative one?

I am having trouble converting this method from recursive to iterative. Specifically, I do not know how to convert a recursive call that appears in the condition of an if statement.
This needs to be done because the data sets that I am using are causing stack overflow exceptions.
There will have to be a stack of indices that are currently arguments being used for the recursive calls, but beyond that, I do not know what to do.
public static IEnumerable<BipartiteMatch> MaximumBipartiteMatch(int m, int n, Func<int, int, bool> isMapped)
{
var matches = new int[n];
for (var i = 0; i < n; ++i)
{
matches[i] = -1;
}
for (var x = 0; x < m; x++)
{
BipartiteMatch(x, n, new bool[n], matches, isMapped);
}
for (var index = 0; index < n; index++)
{
yield return new BipartiteMatch(matches[index], index);
}
}
private static bool BipartiteMatch(int x, int n, bool[] seen, int[] matches, Func<int, int, bool> isMapped)
{
for (var y = 0; y < n; y++)
{
if (seen[y] || !isMapped(x, y)) continue;
seen[y] = true;
//HERE:
if (matches[y] >= 0 && !BipartiteMatch(matches[y], n, seen, matches, isMapped)) continue;
matches[y] = x;
return true;
}
return false;
}
If matches[y] >= 0, then we need to push the value of matches[y] to a stack, but I am not sure how to loop it so it simulates recursion.
My attempt (it is buggy):
internal static class MaximumMatchingAlgorithm
{
internal static IEnumerable<BipartiteMatch> Solve(int m, int n, Func<int, int, bool> isMapped)
{
const int invalid = -1;
var mappings = new Stack<int>[m];
var matches = new int[n];
for (var index = 0; index < n; index++)
{
matches[index] = invalid;
}
for (var x = 0; x < m; x++)
{
var mapping = mappings[x] = new Stack<int>(n);
for (var y = 0; y < n; y++)
{
if (isMapped(x, y))
{
mapping.Push(y);
}
}
var currentX = x;
while (mapping.TryPop(out var y))
{
var tempX = matches[y];
var otherMapping = tempX != invalid ? mappings[tempX] : null;
if (otherMapping == null)
{
matches[y] = currentX;
break;
}
if (otherMapping.Count == 0) continue;
matches[y] = currentX;
currentX = tempX;
mapping = otherMapping;
}
}
for (var index = 0; index < n; index++)
{
yield return new BipartiteMatch(matches[index], index);
}
}
}
UPDATE:
Here is my second attempt after @EricLippert's comments. I created a State value to store where the loop stops so it can simulate the pause that would occur during recursion. There is still a bug somewhere, but I think this may be getting closer.
public struct State
{
public int X { get; set; }
public int Y { get; set; }
public bool Result { get; set; }
}
public static void BipartiteMatch(int x, int n, bool[] seen, int[] matches, Func<int, int, bool> isMapped)
{
var stack = new Stack<State>();
stack.Push(new State {X = x, Y = -1});
while (stack.TryPop(out var state))
{
if (state.Y != -1 && state.Result)
{
matches[state.Y] = state.X;
}
else
{
for (var y = state.Y != -1 ? state.Y : 0; y < n; y++)
{
if (seen[y] || !isMapped(state.X, y)) continue;
seen[y] = true;
if (matches[y] >= 0)
{
stack.Push(new State {X = state.X, Y = y});
stack.Push(new State {X = matches[y], Y = -1});
break;
}
if (stack.TryPop(out state))
{
stack.Push(new State {X = state.X, Y = state.Y, Result = true});
break;
}
matches[y] = state.X;
return;
}
}
}
}
I think I may have figured it out, but I would like a second opinion before I say it is all good.
The logic here is: each time recursion would be used, push the current state of the loop onto the stack along with the state that can answer whether the previously stacked state is valid. If the answer is true, the entire stack is unwound and the method terminates; otherwise, continue searching.
public readonly struct State
{
public State(int x, int y = 0)
{
X = x;
Y = y;
}
public int X { get; }
public int Y { get; }
}
public static void BipartiteMatch(int x, int n, bool[] seen, int[] matches, Func<int, int, bool> isMapped)
{
var stack = new Stack<State>(new[] {new State(x)});
while (stack.TryPop(out var state))
{
for (var y = state.Y; y < n; y++)
{
if (seen[y] || !isMapped(state.X, y)) continue;
seen[y] = true;
if (matches[y] >= 0)
{
stack.Push(new State(state.X, y));
stack.Push(new State(matches[y]));
break;
}
matches[y] = state.X;
while (stack.TryPop(out state))
{
matches[state.Y] = state.X;
}
break;
}
}
}
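As a side note, a quick way to gain confidence in the rewrite is to compare it against the original recursive version on the same input. A small sketch follows; the class names Recursive and Iterative are placeholders for wherever the two versions live.

using System;
using System.Linq;

class MatchCheck
{
    static void Main()
    {
        var rnd = new Random(42);
        const int m = 6, n = 6;
        var map = new bool[m, n];
        for (int x = 0; x < m; x++)
            for (int y = 0; y < n; y++)
                map[x, y] = rnd.NextDouble() < 0.4;
        Func<int, int, bool> isMapped = (x, y) => map[x, y];

        var recMatches = Enumerable.Repeat(-1, n).ToArray();
        var iterMatches = Enumerable.Repeat(-1, n).ToArray();
        for (int x = 0; x < m; x++)
        {
            Recursive.BipartiteMatch(x, n, new bool[n], recMatches, isMapped);   // recursive original
            Iterative.BipartiteMatch(x, n, new bool[n], iterMatches, isMapped);  // iterative rewrite
        }

        // Both runs are deterministic, so a correct rewrite should match the
        // same number of vertices as the recursive version.
        Console.WriteLine($"recursive: {recMatches.Count(v => v >= 0)}, iterative: {iterMatches.Count(v => v >= 0)}");
    }
}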

How can I find the Least Common Multiple of two fractions?

Precondition: The fractions are expressed in simplest form and denom != 0.
Improper fractions are allowed.
Postcondition: Returns the LCM (as a fraction) of the two fractions.
Using System.Numerics.BigInteger for my numerator and denominator.
This is what I currently have, but I think this approach works only for BigInteger... not my custom-made Fraction class:
public static Fraction LCM(Fraction a, Fraction b)
{
Fraction frac1, frac2;
if (a > b)
{
frac1 = a; frac2 = b;
}
else
{
frac1 = b; frac2= a;
}
for (Fraction i = new Fraction(); i < frac2; i = i.Add(new Fraction()))
{
if (frac1.Multiply(i).Divide(frac2).Simplify().num == 1)
{
return i.Multiply(frac1);
}
}
return frac1.Multiply(frac2);
}
new Fraction() returns 1/1
Fraction Class:
public class Fraction
{
public BigInteger num, denom;
public Fraction power, coef;
public Fraction()
{
num = denom = 1;
}
public Fraction(BigInteger n)
{
num = n;
denom = 1;
}
public Fraction(BigInteger num, BigInteger denom)
{
this.num = num;
this.denom = denom;
if (GCD(BigInteger.Abs(num), BigInteger.Abs(denom)) != 1 || (num > 0 && denom < 0)
|| (num < 0 && denom < 0))
{
BigInteger n = Simplify().num;
BigInteger d = Simplify().denom;
this.num = n;
this.denom = d;
}
}
public Fraction(BigInteger num, BigInteger denom, Fraction power)
{
Fraction n = new Fraction(num, denom);
coef = new Fraction();
this.num = n.num;
this.denom = n.denom;
this.power = power.Simplify();
if (power.num != 1)
{
Fraction n2 = n.Pow(power.num);
this.num = n2.num;
this.denom = n2.denom;
this.power = new Fraction(BigInteger.One, power.denom);
}
if (this.power.denom != 1)
{
Fraction n3 = SimplifyRadical();
this.num = n3.num;
this.denom = n3.denom;
if (n3.coef != null)
{
coef = new Fraction(n3.coef.num, n3.coef.denom);
}
if (n3.power != null)
{
this.power = n3.power.Simplify();
}
}
}
public Fraction(Fraction n, Fraction power)
{
BigInteger newNum = n.num;
BigInteger newDenom = n.denom;
Fraction n2 = new Fraction(newNum, newDenom, power);
num = n2.num;
denom = n2.denom;
this.power = n2.power;
if (n2.coef != null)
{
coef = n2.coef;
}
}
public Fraction Multiply(Fraction f)
{
Fraction product;
if (power != null || f.power != null)
{
if (power == null)
{
power = new Fraction(1);
}
if (f.power == null)
{
f.power = new Fraction(1);
}
Fraction LCMPower = LCM(power, f.power);
product = new Fraction(new Fraction(num, denom).Pow(power.Multiply(LCMPower.Reciprocate()).num).Multiply(new Fraction(f.num, f.denom).Pow(f.power.Multiply(LCMPower.Reciprocate()).num)), LCMPower);
}
else
{
product = new Fraction(BigInteger.Multiply(num, f.num), BigInteger.Multiply(denom, f.denom));
}
if(coef != null || f.coef != null)
{
if(coef != null && f.coef != null)
{
product.coef = coef.Multiply(f.coef);
}
else if(coef != null && f.coef == null)
{
product.coef = coef;
}
else if (coef == null && f.coef != null)
{
product.coef = f.coef;
}
}
return product;
}
public Fraction Divide(Fraction f)
{
return Multiply(f.Reciprocate());
}
public Fraction Reciprocate()
{
if (power != null)
{
return new Fraction(denom, num, power);
}
else
{
return new Fraction(denom, num);
}
}
public Fraction Raise(Fraction p)
{
Fraction newP = p;
if (p.num < 0)
{
Fraction n = Reciprocate();
num = n.num;
denom = n.denom;
newP = new Fraction(BigInteger.Abs(p.num), BigInteger.Abs(p.denom));
}
if (power != null)
{
return new Fraction(num, denom, power.Multiply(newP));
}
else
{
return new Fraction(num, denom, p);
}
}
public Fraction Pow(BigInteger p)
{
BigInteger newNum = num;
BigInteger newDenom = denom;
bool reverse = false;
if (p < 0)
{
p = -p;
reverse = true;
}
else if (p == 0)
{
return new Fraction();
}
if (power != null)
{
if (reverse)
{
return new Fraction(newDenom, newNum, power.Multiply(new Fraction(p)));
}
else
{
return new Fraction(newNum, newDenom, power.Multiply(new Fraction(p)));
}
}
else
{
for (int i = 1; i < p; i++)
{
newNum = BigInteger.Multiply(newNum, num);
newDenom = BigInteger.Multiply(newDenom, denom);
}
if (reverse)
{
return new Fraction(newDenom, newNum);
}
else
{
return new Fraction(newNum, newDenom);
}
}
}
public bool IsOne()
{
Fraction ONE = new Fraction();
return num == 1 && denom == 1;
}
public bool IsZero()
{
Fraction ZERO = new Fraction(0);
return num == 0;
}
public static List<BigInteger> PrimeFactorization(BigInteger Fraction)
{
BigInteger prime = new BigInteger(2);
List<BigInteger> primesArr = new List<BigInteger>();
while (Fraction >= prime * prime)
{
if (Fraction % prime == 0)
{
primesArr.Add(prime);
Fraction /= prime;
}
else
{
prime++;
}
}
primesArr.Add(Fraction);
return primesArr;
}
public static BigInteger GCD(BigInteger num1, BigInteger num2)
{
while (num1 != 0 && num2 != 0)
{
if (num1 > num2)
{
num1 %= num2;
}
else
{
num2 %= num1;
}
}
if (num1 == 0)
{
return num2;
}
else
{
return num1;
}
}
public static Fraction LCM(Fraction a, Fraction b)
{
a = a.Simplify();
b = b.Simplify();
Fraction num1, num2;
if (a > b)
{
num1 = a; num2 = b;
}
else
{
num1 = b; num2 = a;
}
for (Fraction i = new Fraction(); i < num2; i = i.Add(new Fraction()))
{
if (num1.Multiply(i).Divide(num2).Simplify().num == 1)
{
return i.Multiply(num1);
}
}
return num1.Multiply(num2);
}
public static bool operator >(Fraction num1, Fraction num2)
{
return Comparison(num1, num2) > 0;
}
public static bool operator <(Fraction num1, Fraction num2)
{
return Comparison(num1, num2) < 0;
}
public static bool operator >=(Fraction num1, Fraction num2)
{
return Comparison(num1, num2) >= 0;
}
public static bool operator <=(Fraction num1, Fraction num2)
{
return Comparison(num1, num2) <= 0;
}
public static int Comparison(Fraction num1, Fraction num2)
{
Fraction newNum1 = new Fraction();
Fraction newNum2 = new Fraction();
newNum1.num = num1.num * num2.denom;
newNum1.denom = num1.denom * num2.denom;
newNum2.num = num2.num * num1.denom;
newNum2.denom = num2.denom * num1.denom;
if (newNum1.num < newNum2.num)
{
return -1;
}
else if (newNum1.num == newNum2.num)
{
return 0;
}
else if (newNum1.num > newNum2.num)
{
return 1;
}
return 0;
}
public Fraction Simplify()
{
bool nnum = false, ndenom = false;
if (num < 0)
{
nnum = true;
num = -num;
}
if (denom < 0)
{
ndenom = true;
denom = -denom;
}
BigInteger divisor = GCD(num, denom);
BigInteger numNew = num / divisor;
BigInteger denomNew = denom / divisor;
if (nnum)
{
numNew = -numNew;
}
if (ndenom)
{
numNew = -numNew;
}
if (power != null)
{
numNew = SimplifyPower().num;
denomNew = SimplifyPower().denom;
Fraction powerNew = SimplifyPower().power;
return new Fraction(numNew, denomNew, powerNew);
}
return new Fraction(numNew, denomNew);
}
private Fraction SimplifyPower()
{
return new Fraction(Pow(power.num), new Fraction(BigInteger.One, power.denom));
}
private Fraction SimplifyRadical()
{
Fraction final = new Fraction();
final.coef = new Fraction();
List<BigInteger> pN = PrimeFactorization(num);
List<BigInteger> pD = PrimeFactorization(denom);
List<BigInteger> pNDupes = NumberOfDupes(pN);
List<BigInteger> pDDupes = NumberOfDupes(pD);
int index = 0;
int position = 0;
for (int i = 0; i < pNDupes.Count; i++)
{
if (pNDupes[i] >= power.denom)
{
BigInteger pow = pNDupes[i] / power.denom;
BigInteger powLeft = pNDupes[i] - power.denom * pow;
final.coef = final.coef.Multiply(new Fraction(pN[position]).Raise(new Fraction(pow)));
final.num *= new Fraction(pN[index], 1, new Fraction(powLeft, 1)).num;
position += (int)pNDupes[i];
}
else
{
final.num *= new Fraction(pN[index], 1, new Fraction(pNDupes[i], 1)).num;
}
index += 2;
}
index = 0;
position = 0;
for (int i = 0; i < pDDupes.Count; i++)
{
if (pDDupes[i] >= power.denom)
{
BigInteger pow = pDDupes[i] / power.denom;
BigInteger powLeft = pDDupes[i] - power.denom * pow;
final.coef = final.coef.Divide(new Fraction(pD[position]).Raise(new Fraction(pow)));
final.denom *= new Fraction(pD[index], 1, new Fraction(powLeft, 1)).num;
position += (int)pDDupes[i];
}
else
{
final.denom *= new Fraction(pD[index], 1, new Fraction(pDDupes[i], 1)).num;
}
index += 2;
}
final.power = power;
return final;
}
private List<BigInteger> NumberOfDupes(List<BigInteger> a)
{
List<BigInteger> l = new List<BigInteger>();
List<BigInteger> final = new List<BigInteger>();
foreach (BigInteger el in a)
{
l.Add(el);
}
int index = 0;
for (int i = 0; i < l.Count; i++)
{
BigInteger ele = l[i];
final.Add(1);
l.RemoveAt(i);
for (int j = 0; j < l.Count; j++)
{
if (ele == l[j])
{
final[index]++;
l.RemoveAt(j);
j--;
}
}
i--;
index++;
}
return final;
}
public void Print(bool printLn)
{
if (coef != null && !(coef.num == 1 && coef.denom == 1))
{
PrintHelper(coef);
if (!(num == 1 && denom == 1))
{
Console.Write("*");
}
}
if (!IsOne() || coef == null)
{
PrintHelper(this);
}
if (power != null && !power.IsOne() && !IsOne())
{
Console.ForegroundColor = ConsoleColor.Green;
Console.Write("^");
PrintHelper(power);
}
if (printLn)
{
Console.WriteLine();
}
Console.ForegroundColor = ConsoleColor.Gray;
}
private void PrintHelper(Fraction n)
{
Console.ForegroundColor = ConsoleColor.Green;
Console.Write("[");
Console.ForegroundColor = ConsoleColor.Gray;
Console.Write(n.num);
Console.ForegroundColor = ConsoleColor.Green;
if (n.denom != 1)
{
Console.Write("/");
Console.ForegroundColor = ConsoleColor.Gray;
Console.Write(n.denom);
Console.ForegroundColor = ConsoleColor.Green;
Console.Write("]");
}
else
{
Console.Write("]");
}
}
public Fraction Add(Fraction a)
{
Fraction newNum1 = new Fraction(Multiply(new Fraction(a.denom, a.denom)).num, denom * a.denom);
Fraction newNum2 = new Fraction(a.Multiply(new Fraction(denom, denom)).num, denom * a.denom);
return new Fraction(newNum1.num + newNum2.num, denom * a.denom);
}
}
Your integer algorithm is right, but it's based on integer arithmetic, on the idea that a division sometimes results in a whole number and sometimes in a whole number plus a remainder. This is not possible with fractions. You can divide them however you want - you will always get a new fraction - there will never be a remainder.
So this algorithm is not applicable. I found a formula that makes the conversion:
lcm(a/b, c/d) = lcm(a,b,c,d)/lcm(b,d).
lcm(a,b,c,d) = lcm(lcm(a,b), lcm(c,d)).
So all together it is
lcm(lcm(a,b), lcm(c,d))/lcm(b,d)
So you get 4 calls to the integer version of lcm to do one lcm for a fraction.
Do not modify your integer version of LCM.
The Fraction version would be
public static BigInteger LCM(Fraction a, Fraction b)
{
return lcm(lcm(a.num, a.denom), lcm(b.num, b.denom))/lcm(a.denom, b.denom);
}
But this will only find the least natural-number common multiple.
The result of lcm(1/4, 1/8) will be 1, not 1/4 as one may assume.
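For completeness, a sketch of the integer lcm helper the snippet above assumes (not part of the original post), built on the GCD method already in the Fraction class:

private static BigInteger lcm(BigInteger x, BigInteger y)
{
    if (x == 0 || y == 0)
        return 0;
    x = BigInteger.Abs(x);
    y = BigInteger.Abs(y);
    // divide first to keep the intermediate value small
    return x / Fraction.GCD(x, y) * y;
}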

C# multi-threading only using 25% of CPU

I have a process for a sort of genetic nesting algorithm that I am trying to multi-thread. The process looks something like the following.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
namespace ConsoleApp1
{
class Program
{
static void Main(string[] args)
{
CurrentNest = new CuttingRun();
for (int i = 0; i < 80; i++)
{
double w = GetRandomNumber(24, 50);
double h = GetRandomNumber(10, 15);
CurrentNest.PartList.Add(new LwCube { Width = w, Height = h, PartID = i });
}
//Task.Run(() =>
//{
// Parallel.For(0, 64, (i) => Parallel_Nest());
//});
while (true)
{
Parallel_Nest();
}
//Console.ReadKey();
}
public static double GetRandomNumber(double minimum, double maximum)
{
Random random = new Random();
return random.NextDouble() * (maximum - minimum) + minimum;
}
public static CuttingRun CurrentNest { get; set; }
public static void Parallel_Nest()
{
Random random = new Random();
int randomNumber = random.Next(2000, 10000);
var retVal = Nester.Nest_Parts(CurrentNest, randomNumber);
CurrentNest.Iterations++;
if (CurrentNest.SavedList.Count > 0)
{
if (retVal.Count < CurrentNest.SavedList.Count)
{
CurrentNest.SavedList = retVal;
}
}
else
{
CurrentNest.SavedList = retVal;
}
Console.WriteLine(CurrentNest.Iterations.ToString() + " " + CurrentNest.SavedList.Count.ToString());
if (CurrentNest.SavedList != retVal)
{
retVal.Clear();
}
}
}
//Models
public class LwSheet
{
public LwSheet(double width, double height)
{
SheetWidth = width;
SheetHeight = height;
FreeRectangles.Add(new LwCube { Width = width, Height = height, X = 0, Y = 0 });
}
public List<LwCube> UsedRectangles = new List<LwCube>();
public List<LwCube> FreeRectangles = new List<LwCube>();
public double SheetWidth { get; set; }
public double SheetHeight { get; set; }
public double TotalUsed { get; set; }
public bool Place_Part(LwCube prt)
{
bool retVal = false;
LwCube bestNode = FindPositionForBestAreaFit(prt);
//if the bestNode has a height then add our parts to the list
if (bestNode.Height > 0)
{
bestNode.PartID = prt.PartID;
int numRectanglesToProcess = FreeRectangles.Count;
for (int i = 0; i < numRectanglesToProcess; ++i)
{
if (SplitFreeNode(FreeRectangles[i], ref bestNode))
{
FreeRectangles.RemoveAt(i);
--i;
--numRectanglesToProcess;
}
}
PruneFreeList();
UsedRectangles.Add(bestNode);
retVal = true;
}
return retVal;
}
bool SplitFreeNode(LwCube freeNode, ref LwCube usedNode)
{
// Test with SAT if the rectangles even intersect.
if (usedNode.X >= freeNode.X + freeNode.Width || usedNode.X + usedNode.Width <= freeNode.X ||
usedNode.Y >= freeNode.Y + freeNode.Height || usedNode.Y + usedNode.Height <= freeNode.Y)
return false;
if (usedNode.X < freeNode.X + freeNode.Width && usedNode.X + usedNode.Width > freeNode.X)
{
// New node at the top side of the used node.
if (usedNode.Y > freeNode.Y && usedNode.Y < freeNode.Y + freeNode.Height)
{
LwCube newNode = new LwCube { Width = freeNode.Width, X = freeNode.X, Y = freeNode.Y };
newNode.Height = usedNode.Y - newNode.Y;
FreeRectangles.Add(newNode);
}
// New node at the bottom side of the used node.
if (usedNode.Y + usedNode.Height < freeNode.Y + freeNode.Height)
{
LwCube newNode = new LwCube { Width = freeNode.Width, X = freeNode.X };
newNode.Y = usedNode.Y + usedNode.Height;
newNode.Height = freeNode.Y + freeNode.Height - (usedNode.Y + usedNode.Height);
FreeRectangles.Add(newNode);
}
}
if (usedNode.Y < freeNode.Y + freeNode.Height && usedNode.Y + usedNode.Height > freeNode.Y)
{
// New node at the left side of the used node.
if (usedNode.X > freeNode.X && usedNode.X < freeNode.X + freeNode.Width)
{
LwCube newNode = new LwCube { Height = freeNode.Height, X = freeNode.X, Y = freeNode.Y };
newNode.Width = usedNode.X - newNode.X;
FreeRectangles.Add(newNode);
}
// New node at the right side of the used node.
if (usedNode.X + usedNode.Width < freeNode.X + freeNode.Width)
{
LwCube newNode = new LwCube { Height = freeNode.Height, Y = freeNode.Y };
newNode.X = usedNode.X + usedNode.Width;
newNode.Width = freeNode.X + freeNode.Width - (usedNode.X + usedNode.Width);
FreeRectangles.Add(newNode);
}
}
return true;
}
void PruneFreeList()
{
for (int i = 0; i < FreeRectangles.Count; ++i)
for (int j = i + 1; j < FreeRectangles.Count; ++j)
{
if (IsContainedIn(FreeRectangles[i], FreeRectangles[j]))
{
FreeRectangles.RemoveAt(i);
--i;
break;
}
if (IsContainedIn(FreeRectangles[j], FreeRectangles[i]))
{
FreeRectangles.RemoveAt(j);
--j;
}
}
}
bool IsContainedIn(LwCube a, LwCube b)
{
return a.X >= b.X && a.Y >= b.Y
&& a.X + a.Width <= b.X + b.Width
&& a.Y + a.Height <= b.Y + b.Height;
}
LwCube FindPositionForBestAreaFit(LwCube prt)
{
LwCube bestNode = new LwCube();
var bestAreaFit = SheetWidth * SheetHeight;
for (int i = 0; i < FreeRectangles.Count; ++i)
{
double areaFit = FreeRectangles[i].Width * FreeRectangles[i].Height - prt.Width * prt.Height;
// Try to place the rectangle in upright (non-flipped) orientation.
if (FreeRectangles[i].Width >= prt.Width && FreeRectangles[i].Height >= prt.Height)
{
if (areaFit < bestAreaFit)
{
bestNode.X = FreeRectangles[i].X;
bestNode.Y = FreeRectangles[i].Y;
bestNode.Height = prt.Height;
bestNode.Width = prt.Width;
bestAreaFit = areaFit;
}
}
}
return bestNode;
}
}
public class LwCube
{
public int PartID { get; set; }
public double Width { get; set; }
public double Height { get; set; }
public double X { get; set; }
public double Y { get; set; }
}
public class CuttingRun
{
public List<LwCube> PartList = new List<LwCube>();
public List<LwSheet> SavedList = new List<LwSheet>();
public List<LwSheet> Sheets = new List<LwSheet>();
public int Iterations { get; set; }
}
//Actions
public static class Nester
{
public static List<LwSheet> Nest_Parts(CuttingRun cuttingRun, int loopCount)
{
var SheetList = new List<LwSheet>();
List<LwCube> partList = new List<LwCube>();
partList.AddRange(cuttingRun.PartList);
while (partList.Count > 0)
{
LwSheet newScore = new LwSheet(97, 49);
List<LwCube> addingParts = new List<LwCube>();
foreach (var prt in partList)
{
addingParts.Add(new LwCube { Width = prt.Width, Height = prt.Height, PartID = prt.PartID });
}
if (addingParts.Count > 0)
{
var sheets = new ConcurrentBag<LwSheet>();
Parallel.For(0, loopCount, (i) =>
{
var hmr = new LwSheet(97, 49);
Add_Parts_To_Sheet(hmr, addingParts);
sheets.Add(hmr);
});
//for (int i = 0; i < loopCount; i++)
//{
// var hmr = new LwSheet(97, 49);
// Add_Parts_To_Sheet(hmr, addingParts, addToLarge, addToMedium);
// sheets.Add(hmr);
//}
addingParts.Clear();
var bestSheet = sheets.Where(p => p != null).OrderByDescending(p => p.TotalUsed).First();
sheets = null;
newScore = bestSheet;
foreach (var ur in newScore.UsedRectangles)
{
partList.Remove(partList.Single(p => p.PartID == ur.PartID));
}
SheetList.Add(newScore);
}
}
return SheetList;
}
public static void Add_Parts_To_Sheet(LwSheet sh, List<LwCube> parts)
{
var myList = new List<LwCube>();
myList.AddRange(parts);
myList.Shuffle();
foreach (var prt in myList)
{
sh.Place_Part(prt);
}
myList.Clear();
foreach (var ur in sh.UsedRectangles)
{
sh.TotalUsed += ur.Width * ur.Height;
}
}
[ThreadStatic] private static Random Local;
public static Random ThisThreadsRandom
{
get { return Local ?? (Local = new Random(unchecked(Environment.TickCount * 31 + System.Threading.Thread.CurrentThread.ManagedThreadId))); }
}
public static void Shuffle<T>(this IList<T> list)
{
int n = list.Count;
while (n > 1)
{
n--;
int k = ThisThreadsRandom.Next(n + 1);
T value = list[k];
list[k] = list[n];
list[n] = value;
}
}
}
}
I have tried using parallel for loops on each of the loops to try to speed up the process. I have also tried changing them to tasks and using Task.WhenAll. However, I am only able to use around 25% of my CPU. If I start the program 4 different times, I can use 100%.
I am wondering if anyone has any ideas on how I could use 100% of my CPU without starting the program more than once?
EDIT: After adding a scaled-down working version, I also commented out one of the parallel loops and one of the normal loops to show where I put them in the code.
However I am only able to use around 25% of my CPU. If I start the program 4 different times, I can use 100%. I am wondering if anyone has any ideas on how I could use 100% of my CPU without starting the program more than once?
Your code appears to be a mixture of asynchronous (presumably I/O-bound) and parallel (presumably CPU-bound) portions. I say "appears to be" because we can't say for sure where the problem is since this is not a minimal reproducible example.
But, if that assumption is correct, then the reason your CPU is underutilized is simple: the parallel CPU-bound portions are waiting for their input data from the asynchronous I/O-bound portions. The only way to fix that is to run the I/O-bound portions concurrently. Move your I/O-bound code as early in the pipeline as possible, and then be sure to run the I/O-bound portions as concurrently as possible. E.g., if you have to call a WebApi for each item, call it as soon as you have the item; or if you're reading items from a database, try to read as many in a batch as possible. This is to minimize the amount of time that the CPU-bound portions have to wait for their data.
"Asynchronous Parallel ForEach" is rarely a good tool for this kind of problem. I would either look into TPL Dataflow or build your own pipeline using Channels.
At the end of the day, it is possible that the algorithm as a whole is I/O-bound. In that case, there isn't a whole lot you can do: only one CPU would be used because the I/O couldn't even keep up with that single CPU, and in that case using more CPUs wouldn't provide any benefit.
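To make the Channels suggestion concrete, here is a minimal sketch of such a pipeline (the work items and method names are placeholders, not the poster's code): an asynchronous producer feeds a bounded channel, and one CPU-bound consumer per core drains it.

using System;
using System.Threading.Channels;
using System.Threading.Tasks;

class PipelineSketch
{
    static async Task Main()
    {
        var channel = Channel.CreateBounded<int>(100);

        // I/O-bound producer: push work items as soon as they are available.
        var producer = Task.Run(async () =>
        {
            for (int i = 0; i < 1000; i++)
                await channel.Writer.WriteAsync(i);   // e.g. an item read from a WebApi or database
            channel.Writer.Complete();
        });

        // CPU-bound consumers: one per core, each pulling items off the channel.
        var consumers = new Task[Environment.ProcessorCount];
        for (int c = 0; c < consumers.Length; c++)
        {
            consumers[c] = Task.Run(async () =>
            {
                await foreach (var item in channel.Reader.ReadAllAsync())
                    DoCpuBoundWork(item);             // placeholder for the heavy computation
            });
        }

        await producer;
        await Task.WhenAll(consumers);
    }

    static void DoCpuBoundWork(int item) { /* heavy computation here */ }
}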

Neural Network, getting output less than 1

I am trying to create a neural network for the function y = e^(-(x - u)^2 / (2 * o^2)) where u = 50 and o = 15.
I must train my neural network so I can find the two x's for each y. I have created the following code. It seems to learn nicely, but once I test it, the outputs I get are only numbers around 0.99 to 1, where I should get 25 and 75, and I just can't see why. My best guess is that my error correction is wrong, but I can't find the error. The neural network uses back-propagation.
The test code and training set
class Program
{
static void Main(string[] args)
{
args = new string[] {
"c:\\testTrain.csv",
"c:\\testValues.csv"
};
// Output File
string fileTrainPath = null;
string fileValuesPath = null;
if (args.Length > 0)
{
fileTrainPath = args[0];
if (File.Exists(fileTrainPath))
File.Delete(fileTrainPath);
fileValuesPath = args[1];
if (File.Exists(fileValuesPath))
File.Delete(fileValuesPath);
}
double learningRate = 0.1;
double u = 50;
double o = 15;
Random rand = new Random();
Network net = new Network(1, 8, 4, 2);
NetworkTrainer netTrainer = new NetworkTrainer(learningRate, net);
List<TrainerSet> TrainerSets = new List<TrainerSet>();
for(int i = 0; i <= 20; i++)
{
double random = rand.NextDouble();
TrainerSets.Add(new TrainerSet(){
Inputs = new double[] { random },
Outputs = getX(random, u, o)
});
}
// Train Network
string fileTrainValue = String.Empty;
for (int i = 0; i <= 10000; i++)
{
if (i == 5000)
{ }
double error = netTrainer.RunEpoch(TrainerSets);
Console.WriteLine("Epoch " + i + ": Error = " + error);
if(fileTrainPath != null)
fileTrainValue += i + "," + learningRate + "," + error + "\n";
}
if (fileTrainPath != null)
File.WriteAllText(fileTrainPath, fileTrainValue);
// Test Network
string fileValuesValue = String.Empty;
for (int i = 0; i <= 100; i++)
{
double y = rand.NextDouble();
double[] dOutput = getX(y, u, o);
double[] Output = net.Compute(new double[] { y });
if (fileValuesPath != null)
fileValuesValue += i + "," + y + "," + dOutput[0] + "," + dOutput[1] + "," + Output[0] + "," + Output[1] + "\n";
}
if (fileValuesPath != null)
File.WriteAllText(fileValuesPath, fileValuesValue);
}
public static double getResult(int x, double u, double o)
{
return Math.Exp(-Math.Pow(x-u,2)/(2*Math.Pow(o,2)));
}
public static double[] getX(double y, double u, double o)
{
return new double[] {
u + Math.Sqrt(2 * Math.Pow(o, 2) * Math.Log(1/y)),
u - Math.Sqrt(2 * Math.Pow(o, 2) * Math.Log(1/y)),
};
}
}
The code behind the network
public class Network
{
protected int inputsCount;
protected int layersCount;
protected NetworkLayer[] layers;
protected double[] output;
public int Count
{
get
{
return layers.Count();
}
}
public NetworkLayer this[int index]
{
get { return layers[index]; }
}
public Network(int inputsCount, params int[] neuronsCount)
{
this.inputsCount = Math.Max(1, inputsCount);
this.layersCount = Math.Max(1, neuronsCount.Length);
layers = new NetworkLayer[neuronsCount.Length];
for (int i = 0; i < layersCount; i++)
layers[i] = new NetworkLayer(neuronsCount[i],
(i == 0) ? inputsCount : neuronsCount[i - 1]);
}
public virtual double[] Compute(double[] input)
{
output = input;
foreach (NetworkLayer layer in layers)
output = layer.Compute(output);
return output;
}
}
public class NetworkLayer
{
protected int inputsCount = 0;
protected int neuronsCount = 0;
protected Neuron[] neurons;
protected double[] output;
public Neuron this[int index]
{
get { return neurons[index]; }
}
public int Count
{
get { return neurons.Length; }
}
public int Inputs
{
get { return inputsCount; }
}
public double[] Output
{
get { return output; }
}
public NetworkLayer(int neuronsCount, int inputsCount)
{
this.inputsCount = Math.Max( 1, inputsCount );
this.neuronsCount = Math.Max( 1, neuronsCount );
neurons = new Neuron[this.neuronsCount];
output = new double[this.neuronsCount];
// create each neuron
for (int i = 0; i < neuronsCount; i++)
neurons[i] = new Neuron(inputsCount);
}
public virtual double[] Compute(double[] input)
{
// compute each neuron
for (int i = 0; i < neuronsCount; i++)
output[i] = neurons[i].Compute(input);
return output;
}
}
public class Neuron
{
protected static Random rand = new Random((int)DateTime.Now.Ticks);
public int Inputs;
public double[] Input;
public double[] Weights;
public double Output = 0;
public double Threshold;
public double Error;
public Neuron(int inputs)
{
this.Inputs = inputs;
Weights = new double[inputs];
for (int i = 0; i < inputs; i++)
Weights[i] = rand.NextDouble() * 0.5;
}
public double Compute(double[] inputs)
{
Input = inputs;
double e = 0.0;
for (int i = 0; i < inputs.Length; i++)
e += Weights[i] * inputs[i];
e -= Threshold;
return (Output = sigmoid(e));
}
private double sigmoid(double value)
{
return (1 / (1 + Math.Exp(-1 * value)));
//return 1 / (1 + Math.Exp(-value));
}
}
My Trainer
public class NetworkTrainer
{
private Network network;
private double learningRate = 0.1;
public NetworkTrainer(double a, Network network)
{
this.network = network;
this.learningRate = a;
}
public double Run(double[] input, double[] output)
{
network.Compute(input);
return CorrectErrors(output);
}
public double RunEpoch(List<TrainerSet> sets)
{
double error = 0.0;
for (int i = 0, n = sets.Count; i < n; i++)
error += Run(sets[i].Inputs, sets[i].Outputs);
// return summary error
return error;
}
private double CorrectErrors(double[] desiredOutput)
{
double[] errorLast = new double[desiredOutput.Length];
NetworkLayer lastLayer = network[network.Count - 1];
for (int i = 0; i < desiredOutput.Length; i++)
{
// S(p)=y(p)*[1-y(p)]*(yd(p)-y(p))
lastLayer[i].Error = lastLayer[i].Output * (1-lastLayer[i].Output)*(desiredOutput[i] - lastLayer[i].Output);
errorLast[i] = lastLayer[i].Error;
}
// Calculate errors
for (int l = network.Count - 2; l >= 0; l--)
{
for (int n = 0; n < network[l].Count; n++)
{
double newError = 0;
for (int np = 0; np < network[l + 1].Count; np++)
{
newError += network[l + 1][np].Weights[n] * network[l + 1][np].Error;
}
network[l][n].Error = newError;
}
}
// Update Weights
// w = w + (a * input * error)
for (int l = network.Count - 1; l >= 0; l--)
{
for (int n = 0; n < network[l].Count; n++)
{
for (int i = 0; i < network[l][n].Inputs; i++)
{
// deltaW = a * y(p) * s(p)
double deltaW = learningRate * network[l][n].Output * network[l][n].Error;
network[l][n].Weights[i] += deltaW;
}
}
}
double returnError = 0;
foreach (double e in errorLast)
returnError += e;
return returnError;
}
}
For regression problems your output layer should have the identity (or at least a linear) activation function. This way you don't have to scale your output. The derivative of the identity function is 1 and thus the derivative dE/da_i for the output layer is y-t (lastLayer[i].Output - desiredOutput[i]).
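A minimal, self-contained sketch of what that change could look like (a hypothetical class, not the asker's Neuron, kept in the same style): an output neuron with an identity activation and the matching output-layer delta.

using System;

class LinearOutputNeuron
{
    public double[] Weights;
    public double Threshold;
    public double Output;
    public double Error;
    private static readonly Random rand = new Random();

    public LinearOutputNeuron(int inputs)
    {
        Weights = new double[inputs];
        for (int i = 0; i < inputs; i++)
            Weights[i] = rand.NextDouble() * 0.5;
    }

    // Identity activation: the weighted sum is returned unsquashed, so the
    // network can output targets such as 25 or 75 directly.
    public double Compute(double[] inputs)
    {
        double e = 0.0;
        for (int i = 0; i < inputs.Length; i++)
            e += Weights[i] * inputs[i];
        e -= Threshold;
        return (Output = e);
    }

    // With an identity activation the derivative is 1, so the output-layer
    // delta reduces to (desired - actual), matching the question's convention.
    public void ComputeError(double desired)
    {
        Error = desired - Output;
    }
}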
