My routine is as follows (the SpeedyBitmap class just locks each bitmap for faster pixel data access):
private static bool TestRange(int number, int lower, int upper) {
return number >= lower && number <= upper;
}
public static List<Point> Search(Bitmap toSearch, Bitmap toFind, double percentMatch = 0.85, byte rTol = 2, byte gTol = 2, byte bTol = 2) {
List<Point> points = new List<Point>();
Stopwatch stopwatch = new Stopwatch();
stopwatch.Start();
int findArea = toFind.Width * toFind.Height;
int allowedMismatches = findArea - (int)(findArea * percentMatch);
int mismatches = 0;
using (SpeedyBitmap speedySearch = new SpeedyBitmap(toSearch)) {
using (SpeedyBitmap speedyFind = new SpeedyBitmap(toFind)) {
for (int i = 0; i < speedySearch.Height - speedyFind.Height + 1; i++) {
for (int j = 0; j < speedySearch.Width - speedyFind.Width + 1; j++) {
for (int k = 0; k < speedyFind.Height; k++) {
for (int l = 0; l < speedyFind.Width; l++) {
Color searchColor = speedySearch[j + l, i + k];
Color findColor = speedyFind[l, k];
if (!TestRange(searchColor.R, findColor.R - rTol, findColor.R + rTol) ||
!TestRange(searchColor.G, findColor.G - gTol, findColor.G + gTol) ||
!TestRange(searchColor.B, findColor.B - bTol, findColor.B + bTol)) {
mismatches++;
if (mismatches > allowedMismatches) {
mismatches = 0;
goto notFound;
}
}
}
}
mismatches = 0; // reset for the next candidate position
points.Add(new Point(j, i));
continue;
notFound:
;
}
}
}
}
Console.WriteLine(stopwatch.ElapsedMilliseconds);
return points;
}
Searching a moderately sized image (1000x1000) takes over 20 seconds. Removing the percent match takes it down to a few hundred milliseconds, so I think I've established where the major bottleneck is.
How can I make this run faster? Perhaps I could flatten the two-dimensional arrays down to one-dimensional arrays and then apply some sort of sequence check on the two to possibly match up the longest sequence of where the toFind bitmap data appears with the respective tolerances and match percent. If this would be a good solution, how would I begin to implement it?
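For what it's worth, here is a minimal sketch of that flattening idea, assuming 32bpp ARGB pixel data (the FlatBitmap class and ToPixelArray name are illustrative, not part of SpeedyBitmap):
using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;
static class FlatBitmap
{
    // Copies a bitmap's pixels into a flat int[] (one ARGB value per pixel),
    // so the search loops index an array instead of going through an indexer.
    public static int[] ToPixelArray(Bitmap bmp)
    {
        var rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
        BitmapData data = bmp.LockBits(rect, ImageLockMode.ReadOnly,
                                       PixelFormat.Format32bppArgb);
        try
        {
            // For 32bpp top-down bitmaps the stride is width * 4 bytes,
            // so a single copy grabs the whole image.
            var pixels = new int[bmp.Width * bmp.Height];
            Marshal.Copy(data.Scan0, pixels, 0, pixels.Length);
            return pixels;
        }
        finally
        {
            bmp.UnlockBits(data);
        }
    }
}
The pixel at (x, y) is then pixels[y * width + x], and the channels come out with shifts ((p >> 16) & 0xFF for red, and so on), which avoids the per-pixel Color round-trip in the inner loop.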
Related
I am working on a project that compares the time bubble sort and selection sort take. I made two separate programs and combined them into one, and now bubble sort is running much faster than selection sort. I checked to make sure that the code wasn't just giving me 0s because of some conversion error and that it was running as intended. I am using System.Diagnostics to measure the time. I also checked that the machine was not the problem; I ran it on Replit and got similar results.
using System;
using System.Diagnostics;
namespace SortComparison // usings and namespace added so the snippet compiles
{
class Program
{
public static int s1 = 0;
public static int s2 = 0;
static decimal bubblesort(int[] arr1)
{
int n = arr1.Length;
var sw1 = Stopwatch.StartNew();
for (int i = 0; i < n - 1; i++)
{
for (int j = 0; j < n - i - 1; j++)
{
if (arr1[j] > arr1[j + 1])
{
// swap arr1[j] and arr1[j + 1]
int tmp = arr1[j];
arr1[j] = arr1[j + 1];
arr1[j + 1] = tmp;
s1++;
}
}
}
sw1.Stop();
// Console.WriteLine(sw1.ElapsedMilliseconds);
decimal a = Convert.ToDecimal(sw1.ElapsedMilliseconds);
return a;
}
static decimal selectionsort(int[] arr2)
{
int n = arr2.Length;
var sw1 = Stopwatch.StartNew();
// for (int e = 0; e < 1000; e++)
// {
for (int x = 0; x < arr2.Length - 1; x++)
{
int minPos = x;
for (int y = x + 1; y < arr2.Length; y++)
{
if (arr2[y] < arr2[minPos])
minPos = y;
}
if (x != minPos && minPos < arr2.Length)
{
int temp = arr2[minPos];
arr2[minPos] = arr2[x];
arr2[x] = temp;
s2++;
}
}
// }
sw1.Stop();
// Console.WriteLine(sw1.ElapsedMilliseconds);
decimal a = Convert.ToDecimal(sw1.ElapsedMilliseconds);
return a;
}
static void Main(string[] args)
{
Console.WriteLine("Enter the size of n");
int n = Convert.ToInt32(Console.ReadLine());
Random rnd = new System.Random();
decimal bs = 0M;
decimal ss = 0M;
int s = 0;
int[] arr1 = new int[n];
int tx = 1000; //tx is a variable that I can use to adjust sample size
decimal tm = Convert.ToDecimal(tx);
for (int i = 0; i < tx; i++)
{
for (int a = 0; a < n; a++)
{
arr1[a] = rnd.Next(0, 1000000);
}
ss += selectionsort(arr1);
bs += bubblesort(arr1);
}
bs = bs / tm;
ss = ss / tm;
Console.WriteLine("Bubble Sort took " + bs + " miliseconds");
Console.WriteLine("Selection Sort took " + ss + " miliseconds");
}
}
}
What is going on? What is causing bubble sort to be fast, or what is slowing selection sort down? How can I fix this?
I found that the problem was that the selection sort was looping 1000 times per method run, in addition to the 1000 runs for sample size, causing the method to perform significantly worse than bubble sort. Thank you guys for the help, and thank you TheGeneral for showing me the benchmarking tools. Also, the array that was given as a parameter was a reference instead of a copy; running through the loop manually showed me that bubble sort wasn't really doing its job, since it was sorting an already-sorted array.
To solve your initial problem you just need to copy your arrays; you can do this easily with ToArray():
Creates an array from an IEnumerable<T>.
ss += selectionsort(arr1.ToArray());
bs += bubblesort(arr1.ToArray());
However, let's learn how to do a more reliable benchmark with BenchmarkDotNet:
BenchmarkDotNet Nuget
Official Documentation
Given
public class Sort
{
public static void BubbleSort(int[] arr1)
{
int n = arr1.Length;
for (int i = 0; i < n - 1; i++)
{
for (int j = 0; j < n - i - 1; j++)
{
if (arr1[j] > arr1[j + 1])
{
// swap arr1[j] and arr1[j + 1]
int tmp = arr1[j];
arr1[j] = arr1[j + 1];
arr1[j + 1] = tmp;
}
}
}
}
public static void SelectionSort(int[] arr2)
{
int n = arr2.Length;
for (int x = 0; x < arr2.Length - 1; x++)
{
int minPos = x;
for (int y = x + 1; y < arr2.Length; y++)
{
if (arr2[y] < arr2[minPos])
minPos = y;
}
if (x != minPos && minPos < arr2.Length)
{
int temp = arr2[minPos];
arr2[minPos] = arr2[x];
arr2[x] = temp;
}
}
}
}
Benchmark code
using System;
using System.Linq;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
[SimpleJob(RuntimeMoniker.Net50)]
[MemoryDiagnoser()]
public class SortBenchmark
{
private int[] data;
[Params(100, 1000)]
public int N;
[GlobalSetup]
public void Setup()
{
var r = new Random(42);
data = Enumerable
.Repeat(0, N)
.Select(i => r.Next(0, N))
.ToArray();
}
[Benchmark]
public void Bubble() => Sort.BubbleSort(data.ToArray());
[Benchmark]
public void Selection() => Sort.SelectionSort(data.ToArray());
}
Usage
using BenchmarkDotNet.Running;
static void Main(string[] args)
{
BenchmarkRunner.Run<SortBenchmark>();
}
Results
Method    | N    | Mean       | Error     | StdDev
--------- | ---- | ---------- | --------- | ---------
Bubble    | 100  | 8.553 us   | 0.0753 us | 0.0704 us
Selection | 100  | 4.757 us   | 0.0247 us | 0.0231 us
Bubble    | 1000 | 657.760 us | 7.2581 us | 6.7893 us
Selection | 1000 | 300.395 us | 2.3302 us | 2.1796 us
Summary
What have we learnt? Your bubble sort code is slower ¯\_(ツ)_/¯
It looks like you're passing the already-sorted array into bubble sort. Because arrays are passed by reference, the selection sort you run first edits the same array contents that are eventually passed into bubble sort.
Make a second array and pass the second array into bubble sort.
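For example, a minimal fix in Main (ToArray requires using System.Linq; Clone works too):
int[] unsorted = arr1.ToArray(); // independent copy, taken before selection sort mutates arr1
ss += selectionsort(arr1);
bs += bubblesort(unsorted);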
I have a matrix containing only the values 0 and 1, and I have to count the number of 1s in it. Checking every element works, but for a [1000,1000] matrix it takes too long; another problem is that I have to do this many times for different matrices, so I hope someone can suggest a faster approach.
here is my code:
for (int i = 0; i < matrix.height; i++)
{
for (int j = 0; j < matrix.width; j++)
{
if (matrix[j, i] == 1)
{
count++;
}
}
}
You actually have multiple options if you implement the matrix class yourself:
public class BooleanMatrix
{
    public bool this[int i, int j] { get; set; } // sketch only; a real indexer needs backing storage
}
Cache it. It's easy: upon any modification, just update the cached count of high bits. The storage implementation is irrelevant.
public class BooleanMatrix
{
    private readonly bool[,] _data;
    private int _highBitCount = 0;
    public BooleanMatrix(int rows, int cols)
    {
        _data = new bool[rows, cols];
    }
    public int HighBitCount { get { return _highBitCount; } }
    public bool this[int i, int j]
    {
        get { return _data[i, j]; }
        set
        {
            bool prev = _data[i, j];
            if (prev != value)
            {
                if (value)
                    _highBitCount++;
                else
                    _highBitCount--;
            }
            _data[i, j] = value;
        }
    }
}
Change the implementation to a sparse variant; for example, you can store the matrix values as bits in a byte[] array. If that is still too much, compress it with run-length encoding. This comes with drawbacks, such as update and distribution problems for such matrices, and they are usually much slower than a plain in-memory matrix. The most efficient algorithm depends heavily on the nature of your matrix (the distribution of values) and how you use it (multiplication, division, subtraction, etc.).
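As an illustration of the packed byte[] idea (a sketch of mine, not from the original answer), counting the 1 bits with a precomputed 256-entry table looks like this:
static class BitCounter
{
    // Popcount for every possible byte value, built once.
    static readonly byte[] BitsPerByte = BuildTable();
    static byte[] BuildTable()
    {
        var table = new byte[256];
        for (int i = 0; i < 256; i++)
            table[i] = (byte)(table[i >> 1] + (i & 1));
        return table;
    }
    // Counts the 1 bits in a matrix stored as packed bytes (8 cells per byte).
    public static int CountSetBits(byte[] packed)
    {
        int count = 0;
        foreach (byte b in packed)
            count += BitsPerByte[b];
        return count;
    }
}
With 8 cells per byte, a 1000x1000 matrix fits in roughly 125 KB, so the count touches far less memory than scanning an int[,].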
Try a parallel for loop, if the count can't be cached:
object mylock = new object();
int count = 0;
Parallel.For(0, matrix.height, i =>
{
int forcount = 0;
for (int j = 0; j < matrix.width; j++)
{
if (matrix[j, i] == 1)
{
forcount++;
}
}
lock (mylock)
{
count += forcount;
}
}
);
Given that you are only storing bits, you can improve the storage usage by storing the bits packed into uint values, which will reduce the amount of space required by a factor of 32 compared to using int for each value.
If you do that, then you can also more efficiently count the number of set bits by using one of many different "Hamming Weight" algorithms.
The drawback of this approach is that accessing individual bits through the BitMatrix indexer may be slower, but the computation of the number of set bits is much faster (more than 90 times faster for RELEASE-mode builds on my PC).
Here's the sample code; the important class is BitMatrix:
using System;
using System.Diagnostics;
namespace Demo
{
class Program
{
static void Main()
{
int[,] matrix = new int[1000, 1000];
BitMatrix bitMatrix = new BitMatrix(1000, 1000);
// Randomly populate matrices and calculate expected count.
var rng = new Random(985912);
int expected = 0;
for (int r = 0; r < 1000; ++r)
{
for (int c = 0; c < 1000; ++c)
{
if ((rng.Next() & 1) == 0)
continue;
++expected;
matrix[r, c] = 1;
bitMatrix[r, c] = true;
}
}
Console.WriteLine("Expected = " + expected);
// Time the explicit matrix loop.
var sw = Stopwatch.StartNew();
for (int i = 0; i < 1000; ++i)
if (count1(matrix) != expected)
Console.WriteLine("count1() failed");
var elapsed1 = sw.ElapsedTicks;
Console.WriteLine(sw.Elapsed);
// Time the hamming weight approach.
sw.Restart();
for (int i = 0; i < 1000; ++i)
if (bitMatrix.NumSetBits() != expected)
Console.WriteLine("NumSetBits() failed");
var elapsed2 = sw.ElapsedTicks;
Console.WriteLine(sw.Elapsed);
Console.WriteLine("BitMatrix matrix is " + elapsed1 / elapsed2 + " times faster");
}
static int count1(int[,] matrix)
{
int h = 1 + matrix.GetUpperBound(0);
int w = 1 + matrix.GetUpperBound(1);
int c = 0;
for (int i = 0; i < h; ++i)
for (int j = 0; j < w; ++j)
if (matrix[i, j] == 1)
++c;
return c;
}
}
public sealed class BitMatrix
{
public BitMatrix(int rows, int cols)
{
Rows = rows;
Cols = cols;
bits = new uint[(rows*cols+31)/32];
}
public int Rows { get; }
public int Cols { get; }
public int NumSetBits()
{
int count = 0;
foreach (uint i in bits)
count += hammingWeight(i);
return count;
}
public bool this[int row, int col]
{
get
{
int n = row * Cols + col;
int i = n / 32;
int j = n % 32;
uint m = 1u << j;
return (bits[i] & m) != 0;
}
set
{
int n = row * Cols + col;
int i = n / 32;
int j = n % 32;
uint m = 1u << j;
if (value)
bits[i] |= m;
else
bits[i] &= ~m;
}
}
static int hammingWeight(uint i)
{
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (int)((((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24);
}
readonly uint[] bits;
}
}
If you are running 64-bit code, then it's actually more efficient to use an array of ulong and calculate a 64-bit hamming weight.
When I tried that on my PC, it was more than 120 times faster.
Here's the 64-bit version of BitMatrix:
public sealed class BitMatrix
{
public BitMatrix(int rows, int cols)
{
Rows = rows;
Cols = cols;
bits = new ulong[(rows*cols+63)/64];
}
public int Rows { get; }
public int Cols { get; }
public int NumSetBits()
{
int count = 0;
foreach (ulong i in bits)
count += hammingWeight(i);
return count;
}
public bool this[int row, int col]
{
get
{
int n = row * Cols + col;
int i = n / 64;
int j = n % 64;
ulong m = 1ul << j;
return (bits[i] & m) != 0;
}
set
{
int n = row * Cols + col;
int i = n / 64;
int j = n % 64;
ulong m = 1ul << j;
if (value)
bits[i] |= m;
else
bits[i] &= ~m;
}
}
static int hammingWeight(ulong i)
{
i = i - ((i >> 1) & 0x5555555555555555UL);
i = (i & 0x3333333333333333UL) + ((i >> 2) & 0x3333333333333333UL);
return (int)(unchecked(((i + (i >> 4)) & 0xF0F0F0F0F0F0F0FUL) * 0x101010101010101UL) >> 56);
}
readonly ulong[] bits;
}
Observation: It turns out to be marginally faster to use for() rather than foreach in the loop in NumSetBits(), for example:
public int NumSetBits()
{
int count = 0;
for (var index = 0; index < bits.Length; index++)
count += hammingWeight(bits[index]);
return count;
}
On my PC this changes the performance from 120 times faster to 130 times faster.
Finally: if you want to take advantage of multithreading, you can do it as follows (note the use of a Partitioner: this increases the block size of data processed by each thread, making it much more efficient; the code also needs using System.Collections.Concurrent, System.Threading and System.Threading.Tasks):
public int NumSetBits()
{
int count = 0;
var partitioner = Partitioner.Create(0, bits.Length);
Parallel.ForEach(partitioner, (range, loopState) =>
{
int subtotal = 0;
for (int i = range.Item1; i < range.Item2; ++i)
{
subtotal += hammingWeight(bits[i]);
}
Interlocked.Add(ref count, subtotal);
});
return count;
}
With this change, the Hamming approach is almost 200 times faster (and almost 300 times faster for a 2000x2000 matrix), but note that the amount by which it is faster depends on the proportion of 1 bits set.
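As an aside, on .NET Core 3.0 and later the manual hammingWeight can be replaced with System.Numerics.BitOperations.PopCount, which maps to the hardware POPCNT instruction where available:
using System.Numerics;
public int NumSetBits()
{
    int count = 0;
    for (var index = 0; index < bits.Length; index++)
        count += BitOperations.PopCount(bits[index]); // hardware popcount where supported
    return count;
}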
What's the performance penalty that I can expect if I'm using Lists over Arrays to solve the Longest Increasing Subsequence?
Will the dynamic nature of Lists improve average performance because we're not dealing with sizes we won't actually use?
PS: Any tips on improving performance while still maintaining some readability?
public static int Run(int[] nums)
{
var length = nums.Length;
List<List<int>> candidates = new List<List<int>>();
candidates.Add(new List<int> { nums[0] });
for (int i = 1; i < length; i++)
{
var valueFromArray = nums[i];
var potentialReplacements = candidates.Where(t => t[t.Count-1] > valueFromArray);
foreach (var collection in potentialReplacements)
{
var collectionCount = collection.Count;
if ((collection.Count > 1 && collection[collectionCount - 2] < valueFromArray) || (collectionCount == 1))
{
collection.RemoveAt(collectionCount - 1);
collection.Add(valueFromArray);
}
}
if (!candidates.Any(t => t[t.Count - 1] >= valueFromArray))
{
var newList = new List<int>();
foreach(var value in candidates[candidates.Count - 1])
{
newList.Add(value);
}
newList.Add(nums[i]);
candidates.Add(newList);
}
}
return candidates[candidates.Count - 1].Count;
}
Depending on the solution, the results may vary. Arrays are faster than lists of the same size. How much faster? Let's take a look at the C# solution below. It is a simple O(n^2) solution. I coded a version with arrays only and another with lists only. I'm running each 100 times and recording the values for both, then printing the average improvement of the array version over the list version. I'm getting over 50% improvement on my computer.
Notice that this solution always uses arrays and lists of the same sizes. That means I never create an array bigger than the size the lists are going to grow to in the list version. Once you start creating arrays with a maximum size that may not be filled, the comparison stops being fair.
C# code below:
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace hashExample
{
class Program
{
static int RunArray(int[] array)
{
int[] dp = new int[array.Length];
dp[0] = 1;
for (int i = 1; i < array.Length; i++)
{
dp[i] = 1;
for (int j = 0; j < i; j++)
if (array[i] > array[j] && dp[i] < dp[j] + 1)
dp[i] = dp[j] + 1;
}
return dp.Max();
}
static int RunList(List<int> array)
{
List<int> dp = new List<int>(array.Count);
dp.Add(1);
for (int i = 1; i < array.Count; i++)
{
dp.Add(1);
for (int j = 0; j < i; j++)
if (array[i] > array[j] && dp[i] < dp[j] + 1)
dp[i] = dp[j] + 1;
}
return dp.Max();
}
static void Main(string[] args)
{
int arrayLen = 1000;
Random r = new Random();
List<double> values = new List<double>();
Stopwatch clock = new Stopwatch();
Console.WriteLine("Running...");
for (int i = 0; i < 100; i++)
{
List<int> list = new List<int>();
int[] array = new int[arrayLen];
for (int j = 0; j < arrayLen;j++)
{
int e = r.Next();
array[j] = e;
list.Add(e);
}
clock.Restart();
RunArray(array);
clock.Stop();
double timeArray = clock.ElapsedMilliseconds;
clock.Restart();
RunList(list);
clock.Stop();
double timeList = clock.ElapsedMilliseconds;
//Console.WriteLine(Math.Round(timeArray/timeList*100,2) + "%");
values.Add(timeArray / timeList);
}
Console.WriteLine("Arrays are " + Math.Round(values.Average()*100,1) + "% faster");
Console.WriteLine("Done");
}
}
}
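On the performance question itself: independent of the arrays-versus-lists choice, the longest increasing subsequence can be computed in O(n log n) by keeping, for each length, the smallest tail value seen so far. A minimal sketch (strictly increasing variant; my code, not a port of the O(n^2) solution above):
using System.Collections.Generic;
static class Lis
{
    public static int Length(int[] nums)
    {
        // tails[k] = smallest tail of any increasing subsequence of length k + 1
        var tails = new List<int>();
        foreach (int x in nums)
        {
            int i = tails.BinarySearch(x);
            if (i < 0) i = ~i;            // index of the first element >= x
            if (i == tails.Count)
                tails.Add(x);             // x extends the longest subsequence found so far
            else
                tails[i] = x;             // x is a smaller (better) tail for length i + 1
        }
        return tails.Count;
    }
}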
Is the following 0-1 Knapsack problem solvable:
'float' positive values and
'float' weights (can be positive or negative)
'float' capacity of the knapsack > 0
I have on average < 10 items, so I'm thinking of using a brute force implementation. However, I was wondering if there is a better way of doing it.
This is a relatively simple binary program.
I'd suggest brute force with pruning. If at any time you exceed the allowable weight, you don't need to try combinations of additional items, you can discard the whole tree.
Oh wait, you have negative weights? Include all negative weights always, then proceed as above for the positive weights. Or do the negative weight items also have negative value?
Include all negative weight items with positive value. Exclude all items with positive weight and negative value.
For negative-weight items with negative value, subtract their weight (increasing the knapsack capacity) and use a pseudo-item which represents not taking that item. The pseudo-item will have positive weight and value: for example, an item with weight -3 and value -5 becomes 3 extra units of capacity plus a pseudo-item of weight 3 and value 5, and choosing the pseudo-item means leaving the original item out. Proceed by brute force with pruning.
class Knapsack
{
double bestValue;
bool[] bestItems;
double[] itemValues;
double[] itemWeights;
double weightLimit;
void SolveRecursive( bool[] chosen, int depth, double currentWeight, double currentValue, double remainingValue )
{
if (currentWeight > weightLimit) return;
if (currentValue + remainingValue < bestValue) return;
if (depth == chosen.Length) {
bestValue = currentValue;
System.Array.Copy(chosen, bestItems, chosen.Length);
return;
}
remainingValue -= itemValues[depth];
chosen[depth] = false;
SolveRecursive(chosen, depth+1, currentWeight, currentValue, remainingValue);
chosen[depth] = true;
currentWeight += itemWeights[depth];
currentValue += itemValues[depth];
SolveRecursive(chosen, depth+1, currentWeight, currentValue, remainingValue);
}
public bool[] Solve()
{
var chosen = new bool[itemWeights.Length];
bestItems = new bool[itemWeights.Length];
bestValue = 0.0;
double totalValue = 0.0;
foreach (var v in itemValues) totalValue += v;
SolveRecursive(chosen, 0, 0.0, 0.0, totalValue);
return bestItems;
}
}
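Note that the fields in this sketch are never initialized; a small constructor (my addition, not part of the original answer) makes the class usable:
public Knapsack(double[] values, double[] weights, double limit)
{
    itemValues = values;
    itemWeights = weights;
    weightLimit = limit;
}
Then var take = new Knapsack(values, weights, capacity).Solve(); returns the chosen-item flags.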
Yeah, brute force it. This is an NP-complete problem, but that shouldn't matter here because you have fewer than 10 items. Brute forcing won't be problematic.
var size = 10;
var capacity = 0;
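// 1024 = 2^10: each bit of i selects one item, so i enumerates every subset of the 10 items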
var permutations = 1024;
var repeat = 10000;
// Generate items
float[] items = new float[size];
float[] weights = new float[size];
Random rand = new Random();
for (int i = 0; i < size; i++)
{
items[i] = (float)rand.NextDouble();
weights[i] = (float)rand.NextDouble();
if (rand.Next(2) == 1)
{
weights[i] *= -1;
}
}
// solution
int bestPosition= -1;
Stopwatch sw = new Stopwatch();
sw.Start();
// for perf testing
//for (int r = 0; r < repeat; r++)
{
var bestValue = 0d;
// solve
for (int i = 0; i < permutations; i++)
{
var total = 0d;
var weight = 0d;
for (int j = 0; j < size; j++)
{
if (((i >> j) & 1) == 1)
{
total += items[j];
weight += weights[j];
}
}
if (weight <= capacity && total > bestValue)
{
bestPosition = i;
bestValue = total;
}
}
}
sw.Stop();
sw.Elapsed.ToString();
If you can only have positive values then every item with a negative weight must go in.
Then I guess you could calculate the value/weight ratio and brute-force the remaining combinations based on that order; once you get one that fits, you can skip the rest.
The problem may be that the grading and sorting are actually more expensive than just doing all the calculations.
There will obviously be a different breakeven point based on the size and distribution of the set.
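A minimal sketch of the ordering step (assuming the negative-weight items have already been forced in, so the remaining weights are all positive):
using System.Linq;
static int[] OrderByRatio(float[] values, float[] weights)
{
    // Indices of the remaining items, best value-per-weight first.
    return Enumerable.Range(0, values.Length)
        .OrderByDescending(i => values[i] / weights[i])
        .ToArray();
}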
public class KnapSackSolver {
public static void main(String[] args) {
int N = Integer.parseInt(args[0]); // number of items
int W = Integer.parseInt(args[1]); // maximum weight of knapsack
int[] profit = new int[N + 1];
int[] weight = new int[N + 1];
// generate random instance, items 1..N
for (int n = 1; n <= N; n++) {
profit[n] = (int) (Math.random() * 1000);
weight[n] = (int) (Math.random() * W);
}
// opt[n][w] = max profit of packing items 1..n with weight limit w
// sol[n][w] = does opt solution to pack items 1..n with weight limit w
// include item n?
int[][] opt = new int[N + 1][W + 1];
boolean[][] sol = new boolean[N + 1][W + 1];
for (int n = 1; n <= N; n++) {
for (int w = 1; w <= W; w++) {
// don't take item n
int option1 = opt[n - 1][w];
// take item n
int option2 = Integer.MIN_VALUE;
if (weight[n] <= w)
option2 = profit[n] + opt[n - 1][w - weight[n]];
// select better of two options
opt[n][w] = Math.max(option1, option2);
sol[n][w] = (option2 > option1);
}
}
// determine which items to take
boolean[] take = new boolean[N + 1];
for (int n = N, w = W; n > 0; n--) {
if (sol[n][w]) {
take[n] = true;
w = w - weight[n];
} else {
take[n] = false;
}
}
// print results
System.out.println("item" + "\t" + "profit" + "\t" + "weight" + "\t"
+ "take");
for (int n = 1; n <= N; n++) {
System.out.println(n + "\t" + profit[n] + "\t" + weight[n] + "\t"
+ take[n]);
}
}
}
import java.util.*;
class Main{
static int max(int a, int b)
{
if(a>b)
return a;
else
return b;
}
public static void main(String args[])
{
int n,i,cap,j,t=2,w;
Scanner sc=new Scanner(System.in);
System.out.println("Enter the number of values ");
n=sc.nextInt();
int solution[]=new int[n];
System.out.println("Enter the capacity of the knapsack :- ");
cap=sc.nextInt();
int v[]=new int[n+1];
int wt[]=new int[n+1];
System.out.println("Enter the values ");
for(i=1;i<=n;i++)
{
v[i]=sc.nextInt();
}
System.out.println("Enter the weights ");
for(i=1;i<=n;i++)
{
wt[i]=sc.nextInt();
}
int knapsack[][]=new int[n+2][cap+1];
for(i=1;i<n+2;i++)
{
for(j=1;j<n+1;j++)
{
knapsack[i][j]=0;
}
}
/*for(i=1;i<n+2;i++)
{
for(j=wt[1]+1;j<cap+2;j++)
{
knapsack[i][j]=v[1];
}
}*/
int k;
for(i=1;i<n+1;i++)
{
for(j=1;j<cap+1;j++)
{
/*if(i==1||j==1)
{
knapsack[i][j]=0;
}*/
if(wt[i]>j)
{
knapsack[i][j]=knapsack[i-1][j];
}
else
{
knapsack[i][j]=max(knapsack[i-1][j],v[i]+knapsack[i-1][j-wt[i]]);
}
}
}
//for displaying the knapsack
for(i=0;i<n+1;i++)
{
for(j=0;j<cap+1;j++)
{
System.out.print(knapsack[i][j]+" ");
}
System.out.print("\n");
}
w=cap;k=n-1;
j=cap;
for(i=n;i>0;i--)
{
if(knapsack[i][j]!=knapsack[i-1][j])
{
j=w-wt[i];
w=j;
solution[k]=1;
System.out.println("k="+k);
k--;
}
else
{
solution[k]=0;
k--;
}
}
System.out.println("Solution for given knapsack is :- ");
for(i=0;i<n;i++)
{
System.out.print(solution[i]+", ");
}
System.out.print(" => "+knapsack[n][cap]);
}
}
This can be solved using dynamic programming. The code below can help you solve the 0/1 knapsack problem with dynamic programming.
internal class knapsackProblem
{
private int[] weight;
private int[] profit;
private int capacity;
private int itemCount;
private int[,] data;
internal void GetMaxProfit()
{
ItemDetails();
data = new int[itemCount, capacity + 1];
for (int i = 1; i < itemCount; i++)
{
for (int j = 1; j < capacity + 1; j++)
{
int q = j - weight[i] >= 0 ? data[i - 1, j - weight[i]] + profit[i] : 0;
if (data[i - 1, j] > q)
{
data[i, j] = data[i - 1, j];
}
else
{
data[i, j] = q;
}
}
}
Console.WriteLine($"\nMax profit can be made : {data[itemCount-1, capacity]}");
IncludedItems();
}
private void ItemDetails()
{
Console.Write("\nEnter the count of items to be inserted : ");
itemCount = Convert.ToInt32(Console.ReadLine()) + 1;
Console.WriteLine();
weight = new int[itemCount];
profit = new int[itemCount];
for (int i = 1; i < itemCount; i++)
{
Console.Write($"Enter weight of item {i} : ");
weight[i] = Convert.ToInt32(Console.ReadLine());
Console.Write($"Enter the profit on the item {i} : ");
profit[i] = Convert.ToInt32(Console.ReadLine());
Console.WriteLine();
}
Console.Write("\nEnter the capacity of the knapsack : ");
capacity = Convert.ToInt32(Console.ReadLine());
}
private void IncludedItems()
{
int i = itemCount - 1;
int j = capacity;
while(i > 0)
{
if(data[i, j] == data[i - 1, j])
{
Console.WriteLine($"Item {i} : Not included");
i--;
}
else
{
Console.WriteLine($"Item {i} : Included");
j = j - weight[i];
i--;
}
}
}
}
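Usage is a single call; all input is read from the console:
var problem = new knapsackProblem();
problem.GetMaxProfit();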
I need to optimise code that counts positive/negative values and removes non-qualified values by time.
I have a queue of values with a time-stamp attached.
I need to discard values that are more than 1 ms old and count the negative and positive values. Here is pseudocode:
list<val> l;
v = q.dequeue();
deleteold(l, v.time);
l.add(v);
negcount = l.count(i => i.value < 0);
poscount = l.count(i => i.value >= 0);
if(negcount == 10) return -1;
if(poscount == 10) return 1;
I need this code in C# running at maximum speed. There is no need to stick to the List; in fact, separate arrays for negative and positive values are welcome.
Edit: probably unsafe arrays will be best. Any hints?
EDIT: thanks for the heads-up. I quickly tested the array version vs the list (which I already have), and the list is faster: 35 ms (arrays) vs 16 ms (list) for 1 million iterations.
Here is the code, for fairness' sake:
class Program
{
static int LEN = 10;
static int LEN1 = 9;
static void Main(string[] args)
{
Var[] data = GenerateData();
Stopwatch sw = new Stopwatch();
for (int i = 0; i < 30; i++)
{
sw.Reset();
ArraysMethod(data, sw);
Console.Write("Array: {0:0.0000}ms ", sw.ElapsedTicks / 10000.0);
sw.Reset();
ListMethod(data, sw);
Console.WriteLine("List: {0:0.0000}ms", sw.ElapsedTicks / 10000.0);
}
Console.ReadLine();
}
private static void ArraysMethod(Var[] data, Stopwatch sw)
{
int signal = 0;
int ni = 0, pi = 0;
Var[] n = new Var[LEN];
Var[] p = new Var[LEN];
for (int i = 0; i < LEN; i++)
{
n[i] = new Var();
p[i] = new Var();
}
sw.Start();
for (int i = 0; i < DATALEN; i++)
{
Var v = data[i];
if (v.val < 0)
{
int x = 0;
ni = 0;
// time is not sequential
for (int j = 0; j < LEN; j++)
{
long diff = v.time - n[j].time;
if (diff < 0)
diff = 0;
// too old
if (diff > 10000)
x = j;
else
ni++;
}
n[x] = v;
if (ni >= LEN1)
signal = -1;
}
else
{
int x = 0;
pi = 0;
// time is not sequential
for (int j = 0; j < LEN; j++)
{
long diff = v.time - p[j].time;
if (diff < 0)
diff = 0;
// too old
if (diff > 10000)
x = j;
else
pi++;
}
p[x] = v;
if (pi >= LEN1)
signal = 1;
}
}
sw.Stop();
}
private static void ListMethod(Var[] data, Stopwatch sw)
{
int signal = 0;
List<Var> d = new List<Var>();
sw.Start();
for (int i = 0; i < DATALEN; i++)
{
Var v = data[i];
d.Add(new Var() { time = v.time, val = v.val < 0 ? -1 : 1 });
// delete expired entries (older than 10000 ticks = 1 ms), oldest first
for (int j = 0; j < d.Count; j++)
{
    if (v.time - d[j].time > 10000)
        d.RemoveAt(j--);
    else
        break;
}
int cnt = 0;
int k = d.Count;
for (int j = 0; j < k; j++)
{
cnt += d[j].val;
}
if ((cnt >= 0 ? cnt : -cnt) >= LEN)
signal = 9;
}
sw.Stop();
}
static int DATALEN = 1000000;
private static Var[] GenerateData()
{
Random r = new Random(DateTime.Now.Millisecond);
Var[] data = new Var[DATALEN];
Var prev = new Var() { val = 0, time = DateTime.Now.TimeOfDay.Ticks};
for (int i = 0; i < DATALEN; i++)
{
    int x = r.Next(20);
    data[i] = new Var() { val = x - 10, time = prev.time + x * 1000 };
    prev = data[i]; // advance the clock so time-stamps move forward
}
return data;
}
class Var
{
public int val;
public long time;
}
}
To get negcount and poscount, you are traversing the entire list twice.
Instead, traverse it once (to compute negcount), and then poscount = l.Count - negcount.
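A minimal sketch of the single pass, assuming the items expose a val field as in the benchmark code above:
int negcount = 0;
foreach (var item in l)
    if (item.val < 0) negcount++;   // one traversal
int poscount = l.Count - negcount;  // no second traversal needed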
Some ideas:
Only count until max(negcount,poscount) becomes 10, then quit (no need to count the rest). Only works if 10 is the maximum count.
Count negative and positive items in 1 go.
Calculate only negcount and infer poscount from count - negcount, which is cheaper than counting both.
Whether any of them are faster than what you have now, and which is fastest, depends among other things on what the data typically looks like. Is it long? Short?
Some more about 3:
You can use trickery to avoid branches here. You don't have to test whether the item is negative, you can add its negativity to a counter. Supposing the item is x and it is an int, x >> 31 is 0 for positive x and -1 for negative x. So counter -= x >> 31 will give negcount.
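For example (a sketch, assuming the values sit in an int[]):
int negcount = 0;
for (int i = 0; i < values.Length; i++)
    negcount -= values[i] >> 31;        // x >> 31 is -1 for negative x, 0 otherwise
int poscount = values.Length - negcount;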
Edit: unsafe arrays can be faster, but shouldn't be in this case, because the loop would be of the form
for (int i = 0; i < array.Length; i++)
do something with array[i];
which the JIT compiler already optimizes (it can eliminate the array bounds check).