I'm trying to implement a parallel (red-black) Gauss-Seidel algorithm. Here is my kernel function:
#define dx (float2)(1, 0)
#define dy (float2)(0, 1)
__kernel void rbgs(read_only image2d_t fh, read_only image2d_t vh,
                   __global float *vvh, __global float *hx, __global float *hy, __global int *red)
{
    // Each work-item updates one cell; x ids are doubled so a work-item
    // only ever touches cells of the current color.
    float2 id = (float2)(get_global_id(0) << 1, get_global_id(1));
    const float2 sz = (float2)(1, get_global_size(0));
    const sampler_t sampler = CLK_ADDRESS_CLAMP_TO_EDGE;
    // Shift odd rows by one column, alternating with *red (1 = red pass, 0 = black pass).
    id.x += ((int)(id.y + *red) & 1);
    // Five-point stencil: weighted average of the four neighbors minus the source term.
    const float s = ((read_imagef(vh, sampler, id - dx).x + read_imagef(vh, sampler, id + dx).x) / (*hy * *hy)
                   + (read_imagef(vh, sampler, id - dy).x + read_imagef(vh, sampler, id + dy).x) / (*hx * *hx)
                   - read_imagef(fh, sampler, id).x)
                   / (2 / (*hx * *hx) + 2 / (*hy * *hy));
    vvh[(int)dot(id, sz)] = s;
}
It compiles fine, but I'm having trouble passing an image to the kernel.
Here's what I'm trying:
var rows = u.GetLength(0);
var cols = u.GetLength(1);
var array1d = new float[rows * cols];
var current = 0;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
array1d[current++] = u[i, j];
}
}
rows = fh.GetLength(0);
cols = fh.GetLength(1);
var array1df= new float[rows * cols];
current = 0;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
array1df[current++] = fh[i, j];
}
}
CLCalc.Program.Image2D CLfh = new CLCalc.Program.Image2D(array1df, M, N);
CLCalc.Program.Image2D CLvh = new CLCalc.Program.Image2D(array1d, M, N);
CLfh.WriteToDevice(array1df);
CLvh.WriteToDevice(array1d);
float[] solution = new float[(N+1)*(M+1)];
CLCalc.Program.Variable Stepx = new CLCalc.Program.Variable(new float[] { hx });
CLCalc.Program.Variable Stepy = new CLCalc.Program.Variable(new float[] { hy });
CLCalc.Program.Variable Red = new CLCalc.Program.Variable(new int[] { 1 }); // the kernel expects __global int* red
CLCalc.Program.Variable Result = new CLCalc.Program.Variable(solution);
CLCalc.Program.MemoryObject[] args = new CLCalc.Program.MemoryObject[] {CLfh, CLvh, Result, Stepx, Stepy, Red} ;
gs.Execute(args, new int[] { 2, 4 });
Result.ReadFromDeviceTo(solution);
CLvh.WriteToDevice(solution);
Red.WriteToDevice(new int[] { 0 });
gs.Execute(args, new int[] { 2, 4 });
Result.ReadFromDeviceTo(solution);
for (int m = 0; m < (M + 1) * (N + 1); m++)
u[m / (N+1), m % (N+1)] = solution[m];
return u;
The program fails with a runtime exception: "Vector Length should be 4*width*height". I know the exception is thrown because an Image2D stores its data in RGBA format, but I don't really understand how to overcome this.
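If I read the message correctly, the host array needs four floats per pixel. Here is a minimal sketch of the repacking I have in mind (putting the scalar in the R channel, which the kernel reads back as .x, is my assumption, not something the OpenCLTemplate docs confirm):
// Repack a width*height scalar array into the 4*width*height RGBA layout
// that Image2D appears to expect, judging by the exception message.
static float[] ToRgba(float[] scalar, int width, int height)
{
    var rgba = new float[4 * width * height];
    for (int i = 0; i < width * height; i++)
        rgba[4 * i] = scalar[i]; // R holds the value; G, B, A stay 0
    return rgba;
}
I would then create the images with, e.g., new CLCalc.Program.Image2D(ToRgba(array1df, M, N), M, N). Is that the right approach?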
Any help would be appreciated.
This is my first question and I'm relatively new to C#. (Excuse my bad English)
I'm writing a template matching algorithm in C# using the .NET Framework in a Windows Forms application in Visual Studio. Sadly, the new indices-and-ranges features from C# 8.0, especially the range operator (..), aren't implemented in the .NET Framework. I know there is a workaround, as you can see in this thread, but it is not supported by Microsoft. So I'm searching for another way to parallelize my elementwise 2D array (matrix) calculations to make my program faster.
In my program, I calculate the differential square (ds) between an area of a 2D array (my image, with the size of my template) and a 2D array (my template). These values are written to a new 2D array (DS) that holds every differential square at the position corresponding to the image. I can then search DS for the indices where the differential square is minimal, which is the matching position (highest correspondence between template and image) of the template inside the image.
In Python, the calculation of DS is very quick using the slice operator (:) and looks like this:
H, W = I.shape  # read out height H & width W from image I
h, w = T.shape  # read out height h & width w from template T
for i in range(H-h+1):
    for j in range(W-w+1):
        DS[i,j] = np.sum((I[i:i+h, j:j+w] - T)**2)
But in C# I have to compute DS elementwise, so it looks like this and takes forever:
int Tw = template.Width;
int Th = template.Height;
int Iw = image.Width;
int Ih = image.Height;
int d = 0;
int[,] ds = new int[Tw, Th];
int[,] DS = new int[Iw - Tw + 1, Ih - Th + 1];
for (int y = 0; y < Ih - Th + 1; y++)
{
    for (int x = 0; x < Iw - Tw + 1; x++)
    {
        for (int yT = 0; yT < Th; yT++)
        {
            for (int xT = 0; xT < Tw; xT++)
            {
                d = I[x + xT, y + yT] - T[xT, yT];
                ds[xT, yT] = d * d;
            }
        }
        int sum = ds.Cast<int>().Sum();
        DS[x, y] = sum;
    }
}
I know that I could use threads but that would be a little complex for me.
Or maybe I could use CUDA with my Nvidia GPU to speed things up a little.
But I am asking you (and myself): is there another way to parallelize (or otherwise optimize) my elementwise 2D array calculations? A sketch of the kind of thing I mean is below.
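For illustration, here is a minimal, untested sketch using Parallel.For from System.Threading.Tasks (which my project already references). Each row y accumulates into a local sum instead of the shared ds array, so the parallel iterations don't interfere:
// Untested sketch: parallelize the outer row loop. Each DS[x, y] cell is
// written by exactly one iteration; I and T are only read.
Parallel.For(0, Ih - Th + 1, y =>
{
    for (int x = 0; x < Iw - Tw + 1; x++)
    {
        int sum = 0;
        for (int yT = 0; yT < Th; yT++)
        {
            for (int xT = 0; xT < Tw; xT++)
            {
                int d = I[x + xT, y + yT] - T[xT, yT];
                sum += d * d;  // accumulate directly, no shared ds needed
            }
        }
        DS[x, y] = sum;
    }
});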
I look forward to any help.
Many thanks in advance!!!
EDIT:
Here is a working example of my code as a .NET Framework console app. As you can see, I do a lot of elementwise 2D and 3D array calculations that I would like to process in parallel (or perform faster in any other way):
using System;
using System.Drawing;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace TemplateMatcher_Console
{
class Program
{
public static int[,,] bitmapToMatrix(Bitmap bmp)
{
int[,,] I = new int[bmp.Width, bmp.Height, 3];
for (int y = 0; y < bmp.Height; y++)
{
for (int x = 0; x < bmp.Width; x++)
{
Color pix = bmp.GetPixel(x, y);
I[x, y, 0] = Convert.ToInt32(pix.R);
I[x, y, 1] = Convert.ToInt32(pix.G);
I[x, y, 2] = Convert.ToInt32(pix.B);
}
}
return I;
}
public static int[] indexOfMiniumValue(int[,] matrix)
{
int value = 0;
int minValue = 999999999;
int minFirstIndex = 0;
int minSecondIndex = 0;
int[] ij = new int[2];
for (int i = 0; i < matrix.GetLength(0); i++)
{
for (int j = 0; j < matrix.GetLength(1); j++)
{
value = matrix[i, j];
if (value < minValue)
{
minValue = value;
minFirstIndex = i;
minSecondIndex = j;
}
}
}
ij[0] = minFirstIndex;
ij[1] = minSecondIndex;
return ij;
}
public static void Print2DArray<T>(T[,] matrix)
{
for (int i = 0; i < matrix.GetLength(0); i++)
{
for (int j = 0; j < matrix.GetLength(1); j++)
{
Console.Write(matrix[i, j] + "\t");
}
Console.WriteLine();
}
}
static void Main(string[] args)
{
// Declaration & input
Console.WriteLine("Type the filepath for your image and then press Enter");
string im = Console.ReadLine();
Console.WriteLine("\nType the filepath for your template and then press Enter");
string temp = Console.ReadLine();
Bitmap template = new Bitmap(temp);
Bitmap image = new Bitmap(im);
int Tw = template.Width;
int Th = template.Height;
int Iw = image.Width;
int Ih = image.Height;
int[,] ds = new int[Tw, Th];
int[,] DS = new int[Iw - Tw + 1, Ih - Th + 1];
int[,,] DS_rgb = new int[Iw - Tw + 1, Ih - Th + 1, 3];
int[] xy = new int[2];
// Processing
// int[,,] I = Array.ConvertAll(image_I, new Converter<byte, int>(Convert.ToInt32));
int[,,] I = bitmapToMatrix(image);
int[,,] T = bitmapToMatrix(template);
for (int rgb = 0; rgb < 3; rgb++)
{
for (int y = 0; y < Ih - Th + 1; y++)
{
for (int x = 0; x < Iw - Tw + 1; x++)
{
//DS_rgb[x, y, rgb] = (I[x .. x + template.Width, y .. y + template.Height, rgb] - T[0 .. template.Width, 0 .. template.Height, rgb]);
for (int yT = 0; yT < Th; yT++)
{
for (int xT = 0; xT < Tw; xT++)
{
ds[xT, yT] = (I[x + xT, y + yT, rgb] - T[xT, yT, rgb]) * (I[x + xT, y + yT, rgb] - T[xT, yT, rgb]);
}
}
int sum = ds.Cast<int>().Sum();
DS_rgb[x, y, rgb] = sum;
}
}
}
//DS[.., ..] = DS_rgb[.., .., 0] + DS_rgb[.., .., 1] + DS_rgb[.., .., 2];
for (int y = 0; y < Ih - Th + 1; y++)
{
for (int x = 0; x < Iw - Tw + 1; x++)
{
DS[x, y] = DS_rgb[x, y, 0] + DS_rgb[x, y, 1] + DS_rgb[x, y, 2];
}
}
//xy = DS.FindIndex(z => z == Math.Min(DS));
xy = indexOfMiniumValue(DS);
// Output
// Print the matrix DS
/*
Console.WriteLine("\nMatrix with all differtial squares:");
Print2DArray(DS);
*/
Console.WriteLine($"\nPosition of your template in your image (upper left corner): ({xy[0]}, {xy[1]})");
Console.Write("\nPress any key to close the TemplateMatcher console app...");
Console.ReadKey();
}
}
}
I am trying to perform Cross Correlation on these 2 images in C#:
Image, Template
Matlab says the result is supposed to look like: Matlab result
But this is my result: My result
Here is my Cross Correlation function:
public static Signal2D CrossCorrelation2D(Signal2D signal, Signal2D pulse) {
return InverseFFT2D(FFT2D(signal) * FFT2D(pulse).GetConjugate());
}
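For reference, this relies on the correlation theorem, $\mathcal{F}\{f \star g\} = \overline{\mathcal{F}\{f\}} \cdot \mathcal{F}\{g\}$: conjugating one spectrum turns the pointwise FFT product from circular convolution into circular correlation.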
Here is my FFT2D:
public static Signal2D FFT2D(Signal2D signal) {
Signal2D result = new Signal2D(signal.Height, signal.Width);
for (int i = 0; i < result.Height; i++)
result[i] = new ComplexNumber[signal[i].Length];
//rows
for (int n = 0; n < signal.Height; n++) {
result[n] = FFT(signal[n]);
}
//columns
for (int i = 0; i < signal[0].Length; i++) {
ComplexNumber[] col = new ComplexNumber[signal.Height];
for (int j = 0; j < col.Length; j++) {
col[j] = result[j][i];
}
col = FFT(col);
for (int j = 0; j < col.Length; j++) {
result[j][i] = col[j];
}
}
return result;
}
Here is my FFT:
public static Signal FFT(Signal signal) {
int N = signal.Length;
if (N == 1)
return signal;
if ((N & (N - 1)) != 0)
throw new ArgumentOutOfRangeException("signal length must be a power of 2");
Signal evenArr = new Signal(N / 2);
Signal oddArr = new Signal(N / 2);
for (int i = 0; i < N / 2; i++) {
evenArr[i] = signal[2 * i];
}
evenArr = FFT(evenArr);
for (int i = 0; i < N / 2; i++) {
oddArr[i] = signal[2 * i + 1];
}
oddArr = FFT(oddArr);
Signal result = new Signal(N);
for (int k = 0; k < N / 2; k++) {
double w = -2.0 * k * Math.PI / N;
ComplexNumber wk = new ComplexNumber(Math.Cos(w), Math.Sin(w));
ComplexNumber even = evenArr[k];
ComplexNumber odd = oddArr[k];
result[k] = even + (wk * odd);
result[k + N / 2] = even - (wk * odd);
}
return result;
}
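(The last loop is the standard radix-2 butterfly: $X[k] = E[k] + w_N^k O[k]$ and $X[k+N/2] = E[k] - w_N^k O[k]$ with $w_N = e^{-2\pi i/N}$, where $E$ and $O$ are the FFTs of the even- and odd-indexed samples.)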
Here is my Signal multiplication (using pointwise multiplication):
public static Signal2D operator* (Signal2D a, Signal2D b) {
if (a.Height != b.Height || a.Width != b.Width)
throw new ArgumentException("Sizes must be equal");
Signal2D result = new Signal2D(a.Height, a.Width);
for (int y = 0; y < a.Height; y++) {
for (int x = 0; x < a.Width; x++) {
result[y][x] = a[y][x] * b[y][x];
}
}
return result;
}
Any help is appreciated, thanks.
Edit: I left the Matlab image at its original size of 1023 by 1023 and overlaid my result. It looks like I may already have the right result; I am just not sure how Matlab pads the image. Overlaid results (the red is the white part from my result, the grey is the black part from my result; black/white is from Matlab).
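One guess: for a linear (non-circular) correlation, both inputs would have to be zero-padded to at least (height1 + height2 - 1) by (width1 + width2 - 1), rounded up to powers of two for my FFT. A sketch of such padding, using only the Signal2D members visible above (that this is how Matlab avoids the wrap-around is my assumption, not something I have verified):
// Zero-pad a Signal2D to newHeight x newWidth so the FFT-based
// correlation becomes linear instead of circular.
public static Signal2D ZeroPad(Signal2D s, int newHeight, int newWidth) {
    Signal2D padded = new Signal2D(newHeight, newWidth);
    for (int i = 0; i < newHeight; i++) {
        padded[i] = new ComplexNumber[newWidth];
        for (int j = 0; j < newWidth; j++)
            padded[i][j] = (i < s.Height && j < s[i].Length)
                ? s[i][j]
                : new ComplexNumber(0, 0);
    }
    return padded;
}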
I am calculating the average of the RGB channels of images in C# and Matlab and getting slightly different results (using 0-255 pixel values).
The difference is not large, but I just can't seem to understand the reason.
Is this common? Is it due to the bitmap implementation of the image, a precision issue, or does it mean there is something wrong with my code?
Code:
Matlab
I = imread('Photos\hv2512.jpg');
Ir=double(I(:,:,1));
Ig=double(I(:,:,2));
Ib=double(I(:,:,3));
avRed=mean2(Ir)
avGn=mean2(Ig)
avBl=mean2(Ib)
C#
Bitmap bmp = new Bitmap(open.FileName);
double[,] Red = new double[bmp.Width, bmp.Height];
double[,] Green = new double[bmp.Width, bmp.Height];
double[,] Blue = new double[bmp.Width, bmp.Height];
int PixelSize = 3;
BitmapData bmData = null;
if (Safe)
{
Color c;
for (int j = 0; j < bmp.Height; j++)
{
for (int i = 0; i < bmp.Width; i++)
{
c = bmp.GetPixel(i, j);
Red[i, j] = (double) c.R;
Green[i, j] = (double) c.G;
Blue[i, j] = (double) c.B;
}
}
}
double avRed = 0, avGrn = 0, avBlue = 0;
double sumRed = 0, sumGrn = 0, sumBlue = 0;
int cnt = 0;
for (int rws = 0; rws < Red.GetLength(0); rws++)
for (int clms = 0; clms < Red.GetLength(1); clms++)
{
sumRed = sumRed + Red[rws, clms];
sumGrn = sumGrn + Green[rws, clms];
sumBlue = sumBlue + Blue[rws, clms];
cnt++;
}
avRed = sumRed / cnt;
avGrn = sumGrn / cnt;
avBlue = sumBlue / cnt;
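One check that rules out floating-point accumulation error (a sketch; for 0-255 integer data the long sums are exact): if these averages still differ from mean2, the decoded pixel values themselves must differ, e.g. because the two JPEG decoders do not produce bit-identical output.
long sumR = 0, sumG = 0, sumB = 0;
for (int j = 0; j < bmp.Height; j++)
{
    for (int i = 0; i < bmp.Width; i++)
    {
        Color c = bmp.GetPixel(i, j);
        sumR += c.R; sumG += c.G; sumB += c.B; // exact integer sums
    }
}
double avR = (double)sumR / (bmp.Width * bmp.Height);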
This is the image I am using
I'm attempting to make a neural network in C#; I based the design on Python code I wrote a while back. But somehow the end result is not the same.
I'm new to C# and I'm using it in Unity, so I'm limited in which libraries I can use.
In Python, numpy can do matrix multiplication with the numpy.dot() method. I haven't found anything similar in C#, especially in Unity, so I had to do it by hand.
The Python code:
import numpy as np

class NN:
    def __init__(self, n_input, n_hidden_layers, n_hidden_nodes, n_output):
        self.weights_hidden = []
        for n in range(n_hidden_layers + 1):
            if n == 0:
                size = n_input, n_hidden_nodes
            elif n == n_hidden_layers:
                size = n_hidden_nodes, n_output
            else:
                size = n_hidden_nodes, n_hidden_nodes
            self.weights_hidden.append(
                np.random.random(size)
            )

    @staticmethod
    def activation(x):
        return np.tanh(x)

    def feed_forward(self, ip):
        input_values = (ip - np.mean(ip, axis=0)) / np.std(ip, axis=0)
        for w, weights in enumerate(self.weights_hidden):
            if w == 0:
                result = input_values
            result = np.array(
                map(self.activation, result.dot(weights))
            )
        return result

ANN = NN(n_input=5, n_hidden_layers=2, n_hidden_nodes=3, n_output=1)
print ANN.feed_forward([1, 2, 3, 4, 5])
My attempt to convert it to C#.
using UnityEngine;
using System.Collections;
public class neural_net : MonoBehaviour {
int n_inputs;
int n_hidden_layers;
int n_hidden_nodes;
int n_outputs;
float[] inputs;
ArrayList hidden_weights;
ArrayList hidden_results;
float[] output_results;
public void init(int n_inputs, int n_hidden_layers, int n_hidden_nodes, int n_outputs){
this.n_inputs = n_inputs;
this.n_hidden_layers = n_hidden_layers;
this.n_hidden_nodes = n_hidden_nodes;
this.n_outputs = n_outputs;
this.hidden_weights = new ArrayList ();
this.hidden_results = new ArrayList ();
this.output_results = new float[n_outputs];
int rows;
int columns;
for (int h = 0; h < n_hidden_layers + 2; h++) {
if (h == 0){
// input -> hidden
rows = n_inputs;
columns = n_hidden_nodes;
}
else if(h == n_hidden_layers + 1){
// hidden -> output
rows = n_hidden_nodes;
columns = n_outputs;
}
else {
// hidden -> hidden
rows = n_hidden_nodes;
columns = n_hidden_nodes;
}
float[] hidden_result = new float[rows*columns];
hidden_results.Add(hidden_results);
float[,] target = new float[rows,columns];
string test = "";
for(int r = 0; r < rows; r++){
for(int c = 0; c < columns; c++){
target[r,c] = Random.Range(0.0f, 1.0f);
test += target[r,c] + ", ";
}
}
hidden_weights.Add(target);
}
}
float activation(float x){
    // (1 - e^(-2x)) / (1 + e^(-2x)) is algebraically tanh(x);
    // (float)System.Math.Tanh(x) would be equivalent.
    return (1 - Mathf.Exp (-2 * x)) / (1 + Mathf.Exp (-2 * x));
}
float[] _dot_matrix(float[] results, float[,] weights){
float[] new_matrix = new float[weights.GetLength(1)];
string t0 = "";
for (int r = 0; r < weights.GetLength(1); r++){
float res = 0;
for (int c = 0; c < weights.GetLength(0); c++) {
res += results[c] * weights[c,r];
}
new_matrix[r] = res;
}
return new_matrix;
}
float[] _map_activation(float[] pre_results){
float[] results = new float[pre_results.Length];
for (int i = 0; i < results.Length; i++) {
results[i] = activation(pre_results[i]);
}
return results;
}
float[] feed_forward(){
int h;
for (h = 0; h < n_hidden_layers + 2; h++) {
float[] dot_matrix_result;
if(h == 0){
dot_matrix_result = _dot_matrix(inputs, (float[,])hidden_weights[h]);
}
else if (h == n_hidden_layers +1){
dot_matrix_result = _dot_matrix((float[])hidden_results[h-1], (float[,])hidden_weights[h]);
output_results = _map_activation(dot_matrix_result);
break;
}
else {
dot_matrix_result = _dot_matrix((float[])hidden_results[h-1], (float[,])hidden_weights[h]);
}
float[] result = _map_activation(dot_matrix_result);
hidden_results[h] = _map_activation(result);
}
return output_results;
}
float[] normalize_input(float[] inputs){
float sum = 0.0f;
for (int i = 0; i < inputs.Length; i++) {
sum += inputs[i] ;
}
float average = sum / inputs.Length;
float[] deviations = new float[inputs.Length];
for (int i = 0; i < inputs.Length; i++) {
deviations[i] = Mathf.Pow(inputs[i] - average,2);
}
float sum_deviation = 0;
for (int i = 0; i < deviations.Length; i++) {
sum_deviation += deviations[i];
}
float variance = sum_deviation / deviations.Length;
float std = Mathf.Sqrt (variance);
for (int i = 0; i < inputs.Length; i++) {
inputs[i] = (inputs[i] - average)/std;
}
return inputs;
}
public void start_net(float[] inputs){
this.inputs = normalize_input(inputs);
feed_forward ();
}
}
I run the net from another script by calling init and then start_net().
I tested with fixed (non-random) weights and fixed input data, but it didn't come to the same result as the Python code.
What's wrong with the C# code?
At the moment I'm trying to implement an FIR lowpass filter on a wave file. The FIR coefficients were obtained in MATLAB (filter order 40). Now I need to implement the FIR algorithm in C#, and I'm finding it difficult.
Any help?
Thanks
How about this:
private static double[] FIR(double[] b, double[] x)
{
    int M = b.Length;   // number of taps (filter order + 1)
    int n = x.Length;
    // y[k] = b[0]*x[k] + b[1]*x[k-1] + ... + b[M-1]*x[k-(M-1)]
    var y = new double[n];
    for (int yi = 0; yi < n; yi++)
    {
        double t = 0.0;
        for (int bi = M - 1; bi >= 0; bi--)
        {
            if (yi - bi < 0) continue;  // samples before the start count as zero
            t += b[bi] * x[yi - bi];
        }
        y[yi] = t;
    }
    return y;
}
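For example, a quick sanity check with a 3-tap moving average (with the order-40 MATLAB design, b would instead hold the 41 exported coefficients):
double[] b = { 1.0 / 3, 1.0 / 3, 1.0 / 3 };  // 3-tap moving average
double[] x = { 3, 3, 3, 3, 3, 3 };
double[] y = FIR(b, x);
// y = 1, 2, 3, 3, 3, 3: the output ramps up while the filter
// accumulates M-1 samples of history, then settles at 3.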
Try this. Does it help?
static void Main()
{
var bb = new List<double> { 1, 2, 3, 4 };
var xx = new List<double> { 3, 3, 4, 5 };
var yy = func_FIR(bb, xx);
for (int i = 0; i < yy.Count; i++)
{
Console.WriteLine("y[{0}] = {1}",i,yy[i]);
}
}
public static List<double> func_FIR(List<double> b, List<double> x)
{
    // y[j] = b[0]*x[j] + b[1]*x[j-1] + ... + b[M-1]*x[j-(M-1)]
    var y = new List<double>();
    int M = b.Count;
    int n = x.Count;
    for (int j = 0; j < n; j++)
    {
        double t = 0.0;              // reset the accumulator for each output sample
        for (int i = 0; i < M; i++)
        {
            if (j - i < 0) continue; // samples before the start count as zero
            t += b[i] * x[j - i];    // index relative to the current sample j
        }
        y.Add(t);
    }
    return y;
}
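With bb = {1, 2, 3, 4} and xx = {3, 3, 4, 5} from Main, this prints y[0] = 3, y[1] = 9, y[2] = 19, y[3] = 34.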