I am trying to migrate code written in C# to C for better performance, and so it can be used in other software that supports C-based DLLs.
I have this function in C#, which performs as expected:
private byte[] authDataExtract(byte[] concatenatedData)
{
try
{
byte[] authData = null;
authData = new byte[concatenatedData.Length - 10];
int blockCount = 0;
while (true)
{
if (concatenatedData.Length - 10 - blockCount < 4)
break;
else if (concatenatedData.Length - 10 - blockCount >= 4)
{
if ((isAllZero(concatenatedData, blockCount) == true) || (isAllff(concatenatedData, blockCount) == true)) //Modified to handle 0xFF padding - Sudhanwa
break;
int dlc = int.Parse(concatenatedData[blockCount + 3].ToString("X2"), System.Globalization.NumberStyles.HexNumber); //Modified to handle exception in case of Padding CR - Sudhanwa
//int dlc = int.Parse(bytetostring(concatenatedData[blockCount + 3]));
if ((dlc > concatenatedData.Length - 10 - blockCount))
{
authData = new byte[concatenatedData.Length - 10];
Buffer.BlockCopy(concatenatedData, 0, authData, 0, concatenatedData.Length - 10);
blockCount = concatenatedData.Length - 10;
break;
}
authData = new byte[blockCount + 4 + dlc];
Buffer.BlockCopy(concatenatedData, 0, authData, 0, blockCount + 4 + dlc);
blockCount += dlc + 4;
}
}
return authData;
}
catch (Exception)
{
throw;
}
}
I want to write equivalent C code for this
My current C code is
void authDataExtract(unsigned char payload [],unsigned int size_payload,unsigned char * arr)
{
//unsigned char rec_tMAC [8];
int blockcount=0;
int dlc=0;
//unsigned char* arr= NULL;
//memcpy(&rec_tMAC[0],&payload[size_payload-8],8);
//printArr(rec_tMAC,8);
while (1)
{
if (size_payload- 10 - blockcount < 4)
break;
else if (size_payload - 10 - blockcount >= 4)
{
if ((isAllZero(payload,size_payload,blockcount) == true) ||
(isAllff(payload,size_payload, blockcount) == true))
break;
dlc= payload[blockcount + 3];
if ((dlc > size_payload - 10 - blockcount))
{
arr = (unsigned char*)calloc(size_payload-10,sizeof(unsigned char));
memcpy(arr,payload,size_payload-10);
blockcount = size_payload - 10;
break;
}
arr = (unsigned char*)calloc(blockcount + 4 + dlc,sizeof(unsigned char));
memcpy(arr,payload,blockcount + 4 + dlc);
blockcount += dlc + 4;
}
}
}
But it is giving exceptions with the pointer. I believe I have an issue with dynamic memory allocation.
Assuming the logic in the C# code is correct, I would appreciate help getting the exact same logic into the C function.
Thanks in advance.
Do you see that the C# function returns a byte[]?
private byte[] authDataExtract(byte[] concatenatedData)
But the C function does not:
void authDataExtract(unsigned char payload [],unsigned int size_payload,unsigned char * arr)
Note that arr is a new variable, local to the authDataExtract function. Assigning to it has no effect on the caller.
Try as below.
unsigned char* authDataExtract(unsigned char payload [],unsigned int size_payload,unsigned char * arr) {
while(1) {
...
}
return arr;
}
from main
unsigned char *p = authDataExtract(….);
if (!p) error;
You could also use pointer to pointer but I leave that to you.
I am trying to port the AES GCM implementation from the Python OpenTLS project to C# (.NET). Below is the code from OpenTLS:
#######################
### Galois Counter Mode
#######################
class AES_GCM:
    def __init__(self, keys, key_size, hash):
        key_size //= 8
        hash_size = hash.digest_size
        self.client_AES_key = keys[0 : key_size]
        self.server_AES_key = keys[key_size : 2*key_size]
        self.client_IV = keys[2*key_size : 2*key_size+4]
        self.server_IV = keys[2*key_size+4 : 2*key_size+8]
        self.H_client = bytes_to_int(AES.new(self.client_AES_key, AES.MODE_ECB).encrypt('\x00'*16))
        self.H_server = bytes_to_int(AES.new(self.server_AES_key, AES.MODE_ECB).encrypt('\x00'*16))

    def GF_mult(self, x, y):
        product = 0
        for i in range(127, -1, -1):
            product ^= x * ((y >> i) & 1)
            x = (x >> 1) ^ ((x & 1) * 0xE1000000000000000000000000000000)
        return product

    def H_mult(self, H, val):
        product = 0
        for i in range(16):
            product ^= self.GF_mult(H, (val & 0xFF) << (8 * i))
            val >>= 8
        return product

    def GHASH(self, H, A, C):
        C_len = len(C)
        A_padded = bytes_to_int(A + b'\x00' * (16 - len(A) % 16))
        if C_len % 16 != 0:
            C += b'\x00' * (16 - C_len % 16)
        tag = self.H_mult(H, A_padded)
        for i in range(0, len(C) // 16):
            tag ^= bytes_to_int(C[i*16:i*16+16])
            tag = self.H_mult(H, tag)
        tag ^= bytes_to_int(nb_to_n_bytes(8*len(A), 8) + nb_to_n_bytes(8*C_len, 8))
        tag = self.H_mult(H, tag)
        return tag

    def decrypt(self, ciphertext, seq_num, content_type, debug=False):
        iv = self.server_IV + ciphertext[0:8]
        counter = Counter.new(nbits=32, prefix=iv, initial_value=2, allow_wraparound=False)
        cipher = AES.new(self.server_AES_key, AES.MODE_CTR, counter=counter)
        plaintext = cipher.decrypt(ciphertext[8:-16])
        # Computing the tag is actually pretty time consuming
        if debug:
            auth_data = nb_to_n_bytes(seq_num, 8) + nb_to_n_bytes(content_type, 1) + TLS_VERSION + nb_to_n_bytes(len(ciphertext)-8-16, 2)
            auth_tag = self.GHASH(self.H_server, auth_data, ciphertext[8:-16])
            auth_tag ^= bytes_to_int(AES.new(self.server_AES_key, AES.MODE_ECB).encrypt(iv + '\x00'*3 + '\x01'))
            auth_tag = nb_to_bytes(auth_tag)
            print('Auth tag (from server): ' + bytes_to_hex(ciphertext[-16:]))
            print('Auth tag (from client): ' + bytes_to_hex(auth_tag))
        return plaintext

    def encrypt(self, plaintext, seq_num, content_type):
        iv = self.client_IV + os.urandom(8)
        # Encrypts the plaintext
        plaintext_size = len(plaintext)
        counter = Counter.new(nbits=32, prefix=iv, initial_value=2, allow_wraparound=False)
        cipher = AES.new(self.client_AES_key, AES.MODE_CTR, counter=counter)
        ciphertext = cipher.encrypt(plaintext)
        # Compute the Authentication Tag
        auth_data = nb_to_n_bytes(seq_num, 8) + nb_to_n_bytes(content_type, 1) + TLS_VERSION + nb_to_n_bytes(plaintext_size, 2)
        auth_tag = self.GHASH(self.H_client, auth_data, ciphertext)
        auth_tag ^= bytes_to_int(AES.new(self.client_AES_key, AES.MODE_ECB).encrypt(iv + b'\x00'*3 + b'\x01'))
        auth_tag = nb_to_bytes(auth_tag)
        # print('Auth key: ' + bytes_to_hex(nb_to_bytes(self.H)))
        # print('IV: ' + bytes_to_hex(iv))
        # print('Key: ' + bytes_to_hex(self.client_AES_key))
        # print('Plaintext: ' + bytes_to_hex(plaintext))
        # print('Ciphertext: ' + bytes_to_hex(ciphertext))
        # print('Auth tag: ' + bytes_to_hex(auth_tag))
        return iv[4:] + ciphertext + auth_tag
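One detail of the Python code worth noting before the port: the CTR counter starts at 2 because the block with counter value 1 is reserved for the tag mask (the ECB encryption of iv + '\x00'*3 + '\x01' above), and the full 16-byte counter block is the 4-byte implicit IV, the 8-byte explicit nonce, and a 32-bit big-endian block counter. A rough C# sketch of that layout (the variable names here are illustrative, not from the original code):
// Sketch: initial 16-byte counter block = 4-byte implicit IV || 8-byte explicit nonce || 32-bit counter (= 2).
byte[] clientIV = new byte[4];        // implicit IV from the TLS key block (placeholder)
byte[] explicitNonce = new byte[8];   // random explicit nonce, sent with the record (placeholder)
byte[] counterBlock = new byte[16];
Buffer.BlockCopy(clientIV, 0, counterBlock, 0, 4);
Buffer.BlockCopy(explicitNonce, 0, counterBlock, 4, 8);
counterBlock[15] = 2;                 // big-endian 32-bit counter, starting at 2 for the first data block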
An attempt to translate this to C# code is below (sorry for the amateurish code, I am a newbie):
EDIT:
Created an array which got values from GetBytes, and printed the result:
byte[] incr = BitConverter.GetBytes((int) 2);
cf.printBuf(incr, (String) "Array:");
return;
Noticed that the result was "02 00 00 00". Hence I guess my machine is little endian
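For reference, a check like the following (a small sketch, not part of the original program) makes the byte order explicit instead of guessing from the printed bytes:
// BitConverter follows the machine's native byte order.
Console.WriteLine(BitConverter.IsLittleEndian);   // True on typical x86/x64 machines
byte[] two = BitConverter.GetBytes(2);            // {0x02, 0x00, 0x00, 0x00} on a little-endian machine
if (BitConverter.IsLittleEndian)
    Array.Reverse(two);                           // {0x00, 0x00, 0x00, 0x02}, i.e. network (big-endian) order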
Made some changes to the code as rodrigogq mentioned. Below is the latest code. It is still not working:
Verified that GHASH, GF_mult and H_mult are giving the same results. Below is the verification code:
Python:
key = "\xab\xcd\xab\xcd"
key = key * 10
h = "\x00\x00"
a = AES_GCM(key, 128, h)
H = 200
A = "\x02" * 95
C = "\x02" * 95
D = a.GHASH(H, A, C)
print(D)
C#:
BigInteger H = new BigInteger(200);
byte[] A = new byte[95];
byte[] C = new byte[95];
for (int i = 0; i < 95; i ++)
{
A[i] = 2;
C[i] = 2;
}
BigInteger a = e.GHASH(H, A, C);
Console.WriteLine(a);
Results:
For both: 129209628709014910494696220101529767594
EDIT: Now the outputs are agreeing between Python and C#. So essentially the porting is done :) However, these outputs still don't agree with Wireshark, so the handshake is still failing. Maybe something is wrong with the procedure or the contents. Below is the working code.
EDIT: Finally managed to get the code working. Below is the code that resulted in a successful handshake
Working Code:
/*
* Receiving seqNum as UInt64 and content_type as byte
*
*/
public byte[] AES_Encrypt_GCM(byte[] client_write_key, byte[] client_write_iv, byte[] plaintext, UInt64 seqNum, byte content_type)
{
int plaintext_size = plaintext.Length;
List<byte> temp = new List<byte>();
byte[] init_bytes = new byte[16];
Array.Clear(init_bytes, 0, 16);
byte[] encrypted = AES_Encrypt_ECB(init_bytes, client_write_key, 128);
Array.Reverse(encrypted);
BigInteger H_client = new BigInteger(encrypted);
if (H_client < 0)
{
temp.Clear();
temp.TrimExcess();
temp.AddRange(H_client.ToByteArray());
temp.Add(0);
H_client = new BigInteger(temp.ToArray());
}
Random rnd = new Random();
byte[] random = new byte[8];
rnd.NextBytes(random);
/*
* incr is little endian, but it needs to be in big endian format
*
*/
byte[] incr = BitConverter.GetBytes((int) 2);
Array.Reverse(incr);
/*
* Counter = First 4 bytes of IV + 8 Random bytes + 4 bytes of sequential value (starting at 2)
*
*/
temp.Clear();
temp.TrimExcess();
temp.AddRange(client_write_iv);
temp.AddRange(random);
byte[] iv = temp.ToArray();
temp.AddRange(incr);
byte[] counter = temp.ToArray();
AES_CTR aesctr = new AES_CTR(counter);
ICryptoTransform ctrenc = aesctr.CreateEncryptor(client_write_key, null);
byte[] ctext = ctrenc.TransformFinalBlock(plaintext, 0, plaintext_size);
byte[] seq_num = BitConverter.GetBytes(seqNum);
/*
* Using UInt16 instead of short
*
*/
byte[] tls_version = BitConverter.GetBytes((UInt16) 771);
Console.WriteLine("Plain Text size = {0}", plaintext_size);
byte[] plaintext_size_array = BitConverter.GetBytes((UInt16) plaintext_size);
/*
* Size was returned as 10 00 instead of 00 10
*
*/
Array.Reverse(plaintext_size_array);
temp.Clear();
temp.TrimExcess();
temp.AddRange(seq_num);
temp.Add(content_type);
temp.AddRange(tls_version);
temp.AddRange(plaintext_size_array);
byte[] auth_data = temp.ToArray();
BigInteger auth_tag = GHASH(H_client, auth_data, ctext);
Console.WriteLine("H = {0}", H_client);
this.printBuf(plaintext, "plaintext = ");
this.printBuf(auth_data, "A = ");
this.printBuf(ctext, "C = ");
this.printBuf(client_write_key, "client_AES_key = ");
this.printBuf(iv.ToArray(), "iv = ");
Console.WriteLine("Auth Tag just after GHASH: {0}", auth_tag);
AesCryptoServiceProvider aes2 = new AesCryptoServiceProvider();
aes2.Key = client_write_key;
aes2.Mode = CipherMode.ECB;
aes2.Padding = PaddingMode.None;
aes2.KeySize = 128;
ICryptoTransform transform1 = aes2.CreateEncryptor();
byte[] cval = {0, 0, 0, 1};
temp.Clear();
temp.TrimExcess();
temp.AddRange(iv);
temp.AddRange(cval);
byte[] encrypted1 = AES_Encrypt_ECB(temp.ToArray(), client_write_key, 128);
Array.Reverse(encrypted1);
BigInteger nenc = new BigInteger(encrypted1);
if (nenc < 0)
{
temp.Clear();
temp.TrimExcess();
temp.AddRange(nenc.ToByteArray());
temp.Add(0);
nenc = new BigInteger(temp.ToArray());
}
this.printBuf(nenc.ToByteArray(), "NENC = ");
Console.WriteLine("NENC: {0}", nenc);
auth_tag ^= nenc;
byte[] auth_tag_array = auth_tag.ToByteArray();
Array.Reverse(auth_tag_array);
this.printBuf(auth_tag_array, "Final Auth Tag Byte Array: ");
Console.WriteLine("Final Auth Tag: {0}", auth_tag);
this.printBuf(random, "Random sent = ");
temp.Clear();
temp.TrimExcess();
temp.AddRange(random);
temp.AddRange(ctext);
temp.AddRange(auth_tag_array);
return temp.ToArray();
}
public void printBuf(byte[] data, String heading)
{
int numBytes = 0;
Console.Write(heading + "\"");
if (data == null)
{
return;
}
foreach (byte element in data)
{
Console.Write("\\x{0}", element.ToString("X2"));
numBytes = numBytes + 1;
if (numBytes == 32)
{
Console.Write("\r\n");
numBytes = 0;
}
}
Console.Write("\"\r\n");
}
public BigInteger GF_mult(BigInteger x, BigInteger y)
{
BigInteger product = new BigInteger(0);
BigInteger e10 = BigInteger.Parse("00E1000000000000000000000000000000", NumberStyles.AllowHexSpecifier);
/*
* Below operation y >> i fails if i is UInt32, so leaving it as int
*
*/
int i = 127;
while (i != -1)
{
product = product ^ (x * ((y >> i) & 1));
x = (x >> 1) ^ ((x & 1) * e10);
i = i - 1;
}
return product;
}
public BigInteger H_mult(BigInteger H, BigInteger val)
{
BigInteger product = new BigInteger(0);
int i = 0;
/*
* Below operation (val & 0xFF) << (8 * i) fails if i is UInt32, so leaving it as int
*
*/
while (i < 16)
{
product = product ^ GF_mult(H, (val & 0xFF) << (8 * i));
val = val >> 8;
i = i + 1;
}
return product;
}
public BigInteger GHASH(BigInteger H, byte[] A, byte[] C)
{
int C_len = C.Length;
List <byte> temp = new List<byte>();
int plen = 16 - (A.Length % 16);
byte[] zeroes = new byte[plen];
Array.Clear(zeroes, 0, zeroes.Length);
temp.AddRange(A);
temp.AddRange(zeroes);
temp.Reverse();
BigInteger A_padded = new BigInteger(temp.ToArray());
temp.Clear();
temp.TrimExcess();
byte[] C1;
if ((C_len % 16) != 0)
{
plen = 16 - (C_len % 16);
byte[] zeroes1 = new byte[plen];
Array.Clear(zeroes, 0, zeroes.Length);
temp.AddRange(C);
temp.AddRange(zeroes1);
C1 = temp.ToArray();
}
else
{
C1 = new byte[C.Length];
Array.Copy(C, 0, C1, 0, C.Length);
}
temp.Clear();
temp.TrimExcess();
BigInteger tag = new BigInteger();
tag = H_mult(H, A_padded);
this.printBuf(H.ToByteArray(), "H Byte Array:");
for (int i = 0; i < (int) (C1.Length / 16); i ++)
{
byte[] toTake;
if (i == 0)
{
toTake = C1.Take(16).ToArray();
}
else
{
toTake = C1.Skip(i * 16).Take(16).ToArray();
}
Array.Reverse(toTake);
BigInteger tempNum = new BigInteger(toTake);
tag ^= tempNum;
tag = H_mult(H, tag);
}
byte[] A_arr = BitConverter.GetBytes((long) (8 * A.Length));
/*
* Want length to be "00 00 00 00 00 00 00 xy" format
*
*/
Array.Reverse(A_arr);
byte[] C_arr = BitConverter.GetBytes((long) (8 * C_len));
/*
* Want length to be "00 00 00 00 00 00 00 xy" format
*
*/
Array.Reverse(C_arr);
temp.AddRange(A_arr);
temp.AddRange(C_arr);
temp.Reverse();
BigInteger array_int = new BigInteger(temp.ToArray());
tag = tag ^ array_int;
tag = H_mult(H, tag);
return tag;
}
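For completeness, a usage sketch of AES_Encrypt_GCM (the class name, key material and plaintext below are placeholders, not values from the actual handshake):
// Hypothetical usage; in the real handshake the key and IV come from the TLS key block.
var tls = new MyTlsCrypto();                 // placeholder name for the class containing the methods above
byte[] clientWriteKey = new byte[16];        // 128-bit client_write_key (placeholder)
byte[] clientWriteIV = new byte[4];          // 4-byte implicit client_write_IV (placeholder)
byte[] record = { 0x01, 0x02, 0x03 };        // plaintext to protect
byte[] wire = tls.AES_Encrypt_GCM(clientWriteKey, clientWriteIV, record, 0, 23);   // 23 = application_data
// wire = 8-byte explicit nonce || ciphertext || 16-byte auth tag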
Using SSL decryption in wireshark (using private key), I found that:
The nonce calculated by the C# code is the same as in Wireshark (the fixed part is client_write_IV and the variable part is the 8 random bytes)
The value of AAD (auth_data above) (client_write_key, seqNum + ctype + tls_version + plaintext_size) matches the Wireshark value
The ciphertext (ctext above, the C in GHASH(H, A, C)) also matches the Wireshark-calculated value
However, the auth_tag calculation (GHASH(H_client, auth_data, ctext)) is failing. It would be great if someone could guide me as to what could be wrong in the GHASH function. I did a basic comparison of the results of the GF_mult function in Python and C#, and those results are not matching either.
This is not a final solution, just some advice. I have seen that you are using the function BitConverter.GetBytes a lot, with int instead of Int32 or Int16.
The remarks from the official documentation say:
The order of bytes in the array returned by the GetBytes method depends on whether the computer architecture is little-endian or big-endian.
As for the BigInteger structure, it always seems to expect little-endian order:
value
Type: System.Byte[]
An array of byte values in little-endian order.
Prefer the explicit Int32 and Int16 types, and pay attention to the order of the bytes before using them in these calculations.
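For example, turning a big-endian byte array (such as the output of an AES-ECB encryption) into a non-negative BigInteger needs both a reversal and, when the top bit is set, an extra zero byte; this mirrors what the working code above does with H_client. A condensed sketch (bigEndian is a stand-in for whatever array you start from):
byte[] littleEndian = (byte[])bigEndian.Clone();
Array.Reverse(littleEndian);                                   // BigInteger expects little-endian input
if ((littleEndian[littleEndian.Length - 1] & 0x80) != 0)
    Array.Resize(ref littleEndian, littleEndian.Length + 1);   // trailing 0x00 keeps the value non-negative
BigInteger h = new BigInteger(littleEndian);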
Use log4net to log all the operations. It would be nice to put the same logs in the Python program so that you can compare them side by side and check exactly where the calculations diverge.
Hope this gives you some tips on where to start.
I have this function in a Java program.
private static byte[] converToByte(String s)
{
byte[] output = new byte[s.length() / 2];
for (int i = 0, j = 0; i < s.length(); i += 2, j++)
{
output[j] = (byte)(Integer.parseInt(s.substring(i, i + 2), 16));
}
return output;
}
I am trying to create the same thing in C# but I'm having trouble. I tried this:
output[j] = (byte)(Int16.Parse(str.Substring(i, i + 2)));
But after a couple of iterations I got a System.OverflowException. What would be the equivalent instruction in C#?
Thanks.
private static sbyte[] converToByte(string s)
{
sbyte[] output = new sbyte[s.Length / 2];
for (int i = 0, j = 0; i < s.Length; i += 2, j++)
{
output[j] = (sbyte)(Convert.ToInt32(s.Substring(i, 2), 16));
}
return output;
}
You are using the wrong data type in your line:
output[j] = (byte)(Int16.Parse(str.Substring(i, i + 2)));
Short name   .NET class   Type               Width (bits)   Range
byte         Byte         Unsigned integer   8              0 to 255
short        Int16        Signed integer     16             -32,768 to 32,767
You are getting an overflow exception because an Int16 (short) is far too big to fit into a byte.
After struggling with this problem myself, I realised the real problem is that Java's substring method is:
substring(int beginIndex, int endIndex)
while C#'s implementation takes:
Substring(int startIndex, int length)
This means the same code in C# grabs ever larger chunks of characters, causing an overflow.
@Dave Doknjas was on the right track, but you can still convert to a byte with the corrected, smaller chunk size:
output[j] = Convert.ToByte(str.Substring(i, 2), 16);
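Putting both fixes together (a two-character chunk and base-16 parsing), a complete C# version might look like this sketch, mirroring the Java original:
private static byte[] converToByte(string s)
{
    byte[] output = new byte[s.Length / 2];
    for (int i = 0, j = 0; i < s.Length; i += 2, j++)
    {
        // Substring(i, 2) takes a length of 2, unlike Java's substring(i, i + 2), which takes an end index.
        output[j] = Convert.ToByte(s.Substring(i, 2), 16);
    }
    return output;
}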
I have to write a Vigenere encryption / decryption function that operates on full bytes (to encrypt and send files over TCP and then decrypt on the other side).
My encrypting function seems to be working (more or less; I can't really test it without the decrypting function).
This is the code of the encrypting function:
public static Byte[] encryptByteVigenere(Byte[] plaintext, string key)
{
Byte[] result= new Byte[plaintext.Length];
key = key.Trim().ToUpper();
int keyIndex = 0;
int keylength = key.Length;
for (int i = 0; i < plaintext.Length; i++)
{
keyIndex = keyIndex % keylength;
int shift = (int)key[keyIndex] - 65;
result[i] = (byte)(((int)plaintext[i] + shift) % 256);
keyIndex++;
}
return result;
}
However, the decrypting function, even though written in pretty much the same way, causes an error:
"Attempted to divide by zero."
The code of the decrypting function:
public static Byte[] decryptByteVigenere(Byte[] ciphertext, string key)
{
Byte[] result = new Byte[ciphertext.Length];
key = key.Trim().ToUpper();
int keyIndex = 0;
int keylength = key.Length;
for (int i = 0; i < ciphertext.Length; i++)
{
keyIndex = keyIndex % keylength;
int shift = (int)key[keyIndex] - 65;
result[i]= (byte)(((int)ciphertext[i] + 256 - shift) % 256);
keyIndex++;
}
return result;
}
The error points at the line
keyIndex = keyIndex % keylength;
But what puzzles me is that the code is pretty much the same as in the first function, where it doesn't cause any trouble. I'm testing it on the received file, which arrives correctly when sent without encryption. Could anyone help me with that?
EDIT:
The method / thread that is using the decryption function code:
public void fileListenThread()
{
try
{
fileServer.Start();
String receivedFileName = "test.dat";
String key = (textKlucz.Text).ToUpper();
while (true)
{
fileClient = fileServer.AcceptTcpClient();
NetworkStream streamFileServer = fileClient.GetStream();
int thisRead = 0;
int blockSize = 1024;
Byte[] dataByte = new Byte[blockSize];
Byte[] dataByteDecrypted = new Byte[blockSize];
FileStream fileStream = new FileStream(receivedFileName, FileMode.Create);
while (true)
{
thisRead = streamFileServer.Read(dataByte, 0, blockSize);
dataByteDecrypted = Program.decryptByteVigenere(dataByte, key);
fileStream.Write(dataByteDecrypted, 0, thisRead);
if (thisRead == 0)
break;
}
fileStream.Close();
}
}
catch (SocketException e)
{
MessageBox.Show("SocketException: " + e, "Wystąpił wyjątek", MessageBoxButtons.OK, MessageBoxIcon.Error);
}
}
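For what it's worth, keyIndex % keylength can only throw "Attempted to divide by zero" when keylength is 0, i.e. when the key string ends up empty (for example, if textKlucz.Text is read as an empty string on this thread). A defensive sketch (an assumption about the cause, not a confirmed fix), which also decrypts only the bytes actually read:
if (string.IsNullOrEmpty(key))
    throw new ArgumentException("Encryption key must not be empty.");

thisRead = streamFileServer.Read(dataByte, 0, blockSize);
if (thisRead == 0)
    break;
byte[] received = new byte[thisRead];
Array.Copy(dataByte, received, thisRead);                    // pass only the bytes actually received
dataByteDecrypted = Program.decryptByteVigenere(received, key);
fileStream.Write(dataByteDecrypted, 0, thisRead);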
OK, the problem was indeed the sending / receiving method, not the function itself. I still don't really know what caused it, but rewriting the functions helped. Thanks for your input!
I'm leaving this here in case someone needs such a function in the future... even though it's a rather trivial thing.
Cheers.
I am trying to write a Java equivalent for a function in C#. The code follows.
In C#:
byte[] a = new byte[sizeof(Int32)];
readBytes(fStream, a, 0, sizeof(Int32)); //fstream is System.IO.Filestream
int answer = BitConverter.ToInt32(a, 0);
In Java:
InputStream fstream = new FileInputStream(fileName);
DataInputStream in = new DataInputStream(fstream);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
byte[] a = new byte[4];
readBytes(in, a, 0, 4);
int answer = byteArrayToInt(a);
Both Java and C#:
int readBytes(Stream stream, byte[] storageBuffer, int offset, int requiredCount)
{
int totalBytesRead = 0;
while (totalBytesRead < requiredCount)
{
int bytesRead = stream.Read(
storageBuffer,
offset + totalBytesRead,
requiredCount - totalBytesRead);
if (bytesRead == 0)
{
break; // while
}
totalBytesRead += bytesRead;
}
return totalBytesRead;
}
Output:
In C#: answer = 192 (correct)
In Java: answer = -1073741824
There is a difference between the two. I am reading from an encoded file input stream and parsing the first four bytes. The C# code produces 192, which is the correct answer, while Java produces -1073741824, which is wrong. Why, and how can I fix it?
EDIT
Here is my byteArrayToInt
public static int byteArrayToInt(byte[] b, int offset) {
int value = 0;
for (int i = 0; i < 4; i++) {
int shift = (4 - 1 - i) * 8;
value += (b[i + offset] & 0x000000FF) << shift;
}
return value;
}
SOLUTION
The right solution for byteArrayToInt
public static int byteArrayToInt(byte[] b)
{
long value = 0;
for (int i = 0; i < b.length; i++)
{
value += (b[i] & 0xff) << (8 * i);
}
return (int) value;
}
This gives the right output
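One detail worth noting: -1073741824 is 0xC0000000, i.e. the 0xC0 byte shifted into the most significant position. The original helper assembled the int big-endian, while BitConverter.ToInt32 on a little-endian machine treats the first byte as the least significant one, which is why the corrected little-endian loop above agrees with the C# result. A small C# illustration (the byte values are inferred from the reported outputs):
byte[] a = { 0xC0, 0x00, 0x00, 0x00 };
int little = BitConverter.ToInt32(a, 0);                       // 192 on a little-endian machine
int big = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3];    // 0xC0000000 == -1073741824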
In Java bytes are signed, so your -64 in a Java byte is the binary equivalent of 192 in a C# byte (192 == 256 - 64).
The problem is probably in byteArrayToInt(), where you assume the byte is unsigned during the conversion.
A simple
`b & 0x000000FF`
might help in that case.
Java's byte type is signed, as soulcheck wrote. The value 192 as an unsigned 8-bit integer is 11000000 in binary.
If you read that same bit pattern as a signed (two's-complement) byte, the leading 1 makes it negative: 11000000 is -64.
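The same sign behaviour can be reproduced on the C# side with sbyte, the signed counterpart of Java's byte; masking with 0xFF recovers the unsigned value, which is exactly what the working byteArrayToInt does:
sbyte signed = unchecked((sbyte)0xC0);   // -64, the same bit pattern a Java byte would hold
int unsignedValue = signed & 0xFF;       // 192: the mask undoes the sign extension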