Related
Problem
I have a BigInteger array which I'm converting to a byte array as:
var ciphertext = new BigInteger[ciphertextBlocks];
...
byte[] ciphertextBytes = ciphertext.SelectMany(c => c.ToByteArray()).ToArray();
Now, I would like to get the original BigInteger array back. This seems impossible, since the individual size of each BigInteger is lost when it is converted to byte[].
Context
I understand this could very well be an XY problem. Therefore, here's what I'm doing. I'm implementing a simple cryptosystem called Merkle-Hellman Knapsack Cryptosystem for encryption and decryption.
I know that rolling your own cryptographic algorithms is frowned upon but I'm doing it purely for educational purposes.
Since the output of a cryptographic operation is represented as byte[], I'm converting the resultant ciphertext (BigInteger[]) to a byte[] in Encrypt().
However, I need access to the original BigInteger[] array in the Decrypt() method to individually decrypt each BigInteger. Is there a way to achieve this in the Decrypt() method?
If the type were a fixed-size primitive like long, I could do something like this:
long[] cipherTextInt = new long[ciphertext.Length / sizeof(long)];
Buffer.BlockCopy(ciphertext, 0, cipherTextInt, 0, ciphertext.Length);
But since BigInteger can be arbitrarily large, how can this be done?
public class KnapsackCryptosystem
{
    const int BitsPerByte = 8;

    // Width of the little-endian length prefix written before each
    // serialized BigInteger in the ciphertext byte stream.
    const int LengthPrefixBytes = sizeof(int);

    public PublicKeyInfo PublicKey { get; }
    private PrivateKeyInfo PrivateKey { get; }

    /// <summary>
    /// Encrypts <paramref name="plaintext"/> with the public knapsack sequence.
    /// Each block of Sequence.Count plaintext bits becomes one BigInteger:
    /// the subset sum of the public sequence elements whose bits are set.
    /// Each BigInteger is serialized as [4-byte little-endian length][bytes],
    /// so <see cref="Decrypt"/> can recover the array boundaries that a plain
    /// SelectMany(c => c.ToByteArray()) would lose.
    /// </summary>
    /// <param name="plaintext">Arbitrary bytes to encrypt.</param>
    /// <returns>Length-prefixed serialization of the ciphertext blocks.</returns>
    public byte[] Encrypt(byte[] plaintext)
    {
        var plaintextBits = new BitArray(plaintext);
        var sequence = PublicKey.Sequence;

        // Number of ciphertext blocks: one per Sequence.Count bits of
        // plaintext, plus a final partial block when the bit length is not
        // an exact multiple of the sequence length.
        var (quotient, remainder) = Math.DivRem(plaintextBits.Count, sequence.Count);
        var ciphertextBlocks = quotient + (remainder == 0 ? 0 : 1);

        var ciphertext = new BigInteger[ciphertextBlocks];

        // Apply the sequence to each plaintext block.
        for (int i = 0; i < ciphertextBlocks; i++)
        {
            // End index (exclusive) of the current block in the bit stream.
            int endIndex = (i + 1) * sequence.Count;

            for (int j = 0; j < sequence.Count; j++)
            {
                // Index of the jth bit counted from the end of the block.
                int bitIndex = endIndex - j - 1;

                // The final block may be partial; positions past the end of
                // the plaintext contribute 0 to the sum. The original code
                // caught ArgumentOutOfRangeException and broke out of the
                // loop, which also skipped the remaining *valid* low bits of
                // the final block; skipping just this position fixes that.
                if (bitIndex >= plaintextBits.Count)
                {
                    continue;
                }

                if (plaintextBits[bitIndex])
                {
                    // Add the jth element to the ith ciphertext block.
                    ciphertext[i] += sequence[j];
                }
            }
        }

        // Serialize as [length][bytes] records so the individual BigInteger
        // sizes survive the round trip through byte[].
        var buffer = new List<byte>();
        foreach (var value in ciphertext)
        {
            byte[] valueBytes = value.ToByteArray();
            buffer.AddRange(BitConverter.GetBytes(valueBytes.Length));
            buffer.AddRange(valueBytes);
        }
        return buffer.ToArray();
    }

    /// <summary>
    /// Decrypts a ciphertext produced by <see cref="Encrypt"/>.
    /// The returned plaintext is zero-padded up to a whole number of blocks
    /// and bytes; callers that need the exact original length must track it
    /// separately.
    /// </summary>
    /// <param name="ciphertext">Length-prefixed ciphertext from Encrypt.</param>
    /// <returns>The recovered (possibly zero-padded) plaintext bytes.</returns>
    public byte[] Decrypt(byte[] ciphertext)
    {
        var sequence = PrivateKey.Sequence;
        int wInverse = ModularArithmetic.MultiplicativeInverse(PrivateKey.W, PrivateKey.M);

        // Restore the BigInteger[] from the length-prefixed byte stream.
        var ciphertextInts = FromLengthPrefixedBytes(ciphertext);

        // Every block decodes to Sequence.Count bits.
        var plaintextBits = new BitArray(ciphertextInts.Length * sequence.Count);

        for (int i = 0; i < ciphertextInts.Length; i++)
        {
            // Undo the modular multiplication to obtain the easy
            // (superincreasing) subset-sum instance.
            var ciphertextElement = (wInverse * ciphertextInts[i]) % PrivateKey.M;

            // End index (exclusive) of the current block in the bit stream.
            int endIndex = (i + 1) * sequence.Count;

            // Solve the superincreasing subset-sum greedily, largest first.
            for (int j = sequence.Count - 1; j >= 0; j--)
            {
                if (sequence[j] <= ciphertextElement)
                {
                    // Index of the jth bit counted from the end of the block.
                    int bitIndex = endIndex - j - 1;
                    plaintextBits[bitIndex] = true;
                    ciphertextElement -= sequence[j];
                }
            }
        }

        // Copy BitArray data to a byte[] (rounding up to whole bytes).
        var (quotient, remainder) = Math.DivRem(plaintextBits.Length, BitsPerByte);
        byte[] bytes = new byte[quotient + (remainder == 0 ? 0 : 1)];
        plaintextBits.CopyTo(bytes, 0);
        return bytes;
    }

    // Parses the [length][bytes] records written by Encrypt back into the
    // original BigInteger array.
    private static BigInteger[] FromLengthPrefixedBytes(byte[] data)
    {
        var values = new List<BigInteger>();
        int offset = 0;
        while (offset < data.Length)
        {
            int length = BitConverter.ToInt32(data, offset);
            offset += LengthPrefixBytes;
            var chunk = new byte[length];
            Array.Copy(data, offset, chunk, 0, length);
            offset += length;
            values.Add(new BigInteger(chunk));
        }
        return values.ToArray();
    }

    /// <summary>
    /// Creates a cryptosystem from a private superincreasing sequence, a
    /// modulus m greater than the sequence sum, and a multiplier w coprime
    /// to m. The public sequence is (w * item) % m for each item.
    /// </summary>
    /// <exception cref="ArgumentException">
    /// The sequence is empty or not superincreasing, or w is not coprime to m.
    /// </exception>
    /// <exception cref="ArgumentOutOfRangeException">
    /// m is not greater than the sum of the sequence.
    /// </exception>
    public static KnapsackCryptosystem Create(
        IReadOnlyList<int> sequence, int m, int w)
    {
        if (sequence.Count == 0)
        {
            throw new ArgumentException(
                "Sequence must be non-empty.",
                nameof(sequence));
        }

        // Verify the superincreasing property: every element must exceed
        // the sum of all elements before it.
        int sum = sequence[0];
        for (int i = 1; i < sequence.Count; i++)
        {
            if (sequence[i] <= sum)
            {
                throw new ArgumentException(
                    "Not a superincreasing sequence.",
                    nameof(sequence));
            }
            sum += sequence[i];
        }

        if (m <= sum)
        {
            throw new ArgumentOutOfRangeException(
                nameof(m),
                "m must be greater than the sum of the sequence.");
        }

        if (MathHelpers.Gcd(m, w) != 1)
        {
            throw new ArgumentException(
                "w must be coprime to m.",
                nameof(w));
        }

        IReadOnlyList<int> publicSequence = GeneratePublicSequence(sequence, m, w);
        return new KnapsackCryptosystem(
            new PublicKeyInfo(publicSequence),
            new PrivateKeyInfo(sequence, m, w)
        );
    }

    // Multiply each item in the private sequence by w and reduce mod m to
    // obtain the public (hard) knapsack sequence.
    private static IReadOnlyList<int> GeneratePublicSequence(
        IReadOnlyList<int> sequence, int m, int w) => (
        from item in sequence
        select (w * item) % m)
        .ToList()
        .AsReadOnly();

    public readonly record struct PublicKeyInfo(IReadOnlyList<int> Sequence);

    // M is the modulus greater than sum of public sequence
    // W is the integer coprime to M
    private readonly record struct PrivateKeyInfo(IReadOnlyList<int> Sequence, int M, int W);

    private KnapsackCryptosystem(PublicKeyInfo publicKey, PrivateKeyInfo privateKey)
    {
        PublicKey = publicKey;
        PrivateKey = privateKey;
    }
}
This question already has an answer here:
C# BigInteger.ModPow bug?
(1 answer)
Closed 2 years ago.
I've been working on a .NET project where I need BigIntegers and I've noticed that the framework's implementation delivers what appears to be incorrect results. After hours of trying to find what I'm doing wrong I decided to test my algorithm in Java and C++ (using OpenSSL) and shockingly in both languages I get the expected results.
Now I'm naturally wondering what I'm doing wrong (since there is no way on earth this is a bug that hasn't been noticed before) and I hope someone can help me!
This is the reduced C# code:
using System;
using System.Numerics;
using System.Globalization;
public class Program
{
    public static void Main()
    {
        var B = BigInteger.Parse("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", NumberStyles.AllowHexSpecifier);
        var k = BigInteger.Parse("3", NumberStyles.AllowHexSpecifier);
        var x = BigInteger.Parse("09F015DB40A59403E42FBD568AF5774A0A0488A62", NumberStyles.AllowHexSpecifier);
        var g = BigInteger.Parse("7", NumberStyles.AllowHexSpecifier);
        var N = BigInteger.Parse("0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7", NumberStyles.AllowHexSpecifier);
        var u = BigInteger.Parse("0AC06F615645BEA9B3D6D887C30D28D71B079B598", NumberStyles.AllowHexSpecifier);
        var a = BigInteger.Parse("0D4515CA7747787F1DDA9962ACE81E8412D9D20D06251696ACD74735F1F3B9875", NumberStyles.AllowHexSpecifier);
        var S = calc(B, k, x, g, N, u, a);
        // Now prints 218BC3CE... matching Java/OpenSSL.
        Console.WriteLine(S.ToString("X"));
    }

    /// <summary>
    /// Computes (B - k*g^x)^(a + u*x) mod N, normalizing the base into
    /// [0, N) first. C#'s % operator is a remainder that keeps the sign of
    /// the dividend, so B - k*ModPow(g, x, N) can be negative and
    /// BigInteger.ModPow of a negative base returns a negative result —
    /// unlike Java's BigInteger.modPow and OpenSSL's BN_mod_exp, which
    /// reduce into [0, N). Since k*ModPow(g, x, N) &lt; k*N, adding N*k
    /// before the final % N makes the base non-negative.
    /// </summary>
    // internal (was default/private) so the fix can be exercised by tests;
    // existing callers inside this class are unaffected.
    internal static BigInteger calc(BigInteger B, BigInteger k, BigInteger x, BigInteger g, BigInteger N, BigInteger u, BigInteger a)
    {
        var val = (B - k * BigInteger.ModPow(g, x, N) + N * k) % N;
        var exponent = a + u * x;
        return BigInteger.ModPow(val, exponent, N);
    }
}
You can execute it here: https://dotnetfiddle.net/qXXiBk
Same code in Java:
import java.math.BigInteger;
public class Main
{
    /** Radix used for all parsing and printing in this demo. */
    private static final int HEX = 16;

    public static void main(String[] args)
    {
        BigInteger B = new BigInteger("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", HEX);
        BigInteger k = new BigInteger("3", HEX);
        BigInteger x = new BigInteger("09F015DB40A59403E42FBD568AF5774A0A0488A62", HEX);
        BigInteger g = new BigInteger("7", HEX);
        BigInteger N = new BigInteger("0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7", HEX);
        BigInteger u = new BigInteger("0AC06F615645BEA9B3D6D887C30D28D71B079B598", HEX);
        BigInteger a = new BigInteger("0D4515CA7747787F1DDA9962ACE81E8412D9D20D06251696ACD74735F1F3B9875", HEX);
        System.out.println(calc(B, k, x, g, N, u, a).toString(HEX));
    }

    /**
     * Computes (B - k*g^x mod N)^(a + u*x) mod N. Java's modPow always
     * reduces into [0, N), even for a negative base.
     */
    private static BigInteger calc(BigInteger B, BigInteger k, BigInteger x, BigInteger g, BigInteger N, BigInteger u, BigInteger a)
    {
        BigInteger base = B.subtract(g.modPow(x, N).multiply(k));
        BigInteger exp = u.multiply(x).add(a);
        return base.modPow(exp, N);
    }
}
You can execute it here: https://www.onlinegdb.com/BJXxMiO28
And finally a quick and dirty C++ implementation using OpenSSL:
#include <iostream>
#include <openssl/bn.h>
class BigInteger
{
public:
BigInteger(char const* hexString, BN_CTX *ctx)
: bn_{BN_new()}
, ctx_{ctx}
{
BN_hex2bn(&bn_, hexString);
}
~BigInteger()
{
BN_free(bn_);
}
BigInteger ModPow(BigInteger const& exponent, BigInteger const& modulo) const
{
BigInteger ret{"0", ctx_};
BN_mod_exp(ret.bn_, bn_, exponent.bn_, modulo.bn_, ctx_);
return ret;
}
BigInteger Subtract(BigInteger const& rhs) const
{
BigInteger ret{"0", ctx_};
BN_sub(ret.bn_, bn_, rhs.bn_);
return ret;
}
BigInteger Multiply(BigInteger const& rhs) const
{
BigInteger ret{"0", ctx_};
BN_mul(ret.bn_, bn_, rhs.bn_, ctx_);
return ret;
}
BigInteger Add(BigInteger const& rhs) const
{
BigInteger ret{"0", ctx_};
BN_add(ret.bn_, bn_, rhs.bn_);
return ret;
}
std::string ToString() const
{
return BN_bn2hex(bn_);
}
private:
BIGNUM* bn_;
BN_CTX *ctx_;
};
// S = (B - k * g^x mod N) ^ (a + u*x) mod N.
// BN_mod_exp (like Java's modPow) reduces a negative base into [0, N).
BigInteger calc(BigInteger const& B, BigInteger const& k, BigInteger const& x, BigInteger const& g, BigInteger const& N, BigInteger const& u, BigInteger const& a)
{
    BigInteger gPowX = g.ModPow(x, N);
    BigInteger base = B.Subtract(k.Multiply(gPowX));
    BigInteger exp = a.Add(u.Multiply(x));
    return base.ModPow(exp, N);
}
int main()
{
    // Shared scratch context for all BN_* operations; freed at the end.
    BN_CTX *ctx = BN_CTX_new();
    // Same SRP-style inputs as the C# and Java versions above.
    BigInteger B{"023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", ctx};
    BigInteger k{"3", ctx};
    BigInteger x{"09F015DB40A59403E42FBD568AF5774A0A0488A62", ctx};
    BigInteger g{"7", ctx};
    BigInteger N{"0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7", ctx};
    BigInteger u{"0AC06F615645BEA9B3D6D887C30D28D71B079B598", ctx};
    BigInteger a{"0D4515CA7747787F1DDA9962ACE81E8412D9D20D06251696ACD74735F1F3B9875", ctx};
    // NOTE(review): calc returns BigInteger by value; with the class as
    // written (owning BIGNUM*, default copy ctor, BN_free in dtor) any
    // non-elided copy would double-free — confirm copy semantics of the
    // BigInteger class above.
    auto S = calc(B, k, x, g, N, u, a);
    std::cout << S.ToString();
    BN_CTX_free(ctx);
}
You can execute it here: https://godbolt.org/z/PtNGdQ
Again, both C++ and Java agree on the answer being 218BC3CE2641EFF5F4BB95A2DB931CA62A933C6BA40D3F6E2AD5D5F7D41F0E0A and only C# says it's 98405F6F9C609C9A370E3A17B28CCC5322918ADCE44DE0DE7F995370A9E07253. This is an actual show-stopper since I need to work on systems that require the first (correct) answer. I'm really at a loss here and I sincerely hope that somebody knows what I'm doing wrong.
Cheers
Python also agrees the answer should be 218bc3ce2641eff5f4bb95a2db931ca62a933c6ba40d3f6e2ad5d5f7d41f0e0a
and the problem doesn't seem the hex parsing (even parsing the decimal version of the values the result is the same).
I think you've the correct attitude about thinking that's not possible that it's a bug in the big integer in that C# implementation, but this actually seems to me a screaming evidence this is the case (even if I must say I'm not a C# programmer, only played with it a bit).
You should in my opinion file a bug report.
EDIT
As Sir Rufo pointed out correctly in the comments the problem is in how modulo operation is handled in C# for negative dividends, changing the code to
var val = (B - k * BigInteger.ModPow(g, x, N) + N*k) % N;
produces the expected result.
I would say still a bug, but a design bug and not going to be fixed.
Information
Lets have a look inside the calc method:
When we compare the hex output values in C# val.ToString("X") and Java val.toString(16) we will get different outputs:
C#: F4EB82A8CAFDA89F0E2B69C3C4FEF2920913B60DD701C2193C41AE7EC6BC1A38B
Java: -b147d5735025760f1d4963c3b010d6df6ec49f228fe3de6c3be51813943e5c75
but when we use the decimal output values in C# val.ToString() and Java val.toString(10) we will get the same outputs:
C#: -80186293521643543106092742417459818853945355375849134884320433064971933211765
Java: -80186293521643543106092742417459818853945355375849134884320433064971933211765
This answer is based on comparing the hex outputs, which — as shown above — are not directly comparable between the two implementations.
(Posting this as an answer because this won't fit into a comment, but making it a community wiki):
The difference between the C# and Java versions happens inside calc. When I separate-out the intermediate values like so:
CSharp
BigInteger B = BigInteger.Parse("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", NumberStyles.AllowHexSpecifier);
BigInteger k = BigInteger.Parse("3", NumberStyles.AllowHexSpecifier);
BigInteger g = BigInteger.Parse("7", NumberStyles.AllowHexSpecifier);
BigInteger x = BigInteger.Parse("09F015DB40A59403E42FBD568AF5774A0A0488A62", NumberStyles.AllowHexSpecifier);
BigInteger N = BigInteger.Parse("0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7", NumberStyles.AllowHexSpecifier);
Console.WriteLine( "B == " + B.ToString("X") );
Console.WriteLine( "k == " + k.ToString("X") );
Console.WriteLine( "g == " + g.ToString("X") );
Console.WriteLine( "x == " + x.ToString("X") );
Console.WriteLine( "N == " + N.ToString("X") );
Console.WriteLine( "-------" );
BigInteger p = BigInteger.ModPow(g, x, N);
Console.WriteLine( "p == " + p.ToString("X") );
BigInteger m = k * p;
Console.WriteLine( "m == " + m.ToString("X") );
BigInteger d = B - m;
Console.WriteLine("d == " + d.ToString("X"));
Java
BigInteger B = new BigInteger("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", 16);
BigInteger k = new BigInteger("3", 16);
BigInteger g = new BigInteger("7", 16);
BigInteger x = new BigInteger("09F015DB40A59403E42FBD568AF5774A0A0488A62", 16);
BigInteger N = new BigInteger("0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7", 16);
System.out.println("B == " + B.toString(16));
System.out.println("k == " + k.toString(16));
System.out.println("g == " + g.toString(16));
System.out.println("x == " + x.toString(16));
System.out.println("N == " + N.toString(16));
System.out.println("-------");
BigInteger p = g.modPow(x, N);
System.out.println("p == " + p.toString(16));
BigInteger m = k.multiply(p);
System.out.println("m == " + m.toString(16));
BigInteger d = B.subtract(m);
System.out.println("d == " + d.toString(16));
These gives me this output:
CSharp:
B == 23B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE
k == 3
g == 7
x == 09F015DB40A59403E42FBD568AF5774A0A0488A62
N == 0894B645E89E1535BBDAD5B8B290650530801B18EBFBF5E8FAB3C82872A3E9BB7
-------
p == 46FF4F26CC2AB0EA82B849044AC68D6CC772C8232086C890C0FBC5DE13BA3111
m == 0D4FDED74648012BF8828DB0CE053A84656585869619459B242F3519A3B2E9333
value == F4EB82A8CAFDA89F0E2B69C3C4FEF2920913B60DD701C2193C41AE7EC6BC1A38B
Java:
B == 23b61801145a9cb06adf77493042d166e793b946d1b07b46070e3986a6f036be
k == 3
g == 7
x == 9f015db40a59403e42fbd568af5774a0a0488a62
N == 894b645e89e1535bbdad5b8b290650530801b18ebfbf5e8fab3c82872a3e9bb7
-------
p == 46ff4f26cc2ab0ea82b849044ac68d6cc772c8232086c890c0fbc5de13ba3111
m == d4fded74648012bf8828db0ce053a84656585869619459b242f3519a3b2e9333
d == -b147d5735025760f1d4963c3b010d6df6ec49f228fe3de6c3be51813943e5c75
So it's something weird going on in B - m and not the ModPow call.
Part 2
Let's reduce this case down to d = B - m by serializing the BigInteger values (I verified they're being serialized correctly):
CSharp
BigInteger B = BigInteger.Parse("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", NumberStyles.AllowHexSpecifier);
Console.WriteLine( "B == " + B.ToString("X") )
BigInteger m = new BigInteger( new Byte[] { 51, 147, 46, 59, 154, 81, 243, 66, 178, 89, 148, 97, 105, 88, 88, 86, 70, 168, 83, 224, 12, 219, 40, 136, 191, 18, 128, 100, 116, 237, 253, 212, 0 } );
Console.WriteLine( "m == " + m.ToString("X") )
BigInteger d = B - m;
Console.WriteLine( "d == " + d.ToString("X") )
Java:
BigInteger B = new BigInteger("023B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE", 16);
BigInteger m = new BigInteger( new byte[] { 0, -44, -3, -19, 116, 100, -128, 18, -65, -120, 40, -37, 12, -32, 83, -88, 70, 86, 88, 88, 105, 97, -108, 89, -78, 66, -13, 81, -102, 59, 46, -109, 51 } );
System.out.println("B == " + B.toString(16));
System.out.println("m == " + m.toString(16));
BigInteger d = B.subtract(m);
System.out.println("d == " + d.toString(16));
This shows that both C# and Java have the same values for B and m and different values for d:
// C#:
B == 23B61801145A9CB06ADF77493042D166E793B946D1B07B46070E3986A6F036BE
m == 0D4FDED74648012BF8828DB0CE053A84656585869619459B242F3519A3B2E9333
d == F4EB82A8CAFDA89F0E2B69C3C4FEF2920913B60DD701C2193C41AE7EC6BC1A38B
// Java:
B == 23b61801145a9cb06adf77493042d166e793b946d1b07b46070e3986a6f036be
m == d4fded74648012bf8828db0ce053a84656585869619459b242f3519a3b2e9333
d == -b147d5735025760f1d4963c3b010d6df6ec49f228fe3de6c3be51813943e5c75
The question is - does F4EB82A8CAFDA89F0E2B69C3C4FEF2920913B60DD701C2193C41AE7EC6BC1A38B represent the same value as -b147d5735025760f1d4963c3b010d6df6ec49f228fe3de6c3be51813943e5c75?
I am trying to port AES GCM implementation in python OpenTLS project, to C# (.Net). Below is the code in OpenTLS code:
#######################
### Galois Counter Mode
#######################
class AES_GCM:
    # TLS AES-GCM record protection (OpenTLS project).
    #
    # NOTE(review): this code targets Python 2 — it mixes str and bytes
    # freely ('\x00'*16 passed to AES.encrypt, iv concatenated with str
    # in decrypt). Several expressions would raise under Python 3.
    # Helpers bytes_to_int, nb_to_n_bytes, nb_to_bytes, bytes_to_hex,
    # TLS_VERSION, AES, Counter and os are defined elsewhere in the
    # project — assumed, confirm against the surrounding module.

    def __init__(self, keys, key_size, hash):
        # Split the TLS key block: client write key, server write key,
        # then the two 4-byte implicit IV halves (client first).
        key_size //= 8
        hash_size = hash.digest_size
        self.client_AES_key = keys[0 : key_size]
        self.server_AES_key = keys[key_size : 2*key_size]
        self.client_IV = keys[2*key_size : 2*key_size+4]
        self.server_IV = keys[2*key_size+4 : 2*key_size+8]
        # H = AES_K(0^128): the GHASH subkey, stored as an integer.
        self.H_client = bytes_to_int(AES.new(self.client_AES_key, AES.MODE_ECB).encrypt('\x00'*16))
        self.H_server = bytes_to_int(AES.new(self.server_AES_key, AES.MODE_ECB).encrypt('\x00'*16))

    def GF_mult(self, x, y):
        # Carry-less multiplication of x and y in GF(2^128), reducing with
        # the GCM polynomial (0xE1 followed by 15 zero bytes — the
        # bit-reflected form of x^128 + x^7 + x^2 + x + 1).
        product = 0
        for i in range(127, -1, -1):
            # XOR in a shifted copy of x for each set bit of y.
            product ^= x * ((y >> i) & 1)
            # Shift x right; fold the dropped bit back in via the polynomial.
            x = (x >> 1) ^ ((x & 1) * 0xE1000000000000000000000000000000)
        return product

    def H_mult(self, H, val):
        # Multiply H by a 16-byte value, one byte at a time, consuming val
        # least-significant byte first.
        product = 0
        for i in range(16):
            product ^= self.GF_mult(H, (val & 0xFF) << (8 * i))
            val >>= 8
        return product

    def GHASH(self, H, A, C):
        # GHASH(H, A, C): authentication hash over additional data A and
        # ciphertext C, finished with a block of their bit lengths.
        C_len = len(C)
        # NOTE(review): when len(A) is already a multiple of 16 this pads a
        # FULL extra zero block (16 - 0 == 16). The C# port reproduces the
        # same behavior, so the two agree with each other — verify against
        # the GCM spec before interoperating with other implementations.
        A_padded = bytes_to_int(A + b'\x00' * (16 - len(A) % 16))
        if C_len % 16 != 0:
            C += b'\x00' * (16 - C_len % 16)
        tag = self.H_mult(H, A_padded)
        # Fold each 16-byte ciphertext block into the running tag.
        for i in range(0, len(C) // 16):
            tag ^= bytes_to_int(C[i*16:i*16+16])
            tag = self.H_mult(H, tag)
        # Length block: 64-bit bit-lengths of A and of the unpadded C.
        tag ^= bytes_to_int(nb_to_n_bytes(8*len(A), 8) + nb_to_n_bytes(8*C_len, 8))
        tag = self.H_mult(H, tag)
        return tag

    def decrypt(self, ciphertext, seq_num, content_type, debug=False):
        # Nonce = 4-byte server implicit IV + 8-byte explicit IV taken from
        # the front of the record.
        iv = self.server_IV + ciphertext[0:8]
        # CTR counter starts at 2; block counter 1 is reserved for the
        # auth-tag mask (E_K(iv || 0^31 || 1)).
        counter = Counter.new(nbits=32, prefix=iv, initial_value=2, allow_wraparound=False)
        cipher = AES.new(self.server_AES_key, AES.MODE_CTR, counter=counter)
        # Strip the 8-byte explicit IV prefix and the 16-byte tag suffix.
        plaintext = cipher.decrypt(ciphertext[8:-16])
        # Computing the tag is actually pretty time consuming
        if debug:
            # AAD = seq_num(8) || content_type(1) || version(2) || length(2).
            auth_data = nb_to_n_bytes(seq_num, 8) + nb_to_n_bytes(content_type, 1) + TLS_VERSION + nb_to_n_bytes(len(ciphertext)-8-16, 2)
            auth_tag = self.GHASH(self.H_server, auth_data, ciphertext[8:-16])
            # NOTE(review): '\x00'*3 + '\x01' is str while encrypt() uses
            # b'\x00' — Python 2 only; inconsistent with encrypt below.
            auth_tag ^= bytes_to_int(AES.new(self.server_AES_key, AES.MODE_ECB).encrypt(iv + '\x00'*3 + '\x01'))
            auth_tag = nb_to_bytes(auth_tag)
            print('Auth tag (from server): ' + bytes_to_hex(ciphertext[-16:]))
            print('Auth tag (from client): ' + bytes_to_hex(auth_tag))
        # NOTE(review): the tag is only printed in debug mode, never
        # verified — the plaintext is returned unauthenticated.
        return plaintext

    def encrypt(self, plaintext, seq_num, content_type):
        # Nonce = 4-byte client implicit IV + 8 random explicit bytes.
        iv = self.client_IV + os.urandom(8)
        # Encrypts the plaintext
        plaintext_size = len(plaintext)
        counter = Counter.new(nbits=32, prefix=iv, initial_value=2, allow_wraparound=False)
        cipher = AES.new(self.client_AES_key, AES.MODE_CTR, counter=counter)
        ciphertext = cipher.encrypt(plaintext)
        # Compute the Authentication Tag
        auth_data = nb_to_n_bytes(seq_num, 8) + nb_to_n_bytes(content_type, 1) + TLS_VERSION + nb_to_n_bytes(plaintext_size, 2)
        auth_tag = self.GHASH(self.H_client, auth_data, ciphertext)
        # Mask the tag with E_K(iv || 0^31 || 1).
        auth_tag ^= bytes_to_int(AES.new(self.client_AES_key, AES.MODE_ECB).encrypt(iv + b'\x00'*3 + b'\x01'))
        auth_tag = nb_to_bytes(auth_tag)
        # print('Auth key: ' + bytes_to_hex(nb_to_bytes(self.H)))
        # print('IV: ' + bytes_to_hex(iv))
        # print('Key: ' + bytes_to_hex(self.client_AES_key))
        # print('Plaintext: ' + bytes_to_hex(plaintext))
        # print('Ciphertext: ' + bytes_to_hex(ciphertext))
        # print('Auth tag: ' + bytes_to_hex(auth_tag))
        # Record payload = explicit IV || ciphertext || tag.
        return iv[4:] + ciphertext + auth_tag
An attempt to translate this to C# code is below (sorry for the amateurish code, I am a newbie):
EDIT:
Created an array which got values from GetBytes, and printed the result:
byte[] incr = BitConverter.GetBytes((int) 2);
cf.printBuf(incr, (String) "Array:");
return;
Noticed that the result was "02 00 00 00". Hence I guess my machine is little endian
Made some changes to the code as rodrigogq mentioned. Below is the latest code. It is still not working:
Verified that GHASH, GF_mult and H_mult are giving same results. Below is the verification code:
Python:
key = "\xab\xcd\xab\xcd"
key = key * 10
h = "\x00\x00"
a = AES_GCM(key, 128, h)
H = 200
A = "\x02" * 95
C = "\x02" * 95
D = a.GHASH(H, A, C)
print(D)
C#:
BigInteger H = new BigInteger(200);
byte[] A = new byte[95];
byte[] C = new byte[95];
for (int i = 0; i < 95; i ++)
{
A[i] = 2;
C[i] = 2;
}
BigInteger a = e.GHASH(H, A, C);
Console.WriteLine(a);
Results:
For both: 129209628709014910494696220101529767594
EDIT: Now the outputs are agreeing between Python and C#. So essentially the porting is done :) However, these outputs still don't agree with Wireshark. Hence, the handshake is still failing. May be something wrong with the procedure or the contents. Below is the working code
EDIT: Finally managed to get the code working. Below is the code that resulted in a successful handshake
Working Code:
/*
* Receiving seqNum as UInt64 and content_type as byte
*
*/
/// <summary>
/// AES-GCM encryption of one TLS record, mirroring the Python
/// AES_GCM.encrypt above. Returns explicit IV (8 bytes) || ciphertext ||
/// auth tag (16 bytes).
/// </summary>
/// <param name="client_write_key">AES key for the client write direction.</param>
/// <param name="client_write_iv">4-byte implicit IV (salt) from the key block.</param>
/// <param name="plaintext">Record payload to encrypt.</param>
/// <param name="seqNum">TLS record sequence number (goes into the AAD).</param>
/// <param name="content_type">TLS content type byte (goes into the AAD).</param>
// AES_Encrypt_ECB, AES_CTR, GHASH and printBuf are defined elsewhere in
// this class — assumed, confirm against the full file.
public byte[] AES_Encrypt_GCM(byte[] client_write_key, byte[] client_write_iv, byte[] plaintext, UInt64 seqNum, byte content_type)
{
    int plaintext_size = plaintext.Length;
    List<byte> temp = new List<byte>();

    // H = AES_K(0^128), the GHASH subkey.
    byte[] init_bytes = new byte[16];
    Array.Clear(init_bytes, 0, 16);
    byte[] encrypted = AES_Encrypt_ECB(init_bytes, client_write_key, 128);
    // BigInteger's byte[] constructor is little-endian, so reverse the
    // big-endian cipher output before converting.
    Array.Reverse(encrypted);
    BigInteger H_client = new BigInteger(encrypted);
    if (H_client < 0)
    {
        // Append a zero sign byte to force a non-negative interpretation
        // (the high bit of the top byte would otherwise mean negative).
        temp.Clear();
        temp.TrimExcess();
        temp.AddRange(H_client.ToByteArray());
        temp.Add(0);
        H_client = new BigInteger(temp.ToArray());
    }

    // 8-byte explicit nonce part.
    // NOTE(review): System.Random is not cryptographically secure; a
    // production implementation should use a CSPRNG here.
    Random rnd = new Random();
    byte[] random = new byte[8];
    rnd.NextBytes(random);
    /*
     * incr is little endian, but it needs to be in big endian format
     *
     */
    byte[] incr = BitConverter.GetBytes((int) 2);
    Array.Reverse(incr);
    /*
     * Counter = First 4 bytes of IV + 8 Random bytes + 4 bytes of sequential value (starting at 2)
     *
     */
    temp.Clear();
    temp.TrimExcess();
    temp.AddRange(client_write_iv);
    temp.AddRange(random);
    byte[] iv = temp.ToArray();
    temp.AddRange(incr);
    byte[] counter = temp.ToArray();

    // CTR-mode bulk encryption with the assembled initial counter block.
    AES_CTR aesctr = new AES_CTR(counter);
    ICryptoTransform ctrenc = aesctr.CreateEncryptor(client_write_key, null);
    byte[] ctext = ctrenc.TransformFinalBlock(plaintext, 0, plaintext_size);

    // AAD = seq_num(8) || content_type(1) || version(2) || length(2).
    byte[] seq_num = BitConverter.GetBytes(seqNum);
    /*
     * Using UInt16 instead of short
     *
     */
    byte[] tls_version = BitConverter.GetBytes((UInt16) 771);
    Console.WriteLine("Plain Text size = {0}", plaintext_size);
    byte[] plaintext_size_array = BitConverter.GetBytes((UInt16) plaintext_size);
    /*
     * Size was returned as 10 00 instead of 00 10
     *
     */
    Array.Reverse(plaintext_size_array);
    temp.Clear();
    temp.TrimExcess();
    temp.AddRange(seq_num);
    temp.Add(content_type);
    temp.AddRange(tls_version);
    temp.AddRange(plaintext_size_array);
    byte[] auth_data = temp.ToArray();

    // Unmasked authentication tag over AAD and ciphertext.
    BigInteger auth_tag = GHASH(H_client, auth_data, ctext);
    Console.WriteLine("H = {0}", H_client);
    this.printBuf(plaintext, "plaintext = ");
    this.printBuf(auth_data, "A = ");
    this.printBuf(ctext, "C = ");
    this.printBuf(client_write_key, "client_AES_key = ");
    this.printBuf(iv.ToArray(), "iv = ");
    Console.WriteLine("Auth Tag just after GHASH: {0}", auth_tag);

    // NOTE(review): aes2/transform1 are created but never used — the mask
    // below comes from AES_Encrypt_ECB instead.
    AesCryptoServiceProvider aes2 = new AesCryptoServiceProvider();
    aes2.Key = client_write_key;
    aes2.Mode = CipherMode.ECB;
    aes2.Padding = PaddingMode.None;
    aes2.KeySize = 128;
    ICryptoTransform transform1 = aes2.CreateEncryptor();

    // Tag mask = E_K(iv || 0x00000001), same sign/endian fix-up as for H.
    byte[] cval = {0, 0, 0, 1};
    temp.Clear();
    temp.TrimExcess();
    temp.AddRange(iv);
    temp.AddRange(cval);
    byte[] encrypted1 = AES_Encrypt_ECB(temp.ToArray(), client_write_key, 128);
    Array.Reverse(encrypted1);
    BigInteger nenc = new BigInteger(encrypted1);
    if (nenc < 0)
    {
        temp.Clear();
        temp.TrimExcess();
        temp.AddRange(nenc.ToByteArray());
        temp.Add(0);
        nenc = new BigInteger(temp.ToArray());
    }
    this.printBuf(nenc.ToByteArray(), "NENC = ");
    Console.WriteLine("NENC: {0}", nenc);

    // Final tag = GHASH result XOR mask; convert back to big-endian bytes.
    auth_tag ^= nenc;
    byte[] auth_tag_array = auth_tag.ToByteArray();
    Array.Reverse(auth_tag_array);
    this.printBuf(auth_tag_array, "Final Auth Tag Byte Array: ");
    Console.WriteLine("Final Auth Tag: {0}", auth_tag);
    this.printBuf(random, "Random sent = ");

    // Record payload = explicit IV (8 random bytes) || ciphertext || tag.
    temp.Clear();
    temp.TrimExcess();
    temp.AddRange(random);
    temp.AddRange(ctext);
    temp.AddRange(auth_tag_array);
    return temp.ToArray();
}
/// <summary>
/// Dumps a buffer as a quoted "\xAB\xCD..." hex string, wrapping every
/// 32 bytes. When data is null, only the heading and the opening quote
/// are printed (mirrors the original's early return).
/// </summary>
public void printBuf(byte[] data, String heading)
{
    Console.Write(heading + "\"");
    if (data == null)
    {
        return;
    }
    int column = 0;
    foreach (byte value in data)
    {
        Console.Write("\\x{0}", value.ToString("X2"));
        column++;
        if (column == 32)
        {
            // Wrap the dump after 32 bytes per line.
            Console.Write("\r\n");
            column = 0;
        }
    }
    Console.Write("\"\r\n");
}
/// <summary>
/// Carry-less multiplication of x and y in GF(2^128), reduced with the
/// GCM polynomial constant (0xE1 followed by 15 zero bytes). Port of the
/// Python GF_mult above.
/// </summary>
public BigInteger GF_mult(BigInteger x, BigInteger y)
{
    BigInteger reduction = BigInteger.Parse("00E1000000000000000000000000000000", NumberStyles.AllowHexSpecifier);
    BigInteger product = BigInteger.Zero;
    /*
     * The shift amount must be int (BigInteger >> does not accept UInt32),
     * so the loop index stays a signed int.
     */
    for (int bit = 127; bit >= 0; bit--)
    {
        // XOR in x when bit 'bit' of y is set.
        product ^= x * ((y >> bit) & 1);
        // Shift x down and fold the dropped bit back in via the polynomial.
        x = (x >> 1) ^ ((x & 1) * reduction);
    }
    return product;
}
/// <summary>
/// Multiplies H by a 16-byte value in GF(2^128), consuming val one byte
/// at a time, least-significant byte first. Port of the Python H_mult.
/// </summary>
public BigInteger H_mult(BigInteger H, BigInteger val)
{
    BigInteger product = BigInteger.Zero;
    /*
     * The shift amount must be int (BigInteger << does not accept UInt32),
     * so the loop index stays a signed int.
     */
    for (int byteIndex = 0; byteIndex < 16; byteIndex++)
    {
        product ^= GF_mult(H, (val & 0xFF) << (8 * byteIndex));
        val >>= 8;
    }
    return product;
}
/// <summary>
/// GHASH(H, A, C): authentication hash over additional data A and
/// ciphertext C, finished with a block of their bit lengths. Port of the
/// Python GHASH above; byte arrays are reversed before each BigInteger
/// conversion because BigInteger is little-endian.
/// </summary>
public BigInteger GHASH(BigInteger H, byte[] A, byte[] C)
{
    int C_len = C.Length;
    List <byte> temp = new List<byte>();

    // NOTE(review): when A.Length is already a multiple of 16 this pads a
    // FULL extra zero block (16 - 0 == 16) — same as the Python original,
    // so the two implementations agree with each other.
    int plen = 16 - (A.Length % 16);
    byte[] zeroes = new byte[plen];
    Array.Clear(zeroes, 0, zeroes.Length);
    temp.AddRange(A);
    temp.AddRange(zeroes);
    // Reverse to little-endian for the BigInteger constructor.
    temp.Reverse();
    BigInteger A_padded = new BigInteger(temp.ToArray());
    temp.Clear();
    temp.TrimExcess();

    // Zero-pad C up to a whole number of 16-byte blocks.
    byte[] C1;
    if ((C_len % 16) != 0)
    {
        plen = 16 - (C_len % 16);
        byte[] zeroes1 = new byte[plen];
        // NOTE(review): this clears 'zeroes' rather than 'zeroes1' —
        // harmless only because C# zero-initializes new arrays anyway.
        Array.Clear(zeroes, 0, zeroes.Length);
        temp.AddRange(C);
        temp.AddRange(zeroes1);
        C1 = temp.ToArray();
    }
    else
    {
        C1 = new byte[C.Length];
        Array.Copy(C, 0, C1, 0, C.Length);
    }
    temp.Clear();
    temp.TrimExcess();

    // Start the tag from the padded AAD block(s).
    BigInteger tag = new BigInteger();
    tag = H_mult(H, A_padded);
    this.printBuf(H.ToByteArray(), "H Byte Array:");

    // Fold each 16-byte ciphertext block into the running tag.
    for (int i = 0; i < (int) (C1.Length / 16); i ++)
    {
        byte[] toTake;
        if (i == 0)
        {
            toTake = C1.Take(16).ToArray();
        }
        else
        {
            toTake = C1.Skip(i * 16).Take(16).ToArray();
        }
        // Big-endian block -> little-endian for BigInteger.
        Array.Reverse(toTake);
        BigInteger tempNum = new BigInteger(toTake);
        tag ^= tempNum;
        tag = H_mult(H, tag);
    }

    // Length block: 64-bit big-endian bit lengths of A and (unpadded) C.
    byte[] A_arr = BitConverter.GetBytes((long) (8 * A.Length));
    /*
     * Want length to be "00 00 00 00 00 00 00 xy" format
     *
     */
    Array.Reverse(A_arr);
    byte[] C_arr = BitConverter.GetBytes((long) (8 * C_len));
    /*
     * Want length to be "00 00 00 00 00 00 00 xy" format
     *
     */
    Array.Reverse(C_arr);
    temp.AddRange(A_arr);
    temp.AddRange(C_arr);
    // Reverse the combined 16-byte length block to little-endian.
    temp.Reverse();
    BigInteger array_int = new BigInteger(temp.ToArray());
    tag = tag ^ array_int;
    tag = H_mult(H, tag);
    return tag;
}
Using SSL decryption in wireshark (using private key), I found that:
The nonce calculated by the C# code is same as that in wireshark (fixed part is client_write_IV and variable part is 8 bytes random)
The value of AAD (auth_data above) (client_write_key, seqNum + ctype + tls_version + plaintext_size) is matching with wireshark value
Cipher text (ctext above) (the C in GHASH(H, A, C)), is also matching the wireshark calculated value
However, the auth_tag calculation (GHASH(H_client, auth_data, ctext)) is failing. It would be great if someone could guide me as to what could be wrong in GHASH function. I just did a basic comparison of results of GF_mult function in python and C#, but the results are not matching too
This is not a final solution, but just an advice. I have seen you are using a lot the function BitConverter.GetBytes, int instead of Int32 or Int16.
The remarks from the official documentation says:
The order of bytes in the array returned by the GetBytes method
depends on whether the computer architecture is little-endian or
big-endian.
As for when you are using the BigInteger structure, it seems to be expecting always the little-endian order:
value
Type: System.Byte[]
An array of byte values in little-endian order.
Prefer using the Int32 and Int16 and pay attention to the order of the bytes before using it on these calculations.
Use log4net to log all the operations. Would be nice to put the same logs in the python program so that you could compare then at once, and check exactly where the calculations change.
Hope this give some tips on where to start.
I want to write a .NET based solution to the Cicada 2012 triangle puzzle.
http://uncovering-cicada.wikia.com/wiki/The_Triangle_Puzzle
Unfortunately, I keep getting the error "bad data" when creating an RSA key with the prime numbers from the solution:
RSACryptoServiceProvider RSA = new RSACryptoServiceProvider();
RSAParameters RSAKeyInfo = new RSAParameters();
// NOTE(review): BigInteger.ToByteArray() produces little-endian bytes
// (with a possible trailing sign byte), while RSAParameters fields must
// be unsigned big-endian — see the SwapEndian-based answer below. The
// parameter set is also incomplete (no Modulus/D/DP/DQ/InverseQ), so
// ImportParameters cannot succeed as written.
RSAKeyInfo.Exponent = BigInteger.Parse("65537").ToByteArray();
RSAKeyInfo.P = BigInteger.Parse("99554414790940424414351515490472769096534141749790794321708050837").ToByteArray();
RSAKeyInfo.Q = BigInteger.Parse("104593961812606247801193807142122161186583731774511103180935025763").ToByteArray();
RSA.ImportParameters(RSAKeyInfo);
I understand the basics of asymmetric encryption, but I can't understand why I can't create my own key with small primes... unless the RSACryptoServiceProvider has some kind of "strength checker" or something.
Here is an some example code illustrating my comments.
// Builds a complete RSA private parameter set from p, q, e, d and imports
// it. Two points make this work where the question's code failed:
//  1. All CRT fields (D, DP, DQ, InverseQ) and the Modulus are supplied.
//  2. BigInteger.ToByteArray() is little-endian, but RSAParameters fields
//     must be big-endian, hence SwapEndian on every field.
public static void Main(string[] args)
{
    RSAParameters RSAKeyInfo = new RSAParameters();
    BigInteger p = BigInteger.Parse("99554414790940424414351515490472769096534141749790794321708050837");
    BigInteger q = BigInteger.Parse("104593961812606247801193807142122161186583731774511103180935025763");
    BigInteger modulus = p * q;
    BigInteger e = BigInteger.Parse("65537");
    // d = e^-1 mod lcm(p-1, q-1) (precomputed for these primes).
    BigInteger d = BigInteger.Parse("3198894071003639550820071093788580812499328515050919260466968671765341413862337988421155590663267840745788239672194253184260553629");
    // CRT exponents: dp = d mod (p-1), dq = d mod (q-1).
    BigInteger dp = d % (p - 1);
    BigInteger dq = d % (q - 1);
    // q^-1 mod p via Fermat's little theorem (valid because p is prime).
    BigInteger inverseQ = BigInteger.ModPow(q, p - 2, p);
    RSAKeyInfo.Modulus = SwapEndian(modulus.ToByteArray());
    RSAKeyInfo.Exponent = SwapEndian(e.ToByteArray());
    RSAKeyInfo.P = SwapEndian(p.ToByteArray());
    RSAKeyInfo.Q = SwapEndian(q.ToByteArray());
    RSAKeyInfo.D = SwapEndian(d.ToByteArray());
    RSAKeyInfo.DP = SwapEndian(dp.ToByteArray());
    RSAKeyInfo.DQ = SwapEndian(dq.ToByteArray());
    RSAKeyInfo.InverseQ = SwapEndian(inverseQ.ToByteArray());
    // NOTE(review): RSA.Create() (managed implementation) rather than
    // RSACryptoServiceProvider — presumably more tolerant of this
    // non-standard ~434-bit key size; confirm on the target platform.
    RSA rsa = RSA.Create();
    rsa.ImportParameters(RSAKeyInfo);
    Console.WriteLine("Hello World!");
}
/// <summary>
/// Returns a reversed copy of the input, leaving the original untouched.
/// Used to convert BigInteger's little-endian bytes to the big-endian
/// layout RSAParameters expects.
/// </summary>
private static byte[] SwapEndian(byte[] v0)
{
    byte[] reversed = (byte[])v0.Clone();
    Array.Reverse(reversed);
    return reversed;
}
Is there a existing method within .NET Framework (C#) to generate a 40 char (public?) fingerprint as shown below, when you have P, Q, G, Y and X?
Or would anybody know on how to achieve this?
Fingerprint: 81F68001 29D928AD BEE41B78 AA862106 CAEAC892
EDIT:
here is an example of what i'm trying to do:
string P = "00F35DBCD6D4C296D2FE9118B659D02608B76FAC94BB58B10283F20390E2B259BAC602466162E9EF3E6A1590702CAE49B681A75A878E266F1AFAE0FA89DA5CA44A1551B517A3F80A9D6C630F9E7D239B437F7402DF8055069735894CD9D4708F8777B5E4F3E6A8B2D4EEE50DB2C96BA16D3C81FEB923697D649A8B7771B10E5B3F";
string Q = "00B5AF039839043410E04C35BDDB30679969EBAC8B";
string G = "00F300A68E54DE33A09001E28EC09F2ABF5DAF208774F2514D878D5587D870C91C6DE42B4705078C6F4438765050039C2950B6DE85AFC0D12A7A5C521782CB760918DF68F385A7F177DF50AA6BA0284090454106E422FCAE5390ADC00B859A433430019E970BFA614374DE1FB40C600345EF19DC01A122E4676C614DC29D3DC2FE";
string Y = "00A5317849AF22BA6498F1EF973158C8BDA848BEB074CB141E629C927B18F29C8CE99815001BAAB2931F339B5C52A79BC3DCB0C5962C302707BA6FF1807EEB91D751BA723BB7512C20689AC5E67A1B656CDFD1BA2D4F6A44308509486AA8754B47784FC4C03E546897200388656BA5834A2CC0E18E58454FF60C1BA5411D6F50FD";
i'm missing the code for this intermediate piece. how do i convert P, Q, G, Y into the fingerprint. I tried different approaches, but i'm unsuccessful generating the fingerprint i see in the application that i'm trying to recreate.
/* convert public key (bigIntKey) into fingerprint */
// NOTE(review): round-tripping the SHA-1 digest through BigInteger is
// lossy — the constructor treats the bytes as a signed little-endian
// number, so ToByteArray() may drop a trailing zero byte or append a
// sign byte; hence the digest is not guaranteed to come back as exactly
// 20 bytes (the check below).
var bigIntHash = new BigInteger(SHA1.Create().ComputeHash(key.ToByteArray()));
byte[] hash = bigIntHash.ToByteArray();
if (hash.Length != 20)
{
    throw new IndexOutOfRangeException();
}
// Print the fingerprint as five 32-bit groups.
for (int i = 0; i < 5; i++)
{
    // NOTE(review): BitConverter is little-endian on x86, so each group's
    // bytes are reversed relative to the big-endian display in the target
    // application — likely part of why the output doesn't match.
    int lf = BitConverter.ToInt32(hash, i * 4);
    Debug.Write(lf.ToString("X") + " ");
}
EDIT2:
I tried this, but it is not working:
// switch P, Q, G, Y and separately to make it work.
// NOTE(review): DSAParameters fields are big-endian; byte-reversing every
// component here is presumably why ImportParameters rejects the key — confirm
// against the DSAParameters documentation.
byte[] pArr = StringToByteArray(P);
pArr = Tools.Endian.ReverseBytes(pArr);
byte[] qArr = StringToByteArray(Q);
qArr = Tools.Endian.ReverseBytes(qArr);
byte[] gArr = StringToByteArray(G);
gArr = Tools.Endian.ReverseBytes(gArr);
byte[] yArr = StringToByteArray(Y);
yArr = Tools.Endian.ReverseBytes(yArr);
byte[] xArr = StringToByteArray(X);
xArr = Tools.Endian.ReverseBytes(xArr);
// NOTE(review): 'arr' is built here but never used afterwards.
byte[] arr = Combine(pArr, qArr, gArr, yArr);
DSACryptoServiceProvider dsa = new DSACryptoServiceProvider();
DSAParameters par = new DSAParameters();
par.P = pArr;
par.Q = qArr;
par.G = gArr;
par.Y = yArr;
par.X = xArr;
// NOTE(review): ImportParameters also expects each component without the
// leading 0x00 sign byte (i.e. exact component lengths) — verify sizes.
dsa.ImportParameters(par);
var xml = dsa.ToXmlString(true);
It will fail on the ImportParameters call.
Thank you
You need to follow the OTR spec, which says the components of the key use MPI encoding, which it specifies as the length (32 bit big-endian) followed by the integer (big-endian, no leading zeros)
void Main()
{
	// DSA public key components as hex strings (leading 0x00 sign byte included).
	string P = "00F35DBCD6D4C296D2FE9118B659D02608B76FAC94BB58B10283F20390E2B259BAC602466162E9EF3E6A1590702CAE49B681A75A878E266F1AFAE0FA89DA5CA44A1551B517A3F80A9D6C630F9E7D239B437F7402DF8055069735894CD9D4708F8777B5E4F3E6A8B2D4EEE50DB2C96BA16D3C81FEB923697D649A8B7771B10E5B3F";
	string Q = "00B5AF039839043410E04C35BDDB30679969EBAC8B";
	string G = "00F300A68E54DE33A09001E28EC09F2ABF5DAF208774F2514D878D5587D870C91C6DE42B4705078C6F4438765050039C2950B6DE85AFC0D12A7A5C521782CB760918DF68F385A7F177DF50AA6BA0284090454106E422FCAE5390ADC00B859A433430019E970BFA614374DE1FB40C600345EF19DC01A122E4676C614DC29D3DC2FE";
	string Y = "00A5317849AF22BA6498F1EF973158C8BDA848BEB074CB141E629C927B18F29C8CE99815001BAAB2931F339B5C52A79BC3DCB0C5962C302707BA6FF1807EEB91D751BA723BB7512C20689AC5E67A1B656CDFD1BA2D4F6A44308509486AA8754B47784FC4C03E546897200388656BA5834A2CC0E18E58454FF60C1BA5411D6F50FD";

	// OTR fingerprint = SHA-1 over the concatenated MPI encodings of P, Q, G, Y.
	var publicKey = new[] { P, Q, G, Y }
		.SelectMany(component => ToMPI(HexToBytes(component)))
		.ToArray();

	// Render the digest as contiguous uppercase hex.
	var digest = SHA1.Create().ComputeHash(publicKey);
	var fingerprint = BitConverter.ToString(digest).Replace("-", "");
	fingerprint.Dump();
}
byte[] ToMPI(byte[] data)
{
	// OTR MPI encoding: a 32-bit big-endian length prefix followed by the
	// magnitude bytes with all leading zero bytes removed.
	int start = 0;
	while (start < data.Length && data[start] == 0)
		start++;

	int length = data.Length - start;
	byte[] mpi = new byte[4 + length];

	// Big-endian length prefix, most significant byte first.
	mpi[0] = (byte)(length >> 24);
	mpi[1] = (byte)(length >> 16);
	mpi[2] = (byte)(length >> 8);
	mpi[3] = (byte)length;

	Array.Copy(data, start, mpi, 4, length);
	return mpi;
}
// from http://stackoverflow.com/questions/311165/how-do-you-convert-byte-array-to-hexadecimal-string-and-vice-versa
// Decode a hexadecimal string into its raw bytes, two characters per byte.
// An odd-length input faults on the final Substring, as in the original.
public static byte[] HexToBytes(String hex)
{
	byte[] result = new byte[hex.Length / 2];
	for (int pos = 0; pos < hex.Length; pos += 2)
		result[pos / 2] = Convert.ToByte(hex.Substring(pos, 2), 16);
	return result;
}
MSDN mentions what you need to do your job on this page.
Then, have a look at this answer from this SO page. The accepted answer gives the following code (and I quote):
var dsa = new DSACryptoServiceProvider();
var privateKey = dsa.ExportParameters(true); // private key
var publicKey = dsa.ExportParameters(false); // public key
I think you have everything you need to get you going.
CHEERS!
I've used this class to generate an OTR DSA key:
https://github.com/mohamedmansour/OTRLib/blob/master/Src/OTR/OTRUtilities/DSASigner.cs
Make the class public and call without constructor parameters.
// Generate a fresh DSA key pair via OTRLib's DSASigner helper.
var signer = new DSASigner();
var _des_key_object = signer.GetDSAKeyParameters();
Later reuse of the same key:
// Export the key components as hex strings for persistence.
string _dsa_key_1_p = _des_key_object.GetHexParamP();
string _dsa_key_1_q = _des_key_object.GetHexParamQ();
string _dsa_key_1_g = _des_key_object.GetHexParamG();
string _dsa_key_1_x = _des_key_object.GetHexParamX();
// This can be a JSON for storing.
var keysArray = new string[] { _dsa_key_1_p, _dsa_key_1_q, _dsa_key_1_g, _dsa_key_1_x };
// NOTE(review): '_des_key_objectJson' is undefined here — presumably it is
// 'keysArray' after a JSON round-trip; as written this line does not compile.
// NOTE(review): only P, Q, G, X are persisted; Y is presumably recomputed
// from them (G^X mod P) by DSAKeyParams — confirm against OTRLib.
_des_key_object = new DSAKeyParams(_des_key_objectJson[0], _des_key_objectJson[1], _des_key_objectJson[2], _des_key_objectJson[3]);