Compression performance difference between Swift and managed C#

I'm implementing LZF compression in a managed (C#) environment and decompression on iOS. Here is my LZF decompress implementation in C#:
private static int LZFDecompress(byte[] input, byte[] output)
{
    int inputLength = input.Length;
    int outputLength = output.Length;
    uint iidx = 0;
    uint oidx = 0;
    do
    {
        uint ctrl = input[iidx++];
        if (ctrl < (1 << 5)) /* literal run */
        {
            ctrl++;
            if (oidx + ctrl > outputLength)
            {
                //SET_ERRNO (E2BIG);
                return 0;
            }
            do
                output[oidx++] = input[iidx++];
            while ((--ctrl) != 0);
        }
        else /* back reference */
        {
            uint len = ctrl >> 5;
            int reference = (int)(oidx - ((ctrl & 0x1f) << 8) - 1);
            if (len == 7)
                len += input[iidx++];
            reference -= input[iidx++];
            if (oidx + len + 2 > outputLength)
            {
                //SET_ERRNO (E2BIG);
                return 0;
            }
            if (reference < 0)
            {
                //SET_ERRNO (EINVAL);
                return 0;
            }
            output[oidx++] = output[reference++];
            output[oidx++] = output[reference++];
            do
                output[oidx++] = output[reference++];
            while ((--len) != 0);
        }
    }
    while (iidx < inputLength);
    return (int)oidx;
}
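For context, here is roughly how I call it (a sketch: the file name and knownOriginalSize are placeholders; the output buffer must be preallocated to the known decompressed size, and the function signals an undersized buffer by returning 0):
byte[] compressed = System.IO.File.ReadAllBytes("data.lzf"); // placeholder path
byte[] decompressed = new byte[knownOriginalSize];           // placeholder: stored alongside the data
int written = LZFDecompress(compressed, decompressed);
if (written == 0)
    throw new InvalidOperationException("output buffer too small or input malformed");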
and my port to Swift looks like this:
private static func LZFDecompress(input: [UInt8], output: inout [UInt8]) -> Int
{
    let inputLength = input.count
    let outputLength = output.count
    var iidx = 0
    var oidx = 0
    repeat
    {
        var ctrl = Int(input[iidx])
        iidx += 1
        if ctrl < (1 << 5)
        {
            ctrl += 1
            if oidx + ctrl > outputLength
            {
                return 0
            }
            repeat
            {
                output[oidx] = input[iidx]
                oidx += 1
                iidx += 1
                ctrl -= 1
            }
            while ctrl != 0
        }
        else
        {
            var len = ctrl >> 5
            var reference = oidx - ((ctrl & 0x1f) << 8) - 1
            if len == 7
            {
                len += Int(input[iidx])
                iidx += 1
            }
            reference -= Int(input[iidx])
            iidx += 1
            if oidx + len + 2 > outputLength
            {
                return 0
            }
            if reference < 0
            {
                return 0
            }
            output[oidx] = output[reference]
            oidx += 1
            reference += 1
            output[oidx] = output[reference]
            oidx += 1
            reference += 1
            repeat
            {
                output[oidx] = output[reference]
                oidx += 1
                reference += 1
                len -= 1
            }
            while len != 0
        }
    }
    while iidx < inputLength
    return oidx
}
But I have a problem: the performance differs hugely. Decompressing the same files costs 2-3 seconds in C# but 9-10 seconds in Swift. I can't understand this situation.
I tested the C# version from a console app on Windows, and the Swift version from a playground and from a project on a Mac.

My code was not efficient: it takes no account of SIMD or CPU specifics. So I switched to the decompression methods (LZ4, zlib) that Apple provides. They are far faster than my port; decompressing a 200 MB file costs below 1 second.
But in a managed environment (C#) it is still slower than unmanaged code. If you want more performance, implement it natively.
I use these managed zlib ports:
https://github.com/jstedfast/Ionic.Zlib
https://github.com/Kulestar/unity-zlib (Unity version, dotnet-mono)
They cost 6-7 seconds for decompressing and 30 seconds for compressing the same file.
You should also know the following code to be compatible with zlib on Apple platforms. It includes adding the header for the compressed data.
public static byte[] Compress(byte[] inputData)
{
    var zlib = new ZlibCodec(CompressionMode.Compress);
    zlib.CompressLevel = Zlib.CompressionLevel.AppleSupported; // level 5
    zlib.InputBuffer = inputData;
    zlib.OutputBuffer = new byte[inputData.Length];
    zlib.NextIn = 0;
    zlib.AvailableBytesIn = inputData.Length;
    zlib.NextOut = 0;
    zlib.AvailableBytesOut = inputData.Length;
    // 'false' selects RFC 1951 (raw deflate) framing rather than RFC 1950 (zlib)
    zlib.InitializeDeflate(Zlib.CompressionLevel.AppleSupported, false);
    zlib.Deflate(FlushType.Finish);
    var output = new byte[zlib.TotalBytesOut];
    Array.Copy(zlib.OutputBuffer, output, (int)zlib.TotalBytesOut);
    return output;
}
public static byte[] Decompress(byte[] inputData, int outputSize)
{
    var zlib = new ZlibCodec(CompressionMode.Decompress);
    zlib.CompressLevel = Zlib.CompressionLevel.AppleSupported;
    zlib.InputBuffer = inputData;
    zlib.OutputBuffer = new byte[outputSize];
    zlib.NextIn = 0;
    zlib.AvailableBytesIn = inputData.Length;
    zlib.NextOut = 0;
    zlib.AvailableBytesOut = outputSize;
    zlib.InitializeInflate(false);
    zlib.Inflate(FlushType.Finish);
    var output = new byte[zlib.TotalBytesOut];
    Array.Copy(zlib.OutputBuffer, output, (int)zlib.TotalBytesOut);
    return output;
}
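For reference, a round trip with these two helpers would look like the sketch below; note that Decompress needs the original size, so you have to store it (or transmit it) alongside the compressed bytes:
byte[] original = System.IO.File.ReadAllBytes("input.bin"); // placeholder path
byte[] compressed = Compress(original);
byte[] restored = Decompress(compressed, original.Length);
// restored is now byte-identical to original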
I hope this helps others like me who implement multi-platform compression.

Related

Xamarin.Android wav to m4a

I have found the answer for Java: https://stackoverflow.com/a/36357819/202179 and tried to port it to Xamarin.
Here is the code that I've made:
const string COMPRESSED_AUDIO_FILE_MIME_TYPE = "audio/mp4a-latm";
const int COMPRESSED_AUDIO_FILE_BIT_RATE = 64000; // 64 kbps
const int SAMPLING_RATE = 48000;
const int BUFFER_SIZE = 48000;
const int CODEC_TIMEOUT_IN_MS = 5000;

void Compress()
{
    var inputFile = new Java.IO.File(tempFileWavPath);
    var fis = new Java.IO.FileInputStream(inputFile);
    var outputFile = new Java.IO.File(fileM4APath);
    if (outputFile.Exists())
        outputFile.Delete();

    var mux = new MediaMuxer(outputFile.AbsolutePath, MuxerOutputType.Mpeg4);
    MediaFormat outputFormat = MediaFormat.CreateAudioFormat(COMPRESSED_AUDIO_FILE_MIME_TYPE, SAMPLING_RATE, 1);
    outputFormat.SetInteger(MediaFormat.KeyAacProfile, (int)MediaCodecProfileType.Aacobjectlc);
    outputFormat.SetInteger(MediaFormat.KeyBitRate, COMPRESSED_AUDIO_FILE_BIT_RATE);
    outputFormat.SetInteger(MediaFormat.KeyMaxInputSize, 16384);

    MediaCodec codec = MediaCodec.CreateEncoderByType(COMPRESSED_AUDIO_FILE_MIME_TYPE);
    codec.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
    codec.Start();

    MediaCodec.BufferInfo outBuffInfo = new MediaCodec.BufferInfo();
    byte[] tempBuffer = new byte[BUFFER_SIZE];
    var hasMoreData = true;
    double presentationTimeUs = 0;
    int audioTrackIdx = 0;
    int totalBytesRead = 0;
    int percentComplete = 0;
    do
    {
        int inputBufIndex = 0;
        while (inputBufIndex != -1 && hasMoreData)
        {
            inputBufIndex = codec.DequeueInputBuffer(CODEC_TIMEOUT_IN_MS);
            if (inputBufIndex >= 0)
            {
                var dstBuf = codec.GetInputBuffer(inputBufIndex);
                dstBuf.Clear();
                int bytesRead = fis.Read(tempBuffer, 0, dstBuf.Limit());
                if (bytesRead == -1) // -1 implies EOS
                {
                    hasMoreData = false;
                    codec.QueueInputBuffer(inputBufIndex, 0, 0, (long)presentationTimeUs, MediaCodecBufferFlags.EndOfStream);
                }
                else
                {
                    totalBytesRead += bytesRead;
                    dstBuf.Put(tempBuffer, 0, bytesRead);
                    codec.QueueInputBuffer(inputBufIndex, 0, bytesRead, (long)presentationTimeUs, 0);
                    presentationTimeUs = 1000000L * (totalBytesRead / 2) / SAMPLING_RATE;
                }
            }
        }

        // Drain audio
        int outputBufIndex = 0;
        while (outputBufIndex != (int)MediaCodecInfoState.TryAgainLater)
        {
            outputBufIndex = codec.DequeueOutputBuffer(outBuffInfo, CODEC_TIMEOUT_IN_MS);
            if (outputBufIndex >= 0)
            {
                var encodedData = codec.GetOutputBuffer(outputBufIndex);
                encodedData.Position(outBuffInfo.Offset);
                encodedData.Limit(outBuffInfo.Offset + outBuffInfo.Size);
                if ((outBuffInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0 && outBuffInfo.Size != 0)
                {
                    codec.ReleaseOutputBuffer(outputBufIndex, false);
                }
                else
                {
                    mux.WriteSampleData(audioTrackIdx, encodedData, outBuffInfo);
                    codec.ReleaseOutputBuffer(outputBufIndex, false);
                }
            }
            else if (outputBufIndex == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                outputFormat = codec.OutputFormat;
                audioTrackIdx = mux.AddTrack(outputFormat);
                mux.Start();
            }
        }
        percentComplete = (int)Math.Round(((float)totalBytesRead / (float)inputFile.Length()) * 100.0);
    } while (outBuffInfo.Flags != MediaCodecBufferFlags.EndOfStream);

    fis.Close();
    mux.Stop();
    mux.Release();
}
This almost works: it converts the file, but the resulting file appears to be encoded too fast - the pitch and the speed are too high, and playback lasts shorter than expected.
It is likely that only a slight change is needed, but I am not sure what. Can anyone suggest?
I could reproduce the "encoded too fast" result when using the wrong SAMPLING_RATE. For example, I downloaded a WAV file online whose sampling rate is 11025 Hz. If I use the original 48000 from the code it plays too fast; when I use 11025 it works.
So we need to know the sampling rate of the WAV file and then set it in the code:
const int SAMPLING_RATE = 11025; // instead of 44100 or 48000
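If you don't know the rate up front, it can be read from the file itself. A minimal sketch, assuming a canonical RIFF/WAV header where the "fmt " chunk starts at byte 12 (a robust reader would walk the chunk list instead):
static int ReadWavSampleRate(string path)
{
    using (var reader = new System.IO.BinaryReader(System.IO.File.OpenRead(path)))
    {
        reader.BaseStream.Seek(24, System.IO.SeekOrigin.Begin); // sample-rate field of the fmt chunk
        return reader.ReadInt32(); // stored little-endian, e.g. 11025, 44100, 48000
    }
}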

Problem with manual code migration from C# to C

I am trying to migrate code written in C# to C, both for better performance and to be able to use it from other software that supports C-based DLLs.
I have this function in C#, which performs as expected:
private byte[] authDataExtract(byte[] concatenatedData)
{
    try
    {
        byte[] authData = null;
        authData = new byte[concatenatedData.Length - 10];
        int blockCount = 0;
        while (true)
        {
            if (concatenatedData.Length - 10 - blockCount < 4)
                break;
            else if (concatenatedData.Length - 10 - blockCount >= 4)
            {
                if ((isAllZero(concatenatedData, blockCount) == true) || (isAllff(concatenatedData, blockCount) == true)) // modified to handle 0xFF padding - Sudhanwa
                    break;
                int dlc = int.Parse(concatenatedData[blockCount + 3].ToString("X2"), System.Globalization.NumberStyles.HexNumber); // modified to handle exception in case of padding CR - Sudhanwa
                //int dlc = int.Parse(bytetostring(concatenatedData[blockCount + 3]));
                if ((dlc > concatenatedData.Length - 10 - blockCount))
                {
                    authData = new byte[concatenatedData.Length - 10];
                    Buffer.BlockCopy(concatenatedData, 0, authData, 0, concatenatedData.Length - 10);
                    blockCount = concatenatedData.Length - 10;
                    break;
                }
                authData = new byte[blockCount + 4 + dlc];
                Buffer.BlockCopy(concatenatedData, 0, authData, 0, blockCount + 4 + dlc);
                blockCount += dlc + 4;
            }
        }
        return authData;
    }
    catch (Exception)
    {
        throw;
    }
}
I want to write the equivalent C code. My current C code is:
void authDataExtract(unsigned char payload[], unsigned int size_payload, unsigned char *arr)
{
    //unsigned char rec_tMAC[8];
    int blockcount = 0;
    int dlc = 0;
    //unsigned char* arr = NULL;
    //memcpy(&rec_tMAC[0], &payload[size_payload - 8], 8);
    //printArr(rec_tMAC, 8);
    while (1)
    {
        if (size_payload - 10 - blockcount < 4)
            break;
        else if (size_payload - 10 - blockcount >= 4)
        {
            if ((isAllZero(payload, size_payload, blockcount) == true) ||
                (isAllff(payload, size_payload, blockcount) == true))
                break;
            dlc = payload[blockcount + 3];
            if ((dlc > size_payload - 10 - blockcount))
            {
                arr = (unsigned char*)calloc(size_payload - 10, sizeof(unsigned char));
                memcpy(arr, payload, size_payload - 10);
                blockcount = size_payload - 10;
                break;
            }
            arr = (unsigned char*)calloc(blockcount + 4 + dlc, sizeof(unsigned char));
            memcpy(arr, payload, blockcount + 4 + dlc);
            blockcount += dlc + 4;
        }
    }
}
But it is giving exceptions related to the pointer. I believe I have an issue with dynamic memory allocation.
Assuming the logic in the C# code is correct, I'd appreciate help writing a C function with exactly the same logic. Thanks in advance.
Notice that the C# function returns a byte[]:
private byte[] authDataExtract(byte[] concatenatedData)
but the C function does not:
void authDataExtract(unsigned char payload [],unsigned int size_payload,unsigned char * arr)
Note that arr is a new variable, local to the authDataExtract function; assigning to it has no effect on the caller. Try this instead:
unsigned char* authDataExtract(unsigned char payload[], unsigned int size_payload, unsigned char *arr)
{
    while (1)
    {
        ...
    }
    return arr;
}
and call it from main:
unsigned char *p = authDataExtract(….);
if (!p) error;
You could also use a pointer-to-pointer out-parameter, but I leave that to you.

Optimize summing 2 arrays of bytes

I am iterating through an array of bytes and adding the values of another array of bytes to it in a for loop:
var random = new Random();
byte[] bytes = new byte[20_000_000];
byte[] bytes2 = new byte[20_000_000];

for (int i = 0; i < bytes.Length; i++)
{
    bytes[i] = (byte)random.Next(255);
}
for (int i = 0; i < bytes.Length; i++)
{
    bytes2[i] = (byte)random.Next(255);
}

// how to optimize the part below
for (int i = 0; i < bytes.Length; i++)
{
    bytes[i] += bytes2[i];
}
Is there any way to speed up the process so that it is faster than this linear loop?
You could use Vector:
using System;
using System.Numerics;                // Vector<T>
using System.Runtime.InteropServices; // MemoryMarshal

static void Add(Span<byte> dst, ReadOnlySpan<byte> src)
{
    Span<Vector<byte>> dstVec = MemoryMarshal.Cast<byte, Vector<byte>>(dst);
    ReadOnlySpan<Vector<byte>> srcVec = MemoryMarshal.Cast<byte, Vector<byte>>(src);
    for (int i = 0; i < dstVec.Length; ++i)
    {
        dstVec[i] += srcVec[i];
    }
    // Handle the tail that doesn't fill a whole vector
    for (int i = dstVec.Length * Vector<byte>.Count; i < dst.Length; ++i)
    {
        dst[i] += src[i];
    }
}
Will go even faster if you use a pointer here to align one of your arrays.
Pad the array length to the next highest multiple of 8 (it already is in your example). Then use an unsafe context to treat the two byte arrays as ulong arrays, and loop bytes.Length / 8 times, adding 8 bytes per iteration. Masking off the top bit of every byte before the 64-bit add keeps carries from spilling into the neighbouring byte, and the XOR term adds the top bits back in, so each byte is summed modulo 256 independently.
On my system this runs in less than 13 milliseconds, compared to 105 milliseconds for the original code.
You must add the /unsafe option to use this code: open the project properties and select "Allow unsafe code".
var random = new Random();
byte[] bytes = new byte[20_000_000];
byte[] bytes2 = new byte[20_000_000];

int Len = bytes.Length >> 3; // >> 3 is the same as / 8
ulong MASK = 0x8080808080808080;    // the top bit of every byte
ulong MASKINV = 0x7f7f7f7f7f7f7f7f; // the low 7 bits of every byte

// Sanity check
if ((bytes.Length & 7) != 0) throw new Exception("bytes.Length is not a multiple of 8");
if ((bytes2.Length & 7) != 0) throw new Exception("bytes2.Length is not a multiple of 8");

unsafe
{
    // Add 8 bytes at a time, taking into account overflow between bytes
    fixed (byte* pbBytes = &bytes[0])
    fixed (byte* pbBytes2 = &bytes2[0])
    {
        ulong* pBytes = (ulong*)pbBytes;
        ulong* pBytes2 = (ulong*)pbBytes2;
        for (int i = 0; i < Len; i++)
        {
            pBytes[i] = ((pBytes2[i] & MASKINV) + (pBytes[i] & MASKINV)) ^ ((pBytes[i] ^ pBytes2[i]) & MASK);
        }
    }
}
You can utilize all your processors/cores, assuming that your machine has more than one.
// Requires: using System.Threading.Tasks; using System.Collections.Concurrent;
Parallel.ForEach(Partitioner.Create(0, bytes.Length), range =>
{
    for (int i = range.Item1; i < range.Item2; i++)
    {
        bytes[i] += bytes2[i];
    }
});
Update: The Vector<T> class can also be used in .NET Framework; it requires the package System.Numerics.Vectors. It offers the advantage of parallelization within a single core, by issuing a single instruction to multiple data (SIMD). Most current processors are SIMD-enabled. It is only enabled for 64-bit processes, so the flag [Prefer 32-bit] must be unchecked. In 32-bit processes the property Vector.IsHardwareAccelerated returns false, and the performance is bad.
using System.Numerics;

/// <summary>Adds each pair of elements in two arrays, and replaces the
/// left array element with the result.</summary>
public static void Add_UsingVector(byte[] left, byte[] right, int start, int length)
{
    int i = start;
    int step = Vector<byte>.Count; // 16 with SSE2, 32 with AVX2
    int end = start + length - step + 1;
    for (; i < end; i += step)
    {
        // Vectorize `step` bytes from each array
        var vector1 = new Vector<byte>(left, i);
        var vector2 = new Vector<byte>(right, i);
        vector1 += vector2; // vector arithmetic is unchecked only
        vector1.CopyTo(left, i);
    }
    for (; i < start + length; i++) // process the last few elements
    {
        unchecked { left[i] += right[i]; }
    }
}
This runs 4-5 times faster than a simple loop, without utilizing more than one thread (25% CPU consumption on a 4-core PC).
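As a rough way to reproduce these timings, a simple Stopwatch harness like this can be used (a sketch, assuming the Add_UsingVector method above is in scope; absolute numbers will vary by machine):
var random = new Random();
byte[] bytes = new byte[20_000_000];
byte[] bytes2 = new byte[20_000_000];
random.NextBytes(bytes);
random.NextBytes(bytes2);

var sw = System.Diagnostics.Stopwatch.StartNew();
Add_UsingVector(bytes, bytes2, 0, bytes.Length);
sw.Stop();
Console.WriteLine($"Vectorized add: {sw.ElapsedMilliseconds} ms");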

Errors on hex characters and hash

I have a project that is nearly complete aside from a couple of stubborn but probably simple errors I'm receiving. I am by no means knowledgeable in C#, and getting this project this far is a miracle. I'm hoping someone can spot what I am missing. The errors are marked in the code below.
private void button1_Click(object sender, EventArgs e)
{
    Random rnd = new Random();
    StringBuilder bin = new StringBuilder();
    int buf = 0;
    int bufLen = 0;
    int left = 53;
    for (int i = 106; i >= 1; i += -1)
    {
        buf <<= 1;
        if (rnd.Next(i) < left)
        {
            buf += 1;
            left -= 1;
        }
        bufLen += 1;
        if (bufLen == 4)
        {
            bin.Append("0123456789ABCDEF"(buf)); // error here: the string is "called" like a method
            bufLen = 0;
            buf = 0;
        }
    }
    string b = bin.ToString();
    bin.Append("048c"(buf)); // error here: same problem
    System.Security.Cryptography.SHA1Managed m = new System.Security.Cryptography.SHA1Managed();
    byte[] hash = m.ComputeHash(Encoding.UTF8.GetBytes(b));
    // replace first two bits in hash with bits from buf
    hash(0) = Convert.ToByte(hash(0) & 0x3f | (buf * 64)); // error here: hash is an array, not a method
    // append 24 bits from hash
    b = b.Substring(0, 26) + BitConverter.ToString(hash, 0, 3).Replace("-", string.Empty);
}
x(y) means "call x with y as a parameter".
You have written "0123456789ABCDEF"(buf), but "0123456789ABCDEF" isn't a function (or a functor), so you can't call it.
Perhaps you meant to index it, as "0123456789ABCDEF"[buf]? That returns the buf'th character of "0123456789ABCDEF", which is buf in hexadecimal as long as buf is between 0 and 15.
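Applied to your code, the two offending lines would become something like this (a sketch; in C# square brackets do both string and array indexing):
bin.Append("0123456789ABCDEF"[buf]); // index the string instead of calling it
// ...
hash[0] = Convert.ToByte(hash[0] & 0x3f | (buf * 64)); // arrays are indexed with [ ] as well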
You can't call a string literal as though it were a function to combine it with a string variable:
#include <iostream>
#include <string>
using std::cout;

void concatenate(const std::string& s)
{
    cout << "In concatenate, string passed is: "
         << s
         << "\n";
}

int main(void)
{
    std::string world = " World!\n";
    concatenate("Hello"(world)); // same mistake: a string literal used as a function
    return 0;
}
Thomas#HastaLaVista ~/concatenation
# g++ -o main.exe main.cpp
main.cpp: In function `int main()':
main.cpp:15: error: `"Hello"' cannot be used as a function
Thomas#HastaLaVista ~/concatenation
# g++ --version
g++ (GCC) 3.4.4 (cygming special, gdc 0.12, using dmd 0.125)
Copyright (C) 2004 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
You will need a temporary string variable:
if (bufLen == 4)
{
    std::string temp("0123456789ABCDEF");
    temp += buf;
    bin.Append(temp);
    bufLen = 0;
    buf = 0;
}

Convert from BitArray to Byte

I have a BitArray with a length of 8, and I need a function to convert it to a byte. How do I do that?
Specifically, I need a correct implementation of ConvertToByte:
BitArray bit = new BitArray(new bool[]
{
    false, false, false, false,
    false, false, false, true
});

// How to write ConvertToByte?
byte myByte = ConvertToByte(bit);
var recoveredBit = new BitArray(new[] { myByte });
Assert.AreEqual(bit, recoveredBit);
This should work:
byte ConvertToByte(BitArray bits)
{
    if (bits.Count != 8)
    {
        throw new ArgumentException("bits");
    }
    byte[] bytes = new byte[1];
    bits.CopyTo(bytes, 0);
    return bytes[0];
}
A bit late post, but this works for me:
public static byte[] BitArrayToByteArray(BitArray bits)
{
    byte[] ret = new byte[(bits.Length - 1) / 8 + 1];
    bits.CopyTo(ret, 0);
    return ret;
}
Works with:
string text = "Test";
byte[] bytes = System.Text.Encoding.ASCII.GetBytes(text);
BitArray bits = new BitArray(bytes);
byte[] bytesBack = BitArrayToByteArray(bits);
string textBack = System.Text.Encoding.ASCII.GetString(bytesBack);
// bytes == bytesBack
// text == textBack
A poor man's solution:
// Note: this treats bits[0] as the most significant bit, the reverse of BitArray.CopyTo
protected byte ConvertToByte(BitArray bits)
{
    if (bits.Count != 8)
    {
        throw new ArgumentException("illegal number of bits");
    }
    byte b = 0;
    if (bits.Get(7)) b++;
    if (bits.Get(6)) b += 2;
    if (bits.Get(5)) b += 4;
    if (bits.Get(4)) b += 8;
    if (bits.Get(3)) b += 16;
    if (bits.Get(2)) b += 32;
    if (bits.Get(1)) b += 64;
    if (bits.Get(0)) b += 128;
    return b;
}
Unfortunately, the BitArray class is only partially implemented in .NET Core (UWP); for example, it cannot call the CopyTo() and Count() members. I wrote this extension to fill the gap:
public static IEnumerable<byte> ToBytes(this BitArray bits, bool MSB = false)
{
    int bitCount = 7;
    int outByte = 0;
    foreach (bool bitValue in bits)
    {
        if (bitValue)
            outByte |= MSB ? 1 << bitCount : 1 << (7 - bitCount);
        if (bitCount == 0)
        {
            yield return (byte)outByte;
            bitCount = 8;
            outByte = 0;
        }
        bitCount--;
    }
    // Last partially decoded byte
    if (bitCount < 7)
        yield return (byte)outByte;
}
The method decodes the BitArray to a byte sequence using LSB (least significant bit first) logic, which is the same logic used by the BitArray class. Calling the method with the MSB parameter set to true produces an MSB-first byte sequence; in that case, remember that you may also need to reverse the final output byte collection.
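For example, a quick sketch using the extension above (nine bits produce two bytes; System.Linq is used to materialize the lazy sequence):
var bits = new BitArray(new[] { true, false, false, false, false, false, false, false, true });
byte[] lsb = bits.ToBytes().ToArray();          // { 0x01, 0x01 }: bit 0 is the low bit of each byte
byte[] msb = bits.ToBytes(MSB: true).ToArray(); // { 0x80, 0x80 }: bit 0 is the high bit instead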
This should do the trick. However the previous answer is quite likely the better option.
public byte ConvertToByte(BitArray bits)
{
    if (bits.Count > 8)
        throw new ArgumentException("ConvertToByte can only work with a BitArray containing a maximum of 8 values");
    byte result = 0;
    for (byte i = 0; i < bits.Count; i++)
    {
        if (bits[i])
            result |= (byte)(1 << i);
    }
    return result;
}
In the example you posted, the resulting byte will be 0x80. In other words, the first value in the BitArray corresponds to the least significant bit of the returned byte.
This one works with any length of array:
private List<byte> BoolList2ByteList(List<bool> values)
{
    List<byte> ret = new List<byte>();
    int count = 0;
    byte currentByte = 0;
    foreach (bool b in values)
    {
        if (b) currentByte |= (byte)(1 << count);
        count++;
        if (count == 8) { ret.Add(currentByte); currentByte = 0; count = 0; } // flush after 8 bits, not 7
    }
    if (count > 0) ret.Add(currentByte); // flush a trailing partial byte only
    return ret;
}
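A quick check of the flush boundaries (a sketch; nine input bits should pack into exactly two bytes):
var values = new List<bool> { true, false, false, false, false, false, false, false, true };
List<byte> packed = BoolList2ByteList(values);
// packed.Count == 2; packed[0] == 0x01 and packed[1] == 0x01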
In addition to @JonSkeet's answer, you can use an extension method as below:
public static byte ToByte(this BitArray bits)
{
    if (bits.Count != 8)
    {
        throw new ArgumentException("bits");
    }
    byte[] bytes = new byte[1];
    bits.CopyTo(bytes, 0);
    return bytes[0];
}
And use it like:
BitArray foo = new BitArray(new bool[]
{
    false, false, false, false, false, false, false, true
});
foo.ToByte();
byte GetByte(BitArray input)
{
    int len = input.Length;
    if (len > 8)
        len = 8;
    int output = 0;
    for (int i = 0; i < len; i++)
        if (input.Get(i))
            output += (1 << (len - 1 - i)); // treats bits[0] as the most significant bit
            //output += (1 << i);           // use this instead for bits[0] = least significant bit
    return (byte)output;
}
Cheers!
Little-endian byte array converter: the first bit in the BitArray (indexed with "0") is assumed to represent the least significant bit (the rightmost bit of the octet), interpreted as "zero" or "one" in binary.
public static class BitArrayExtender
{
    public static byte[] ToByteArray(this BitArray bits)
    {
        const int BYTE = 8;
        int length = (bits.Count / BYTE) + ((bits.Count % BYTE == 0) ? 0 : 1);
        var bytes = new byte[length];

        for (int i = 0; i < bits.Length; i++)
        {
            int bitIndex = i % BYTE;
            int byteIndex = i / BYTE;
            int mask = (bits[i] ? 1 : 0) << bitIndex;
            bytes[byteIndex] |= (byte)mask;
        } // for

        return bytes;
    } // ToByteArray
} // class
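Usage, as a small sketch (bit 0 lands in the least significant bit of the first byte, per the little-endian convention described above):
var bits = new BitArray(new[] { true, false, false, false, false, false, false, false, true });
byte[] bytes = bits.ToByteArray();
// bytes[0] == 0x01 and bytes[1] == 0x01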
