In the code below, I read a file from a Bluetooth device. Is there any way to improve the code and its speed?
using (FileStream fs = new FileStream(destination,
                                      FileMode.OpenOrCreate,
                                      FileAccess.ReadWrite))
{
    byte[] firstByte = new byte[1] { (byte)readByte };
    fs.Write(firstByte, 0, 1);
    // Size is identified. Read until that size.
    for (int i = 1; i < fileSizeInBytes; i++)
    {
        readByte = 0;
        byte[] fb = new byte[1];
        if ((readByte = inStream.Read(fb, 0, 1)) > 0)
        {
            fs.Write(fb, 0, readByte);
        }
    }
}
Try using a larger buffer. Array.Resize creates a new byte array of the larger size and copies the elements from yours:
Array.Resize(ref fb, 1024);
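The bigger win, though, is to stop reading one byte at a time. A minimal sketch of a chunked copy loop, assuming inStream and fileSizeInBytes from the question and keeping the already-read first byte (the 4096-byte chunk size is an arbitrary choice):

using (FileStream fs = new FileStream(destination, FileMode.OpenOrCreate, FileAccess.ReadWrite))
{
    fs.WriteByte((byte)readByte);                // first byte was already read
    byte[] buffer = new byte[4096];              // read in chunks, not single bytes
    int remaining = fileSizeInBytes - 1;
    int read;
    while (remaining > 0 &&
           (read = inStream.Read(buffer, 0, Math.Min(buffer.Length, remaining))) > 0)
    {
        fs.Write(buffer, 0, read);               // write only what was actually received
        remaining -= read;
    }
}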
I have been experiencing some issues with incompatibilities between the different implementations of zlib compression.
As a test case I thought to create test data with 10000 doubles ranging from 0 to 9999.
I created some test code that compresses and decompresses this data using the compress and uncompress functions in zlib.c:
int Test(char* comparisonFile)
{
    unsigned long partsSize = 0x80000;
    const int arraySize = 10000;
    Bytef doubleArray[sizeof(double) * arraySize];
    Bytef outBuffer[sizeof(double) * arraySize];
    for (int i = 0; i < arraySize; i++)
    {
        Bytef doubleBytes[sizeof(double)];
        *(double*)(doubleBytes) = (double)i;
        for (int x = 0; x < 8; x++)
            doubleArray[(8 * i) + x] = doubleBytes[x];
    }
    compress(outBuffer, &partsSize, doubleArray, sizeof(double) * arraySize);

    // Create a file of compressed data.
    const char* filename = "zlibCompressed";
    FILE* file = fopen(filename, "w+b");
    int compressResult = et_int64(fwrite((char*)outBuffer, 1, size_t(partsSize), file));
    fclose(file);

    // Load the file of compressed data, either from zlib or another source.
    if (comparisonFile != NULL)
        filename = comparisonFile;
    FILE* compressedFile = fopen(filename, "r+b");
    if (compressedFile == NULL)
        return -1;
    unsigned long outBufferSize = sizeof(double) * arraySize;
    fseek(compressedFile, 0, SEEK_END);
    long fsize = ftell(compressedFile);
    fseek(compressedFile, 0, SEEK_SET); /* same as rewind(f); */
    partsSize = (unsigned long)fsize;
    double* doubleResult = new double[arraySize];
    Bytef* inBuffer = (Bytef*)malloc(sizeof(Bytef) * partsSize);
    int readresult = et_int64(fread((char*)inBuffer, 1, partsSize, compressedFile));
    if (readresult != partsSize)
        return -1;
    Bytef* uncompressedOutBuffer = static_cast<Bytef*>((void*)doubleResult);
    int result = uncompress(uncompressedOutBuffer, &outBufferSize, inBuffer, partsSize);
    for (int i = 0; i < arraySize; i++)
    {
        // Uncompressed data does not match the expectation.
        if ((int)doubleResult[i] != i)
            return -2;
    }
    fclose(compressedFile);
    return 0;
}
This allows me to test the internal compression and also to substitute compression results from C#.
However, when I use Ionic (DotNetZip) or the standard DeflateStream in the following manner, I can only recover about 8150 of the expected 10000 doubles.
The uncompress call returns:
Z_DATA_ERROR
Given that these formats are in theory interoperable, I'm not sure why C# compression results can only be partially unpacked with Adler's zlib. Any help out there?
public void ZlibTest()
{
    byte[] buffer;
    using (var ms = new MemoryStream())
    {
        for (int i = 0; i < 10000; i++)
            ms.Write(BitConverter.GetBytes((double)i), 0, sizeof(double));
        buffer = ms.ToArray();
    }

    var file = "dummy1";
    if (File.Exists(file))
        File.Delete(file);
    using (Stream fs = new FileStream(file, FileMode.OpenOrCreate, FileAccess.ReadWrite))
    {
        using (var resultStream = new MemoryStream())
        {
            using (var compressionStream2 = new Ionic.Zlib.ZlibStream(resultStream, Ionic.Zlib.CompressionMode.Compress, CompressionLevel.Default))
            {
                compressionStream2.Write(buffer, 0, buffer.Length);
                var packetLength = (int)resultStream.Length;
                fs.Write(resultStream.ToArray(), 0, packetLength);
            }
        }
    }

    file = "dummy2";
    using (Stream fs = new FileStream(file, FileMode.OpenOrCreate, FileAccess.ReadWrite))
    {
        using (var resultStream = new MemoryStream())
        {
            using (var compressionStream2 = new System.IO.Compression.DeflateStream(resultStream, System.IO.Compression.CompressionMode.Compress))
            {
                compressionStream2.Write(buffer, 0, buffer.Length);
                var packetLength = (int)resultStream.Length;
                fs.Write(BitConverter.GetBytes((ushort)40056), 0, sizeof(ushort));
                fs.Write(resultStream.ToArray(), 0, packetLength);
            }
        }
    }
}
They're both using the same zlib. There must be an error in your C# code. It is possible that you are not reading the file in binary mode in C#, which would cause occasional corruption.
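One concrete bug worth checking in the posted C# (my observation, not part of the original answer): resultStream.ToArray() is called inside the using block, before the compression stream has been closed, so the final deflate block is still buffered and never reaches the file. That would explain recovering only about 8150 of the 10000 values. The raw DeflateStream variant additionally lacks the four-byte Adler-32 trailer that zlib's uncompress verifies. A sketch of the corrected write path, using the same classes and the buffer array from the question:

byte[] compressed;
using (var resultStream = new MemoryStream())
{
    using (var zlib = new Ionic.Zlib.ZlibStream(resultStream, Ionic.Zlib.CompressionMode.Compress, CompressionLevel.Default))
    {
        zlib.Write(buffer, 0, buffer.Length);
    } // Dispose flushes the final deflate block (and Adler-32 trailer) into resultStream
    compressed = resultStream.ToArray(); // snapshot only after the compressor is closed
}
File.WriteAllBytes("dummy1", compressed);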
I transfer a video stream from one C# app to another. When I set the frame rate high, I don't get the right data after some time.
// Receiver
Socket s;
IPAddress ipAd = IPAddress.Parse("127.0.0.1");
var ip = IPAddress.Any;
myList = new TcpListener(ipAd, 8001);
myList.Start();
s = myList.AcceptSocket();
while (runThread)
{
    int buffertype = -1;
    MemoryStream img = null;
    int size = 0;
    int pos = 0;
    try
    {
        // Length of frame.
        byte[] lengthb = new byte[32];
        int k = s.Receive(lengthb, SocketFlags.None);
        size = BitConverter.ToInt32(lengthb, 0);
        pos = 0;
        byte[] lengthframe = new byte[size];
        // Chunking of data.
        int chunksize = 100;
        while (pos < size - chunksize)
        {
            int rec = s.Receive(lengthframe, pos, chunksize, SocketFlags.None);
            pos += rec;
        }
        s.Receive(lengthframe, pos, size - pos, SocketFlags.None);
        img = new MemoryStream(lengthframe);
    }
    catch
    {
        // (exception handling elided in the question)
    }
}
// Other app: Sender (everything is sent).
MemoryStream ms = new MemoryStream();
img.CopyTo(ms);
byte[] ba = ms.ToArray();
var buffer = BitConverter.GetBytes(ba.Length);
stm.Write(buffer, 0, buffer.Length);
stm.Write(ba, 0, ba.Length);
How can I be sure that all packets are received?
sender:
stm.Write(buffer, 0, buffer.Length);
receiver:
byte[] lengthb = new byte[32];
int k = s.Receive(lengthb, SocketFlags.None);
Are you sure buffer.Length is 32 bytes long (256 bits)? I suppose it's rather 4 or 8 bytes long. So you're potentially receiving a 32-byte buffer which includes your length field at the beginning, with the rest being part of your encoded video stream - which you then throw away. That doesn't sound good.
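A sketch of how the receiver could read an exact number of bytes, assuming the sender writes a 4-byte little-endian length prefix as in the posted code (the ReceiveExact helper is hypothetical, not part of the question):

static void ReceiveExact(Socket s, byte[] buffer, int count)
{
    int pos = 0;
    while (pos < count)
    {
        int rec = s.Receive(buffer, pos, count - pos, SocketFlags.None);
        if (rec == 0)
            throw new EndOfStreamException("Connection closed mid-frame.");
        pos += rec;
    }
}

// Usage: read the 4-byte length prefix, then exactly that many payload bytes.
byte[] lengthb = new byte[4];
ReceiveExact(s, lengthb, 4);
int size = BitConverter.ToInt32(lengthb, 0);
byte[] frame = new byte[size];
ReceiveExact(s, frame, size);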
I read a binary file into hex, block by block.
The result is different when I use FileStream.Read and File.ReadAllBytes.
FileStream.Read:
int limit = 0;
if (openFileDlg.FileName.Length > 0)
{
    fileName = openFileDlg.FileName;
    FileStream fs = new FileStream(fileName, FileMode.Open, FileAccess.Read);
    fsLen = (int)fs.Length;
    int count = 0;
    limit = 100;
    byte[] read_buff = new byte[limit];
    StringBuilder sb = new StringBuilder();
    while ((count = fs.Read(read_buff, 0, limit)) > 0)
    {
        foreach (byte b in read_buff)
        {
            sb.Append(Convert.ToString(b, 16).PadLeft(2, '0'));
        }
    }
    rtxb_bin.AppendText(sb.ToString() + "\n");
}
File.ReadAllBytes:
if (openFileDlg.FileName.Length > 0)
{
    fileName = openFileDlg.FileName;
    byte[] fileBytes = File.ReadAllBytes(fileName);
    StringBuilder sb2 = new StringBuilder();
    foreach (byte b2 in fileBytes)
    {
        sb2.Append(Convert.ToString(b2, 16).PadLeft(2, '0'));
    }
    rtxb_allbin.AppendText(sb2.ToString());
}
In case 1, the result is
........04c0020f00452a00421346108129844f2138448500208020250405250043188510812e0
and in case 2 it is
.......04c0020f00452a00421346108129844f2138448500208020250405250043188510812e044f212cc48120c24125404f2069c2c0008bff35f8f401efbd17047
FileStream.Read doesn't seem to read past '12e0'; '44f212cc48120c24125404f2069c2c0008bff35f8f401efbd17047' is missing.
How can I read all bytes using FileStream.Read? Why doesn't FileStream.Read read the last block?
Most likely it only appears to you that it does not read the last block. Suppose you have a file of length 102. The first iteration of your loop reads the first 100 bytes; all is fine. But what happens on the second (last) one? You read two bytes into read_buff, which is of length 100. Now that buffer contains 2 bytes of the last block and 98 bytes of the previous (first) block, because Read doesn't clear the buffer. Then you proceed with:
foreach (byte b in read_buff)
{
    sb.Append(Convert.ToString(b, 16).PadLeft(2, '0'));
}
As a result, sb has 100 bytes of the first block, 2 bytes of the last block, and then again 98 bytes of the first block. If you don't look too closely, it might appear that it just skipped the last block, while in reality it duplicated part of the previous one.
To fix this, use count (which indicates how many bytes were actually read into the buffer) to work only with the valid part of read_buff:
for (int i = 0; i < count; i++)
{
    sb.Append(Convert.ToString(read_buff[i], 16).PadLeft(2, '0'));
}
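Putting it together, the corrected read loop from the question would look like this (a sketch; variable names as in the original):

while ((count = fs.Read(read_buff, 0, limit)) > 0)
{
    // Only the first `count` bytes of read_buff are valid for this iteration.
    for (int i = 0; i < count; i++)
    {
        sb.Append(Convert.ToString(read_buff[i], 16).PadLeft(2, '0'));
    }
}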
You need to update offset and count.
Syntax:
public override int Read(
    byte[] array,
    int offset,
    int count
)
Example:
public static byte[] ReadFile(string filePath)
{
    byte[] buffer;
    FileStream fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
    try
    {
        int length = (int)fileStream.Length;  // get file length
        buffer = new byte[length];            // create buffer
        int count;                            // actual number of bytes read
        int sum = 0;                          // total number of bytes read
        // Read until Read returns 0 (end of the stream has been reached).
        while ((count = fileStream.Read(buffer, sum, length - sum)) > 0)
            sum += count;  // sum is the buffer offset for the next read
    }
    finally
    {
        fileStream.Close();
    }
    return buffer;
}
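Usage is then a single call (using the dialog's file name from the question):

byte[] fileBytes = ReadFile(openFileDlg.FileName);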
private const int megabyte = 1024 * 1024;  // assumed class-level constant (not shown in the original)

public static void ReadAndProcessLargeFile(string theFilename, long whereToStartReading = 0)
{
    FileStream fileStream = new FileStream(theFilename, FileMode.Open, FileAccess.Read);
    using (fileStream)
    {
        byte[] buffer = new byte[megabyte];
        fileStream.Seek(whereToStartReading, SeekOrigin.Begin);
        int bytesRead = 0;
        while ((bytesRead = fileStream.Read(buffer, 0, megabyte)) > 0)
        {
            // Pass bytesRead so only the valid part of the buffer is processed;
            // this solves the last-read problem without reallocating the buffer.
            ProcessChunk(buffer, bytesRead);
        }
    }
}
private static void ProcessChunk(byte[] buffer, int bytesRead)
{
    // Do the processing here.
    string utfString = Encoding.UTF8.GetString(buffer, 0, bytesRead);
    Console.Write(utfString);
}
I have a method to stream MP3 from URL sources. In this method, the file begins downloading, and at the same time the downloaded bytes are stored in a MemoryStream. But I realized that this method is not good, because RAM usage is approximately 50 MB while the MP3 file is being played.
So I want to do it without storing in a MemoryStream. I tried storing the downloaded bytes in a temporary file, but it didn't work. How can I fix it to work with a FileStream?
It works well using a MemoryStream:
MemoryStream ms = new MemoryStream();
int bytesRead = 0;
long pos = 0;
int total = 0;
do
{
    bytesRead = responseStream.Read(buffer, 0, buffer.Length);
    Buffer.BlockCopy(buffer, 0, bigBuffer, total, bytesRead);
    total += bytesRead;
    pos = ms.Position;
    ms = new MemoryStream(bigBuffer);
    ms.Position = pos;
    frame = Mp3Frame.LoadFromStream(ms);
    // Other code to play the MP3...
}
while (bytesRead > 0 || waveOut.PlaybackState == PlaybackState.Playing);
With a FileStream, this code throws an exception on the LoadFromStream line:
int bytesRead = 0;
long pos = 0;
int total = 0;
string path = Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData), "temp.mp3");
FileStream fs = new FileStream(path, FileMode.Create, FileAccess.ReadWrite);
do
{
    bytesRead = responseStream.Read(buffer, 0, buffer.Length);
    total += bytesRead;
    pos = fs.Position;
    fs.Write(buffer, 0, bytesRead);
    fs.Position = pos;
    frame = Mp3Frame.LoadFromStream(fs);
    // Other code to play the MP3...
}
while (bytesRead > 0 || waveOut.PlaybackState == PlaybackState.Playing);
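A plausible cause (my diagnosis, not from the question): a FileStream has a single Position shared by reads and writes. After Mp3Frame.LoadFromStream advances it, the next Write lands at the read position and overwrites data that hasn't been decoded yet. A minimal sketch that keeps separate read and write offsets, assuming the same responseStream, buffer, fs, Mp3Frame, and waveOut as in the question:

long writePos = 0;   // where the next downloaded chunk goes
long readPos = 0;    // where the decoder continues reading
do
{
    bytesRead = responseStream.Read(buffer, 0, buffer.Length);

    // Append new data at the end of the written region.
    fs.Position = writePos;
    fs.Write(buffer, 0, bytesRead);
    writePos += bytesRead;

    // Resume decoding where the previous frame ended.
    fs.Position = readPos;
    frame = Mp3Frame.LoadFromStream(fs);
    readPos = fs.Position;

    // Other code to play the MP3...
}
while (bytesRead > 0 || waveOut.PlaybackState == PlaybackState.Playing);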
I'm working with large files, starting from 10 GB. I load parts of the file into memory for processing. The following code works fine for smaller files (700 MB):
byte[] byteArr = new byte[layerPixelCount];
using (FileStream fs = File.OpenRead(recFileName))
{
    using (BinaryReader br = new BinaryReader(fs))
    {
        fs.Seek(offset, SeekOrigin.Begin);
        for (int i = 0; i < byteArr.Length; i++)
        {
            byteArr[i] = (byte)(br.ReadUInt16() / 256);
        }
    }
}
After opening a 10 GB file, the first run of this function is OK, but the second Seek() throws an IO exception:
An attempt was made to move the file pointer before the beginning of the file.
The numbers are:
fs.Length = 11998628352
offset = 4252580352
byteArr.Length = 7746048
I assumed that the GC didn't collect the closed fs reference before the second call and tried
GC.Collect();
GC.WaitForPendingFinalizers();
but no luck.
Any help is appreciated.
I'm guessing it's because either your signed integer indexer or offset is rolling over to negative values. Try declaring offset and i as long.
// Offset is now a long.
long offset = 4252580352;
byte[] byteArr = new byte[layerPixelCount];
using (FileStream fs = File.OpenRead(recFileName))
{
    using (BinaryReader br = new BinaryReader(fs))
    {
        fs.Seek(offset, SeekOrigin.Begin);
        for (long i = 0; i < byteArr.Length; i++)
        {
            byteArr[i] = (byte)(br.ReadUInt16() / 256);
        }
    }
}
The following code logic works correctly with large files beyond 4 GB. The key issue to notice is the long data type used with the Seek method, since a long can address beyond the 2^32 byte boundary. The code first processes the file in whole 1 GB chunks; after those are processed, the leftover (< 1 GB) bytes are processed. I use this code for calculating the CRC of files beyond 4 GB in size (using https://crc32c.machinezoo.com/ for the CRC-32C calculation in this example).
private uint Crc32CAlgorithmBigCrc(string fileName)
{
    uint hash = 0;
    byte[] buffer = null;
    FileInfo fileInfo = new FileInfo(fileName);
    long fileLength = fileInfo.Length;
    int blockSize = 1024000000;
    int blocks = (int)(fileLength / blockSize);
    // Cast before multiplying so blocks * blockSize doesn't overflow int.
    int restBytes = (int)(fileLength - ((long)blocks * blockSize));
    long offsetFile = 0;
    bool firstBlock = true;
    using (FileStream fs = new FileStream(fileName, FileMode.Open, FileAccess.Read))
    {
        using (BinaryReader br = new BinaryReader(fs))
        {
            while (blocks > 0)
            {
                blocks -= 1;
                fs.Seek(offsetFile, SeekOrigin.Begin);
                buffer = br.ReadBytes(blockSize);
                if (firstBlock)
                {
                    firstBlock = false;
                    hash = Crc32CAlgorithm.Compute(buffer);
                }
                else
                {
                    // Continue from the running hash, not from the first block's hash.
                    hash = Crc32CAlgorithm.Append(hash, buffer);
                }
                offsetFile += blockSize;
            }
            if (restBytes > 0)
            {
                fs.Seek(offsetFile, SeekOrigin.Begin);
                buffer = br.ReadBytes(restBytes);
                hash = Crc32CAlgorithm.Append(hash, buffer);
            }
        }
    }
    return hash;
}
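Usage would then be a single call (the file path is hypothetical; Crc32CAlgorithm comes from the CRC-32C package referenced above):

uint crc = Crc32CAlgorithmBigCrc(@"D:\backups\disk.img");
Console.WriteLine(crc.ToString("X8"));  // print as 8-digit hex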