I am working on a project (server side) where i need to stream data (videos, large files) to clients.
This worked perfectly using ByteRangeStreamContent, as I was serving files from disk and could create a seekable stream (FileStream).
// Serve a 206 Partial Content response when the client sent a Range header,
// otherwise stream the whole file as a normal response.
if (Request.Headers.Range != null)
{
try
{
// ByteRangeStreamContent slices the seekable stream `fs` according to the
// requested range(s) and emits the Content-Range header automatically.
HttpResponseMessage partialResponse = Request.CreateResponse(HttpStatusCode.PartialContent);
partialResponse.Content = new ByteRangeStreamContent(fs, Request.Headers.Range, mediaType);
return partialResponse;
}
catch (InvalidByteRangeException invalidByteRangeException)
{
// Range was syntactically valid but unsatisfiable -> 416 response.
return Request.CreateErrorResponse(invalidByteRangeException);
}
}
else
{
// No Range header: send the full stream.
// NOTE(review): `response` and `fs` are declared outside this snippet —
// presumably a 200 HttpResponseMessage and a seekable FileStream.
response.Content = new StreamContent(fs);
response.Content.Headers.ContentType = mediaType;
return response;
}
But, i moved the file provider from disk to an external service. The service allows me to get chunks of data (Range{0}-{1}).
Of course, it's not possible to download whole file in memory and then use a MemoryStream for ByteRangeStreamContent because of the obvious reasons (too many concurrent downloads will consume all the available memory at some point).
I found this article https://vikingerik.wordpress.com/2014/09/28/progressive-download-support-in-asp-net-web-api/ where the author says:
A change request I got for my library was to support reading only the
necessary data and sending that out rather than opening a stream for
the full data. I wasn’t sure what this would buy until the user
pointed out they are reading their resource data from a WCF stream
which does not support seeking and would need to read the whole stream
into a MemoryStream in order to allow the library to generate the
output.
That limitation still exists in this specific object but there is a
workaround. Instead of using a ByteRangeStreamContent, you could
instead use a ByteArrayContent object instead. Since the majority of
RANGE requests will be for a single start and end byte, you could pull
the range from the HttpRequestMessage, retrieve only the bytes you
need and send it back out as a byte stream. You’ll also need to add
the CONTENT-RANGE header and set the response code to 206
(PartialContent) but this could be a viable alternative (though I
haven’t tested it) for users who do not want or can’t easily get a
compliant stream object.
So, my question basically is: how can I do that?
I finally managed to do it.
Here's how:
Custom implementation of a stream:
/// <summary>
/// Read-only, seekable Stream over a resource that can only be fetched in byte
/// ranges (e.g. an external HTTP service). Data is pulled on demand one chunk
/// at a time and buffered in a MemoryStream, so the full resource is never
/// resident in memory. This makes ByteRangeStreamContent usable against
/// non-seekable remote sources.
/// </summary>
public class BufferedHTTPStream : Stream
{
    private readonly Int64 cacheLength;               // chunk size in bytes
    private const Int32 noDataAvailable = 0;
    private MemoryStream stream = null;               // buffer holding the current chunk
    private Int64 currentChunkNumber = -1;            // -1 => nothing buffered yet
    private Int64? length;                            // lazily fetched total length
    private Boolean isDisposed = false;
    private readonly Func<long, long, Stream> _getStream;   // (rangeStart, rangeEnd) -> data stream
    private readonly Func<long> _getContentLength;          // () -> total resource length

    /// <param name="streamFunc">Returns a stream with the bytes of the inclusive range [start, end].</param>
    /// <param name="lengthFunc">Returns the total length of the resource in bytes.</param>
    /// <param name="cacheSize">Chunk/buffer size in bytes (defaults to the previous hard-coded 4,000,000).</param>
    public BufferedHTTPStream(Func<long, long, Stream> streamFunc, Func<long> lengthFunc, long cacheSize = 4000000)
    {
        if (streamFunc == null) { throw new ArgumentNullException("streamFunc"); }
        if (lengthFunc == null) { throw new ArgumentNullException("lengthFunc"); }
        if (cacheSize <= 0) { throw new ArgumentOutOfRangeException("cacheSize"); }
        _getStream = streamFunc;
        _getContentLength = lengthFunc;
        cacheLength = cacheSize;
    }

    public override Boolean CanRead
    {
        get { EnsureNotDisposed(); return true; }
    }

    public override Boolean CanWrite
    {
        get { EnsureNotDisposed(); return false; }
    }

    public override Boolean CanSeek
    {
        get { EnsureNotDisposed(); return true; }
    }

    public override Int64 Length
    {
        get
        {
            EnsureNotDisposed();
            if (length == null) { length = _getContentLength(); }  // fetched once, then cached
            return length.Value;
        }
    }

    public override Int64 Position
    {
        get
        {
            EnsureNotDisposed();
            Int64 streamPosition = (stream != null) ? stream.Position : 0;
            Int64 position = (currentChunkNumber != -1) ? currentChunkNumber * cacheLength : 0;
            return position + streamPosition;
        }
        set
        {
            EnsureNotDisposed();
            EnsurePositiv(value, "Position");
            Seek(value);
        }
    }

    public override Int64 Seek(Int64 offset, SeekOrigin origin)
    {
        EnsureNotDisposed();
        switch (origin)
        {
            case SeekOrigin.Begin:
                break;
            case SeekOrigin.Current:
                offset = Position + offset;
                break;
            default: // SeekOrigin.End
                offset = Length + offset;
                break;
        }
        return Seek(offset);
    }

    // Buffers the chunk containing `offset` (if not already buffered) and
    // positions the chunk stream on the requested byte.
    private Int64 Seek(Int64 offset)
    {
        Int64 chunkNumber = offset / cacheLength;
        if (currentChunkNumber != chunkNumber)
        {
            ReadChunk(chunkNumber);
            currentChunkNumber = chunkNumber;
        }
        offset = offset - currentChunkNumber * cacheLength;
        stream.Seek(offset, SeekOrigin.Begin);
        return Position;
    }

    private void ReadNextChunk()
    {
        currentChunkNumber += 1;
        ReadChunk(currentChunkNumber);
    }

    // Downloads one chunk into the MemoryStream buffer. At/past the end of the
    // resource an empty buffer is installed (the old code returned early and
    // left a stale or null buffer behind, which could NRE on a later Seek).
    private void ReadChunk(Int64 chunkNumberToRead)
    {
        if (stream != null) { stream.Close(); }
        Int64 rangeStart = chunkNumberToRead * cacheLength;
        if (rangeStart >= Length)
        {
            stream = new MemoryStream();
            return;
        }
        Int64 rangeEnd = Math.Min(rangeStart + cacheLength, Length) - 1; // range is inclusive
        stream = new MemoryStream((int)cacheLength);
        using (var responseStream = _getStream(rangeStart, rangeEnd))
        {
            // Only rewind when the source supports it — remote/network streams are
            // typically not seekable, and the old unconditional `Position = 0` threw.
            if (responseStream.CanSeek) { responseStream.Position = 0; }
            responseStream.CopyTo(stream);
        }
        stream.Position = 0;
    }

    // Idempotent, per the Stream contract (the old version called
    // EnsureNotDisposed and so threw when Close/Dispose ran twice).
    public override void Close()
    {
        if (isDisposed) { return; }
        isDisposed = true;
        if (stream != null) { stream.Close(); }
        base.Close();
    }

    public override Int32 Read(Byte[] buffer, Int32 offset, Int32 count)
    {
        EnsureNotDisposed();
        EnsureNotNull(buffer, "buffer");
        EnsurePositiv(offset, "offset");
        EnsurePositiv(count, "count");
        if (buffer.Length - offset < count) { throw new ArgumentException("count"); }

        if (stream == null) { ReadNextChunk(); }
        if (Position >= Length) { return noDataAvailable; }
        if (Position + count > Length)
        {
            count = (Int32)(Length - Position); // clamp to end of resource
        }

        Int32 bytesRead = stream.Read(buffer, offset, count);
        Int32 totalBytesRead = bytesRead;
        count -= bytesRead;

        // The requested span may straddle chunk boundaries: keep pulling chunks
        // until the (clamped) count is satisfied.
        while (count > noDataAvailable)
        {
            ReadNextChunk();
            offset = offset + bytesRead;
            bytesRead = stream.Read(buffer, offset, count);
            if (bytesRead == noDataAvailable) { break; } // defensive: source returned less than promised
            count -= bytesRead;
            totalBytesRead = totalBytesRead + bytesRead;
        }
        return totalBytesRead;
    }

    // Read-only stream: NotSupportedException is the contractual answer for
    // write-side members (the old code threw NotImplementedException).
    public override void SetLength(Int64 value)
    {
        EnsureNotDisposed();
        throw new NotSupportedException();
    }

    public override void Write(Byte[] buffer, Int32 offset, Int32 count)
    {
        EnsureNotDisposed();
        throw new NotSupportedException();
    }

    public override void Flush()
    {
        EnsureNotDisposed(); // nothing is buffered for writing; no-op
    }

    private void EnsureNotNull(Object obj, String name)
    {
        if (obj != null) { return; }
        throw new ArgumentNullException(name);
    }

    private void EnsureNotDisposed()
    {
        if (!isDisposed) { return; }
        throw new ObjectDisposedException("BufferedHTTPStream");
    }

    private void EnsurePositiv(Int64 value, String name)
    {
        if (value > -1) { return; }
        throw new ArgumentOutOfRangeException(name);
    }
}
Usage:
// Wire BufferedHTTPStream up to the external service: the first lambda fetches
// one inclusive byte range [start, end], the second reports the total length.
var fs = new BufferedHTTPStream((start, end) =>
{
// return stream from external service
}, () =>
{
// return stream length from external service
});
// ByteRangeStreamContent can now serve HTTP Range requests against the remote
// resource because BufferedHTTPStream is seekable and lazily buffered.
HttpResponseMessage partialResponse = Request.CreateResponse(HttpStatusCode.PartialContent);
partialResponse.Content = new ByteRangeStreamContent(fs, Request.Headers.Range, mediaType);
partialResponse.Content.Headers.ContentDisposition = new ContentDispositionHeaderValue("attachment")
{
FileName = fileName
};
return partialResponse;
Related
I get error
System.IO.IOException: 'The process cannot access the file 'xxx' because it is being used by another process.'
when I try to delete a temp file in a background worker service in aspnet core.
I am eventually allowed to delete the file after about a minute (52s, 73s).
If I change garbage collection to workstation mode, I may instead delete after ~1s (but still, a delay).
I have tried a combination of FileOptions to no avail, including FileOptions.WriteThrough.
When the controller writes the file, I use
FlushAsync(), Close(), Dispose() and 'using' (I know it's overkill.)
I also tried using just File.WriteAllBytesAsync, with same result.
In the background reader, I use Close() and Dispose() as well.
(hint: background reader will not allow me to use DeleteOnClose,
which would have been ideal.)
As I search stackoverflow for similar 'used by another process' issues,
all those I have found eventually resolve to
'argh it turns out I/he still had an extra open instance/reference
he forgot about',
but I have not been able to figure out that I am doing that.
Another hint:
In the writing controller, I am able to delete the file immediately
after writing it, I presume because I am still on the same thread?
Is there some secret knowledge I should read somewhere,
about being able to delete recently open files, across threads?
UPDATE: Here relevant(?) code snippets:
// (AspNet Controller)
/// <summary>
/// Receives one chunk of a chunked upload for the given session and writes it
/// to the session's upload folder. The chunk's position comes from the
/// Content-Range request header and is encoded in the file name as "from-to".
/// </summary>
[RequestSizeLimit(9999999999)]
[DisableFormValueModelBinding]
[RequestFormLims(MultipartBodyLengthLimit = MaxFileSize)]
[HttpPost("{sessionId}")]
public async Task<IActionResult> UploadRevisionChunk(Guid sessionId) {
    log.LogWarning($"UploadRevisionChunk: {sessionId}");
    string uploadFolder = UploadFolder.sessionFolderPath(sessionId);
    // A missing folder means the upload session was never started (or was cleaned
    // up). FIX: throw a specific exception type instead of bare Exception, and
    // drop the pointless '$' on a hole-free interpolated string.
    if (!Directory.Exists(uploadFolder)) { throw new DirectoryNotFoundException("chunk-upload failed"); }
    var cr = parseContentRange(Request);
    if (cr == null) { return this.BadRequest("no content range header specified"); }
    string chunkName = $"{cr.From}-{cr.To}";
    string saveChunkPath = Path.Combine(uploadFolder, chunkName);
    await streamToChunkFile_WAB(saveChunkPath); // write-all-bytes.
    //await streamToChunkFile_MAN(saveChunkPath); // Manual.
    long crTo = cr.To ?? 0;
    long crFrom = cr.From ?? 0;
    long expected = (crTo - crFrom) + 1; // Content-Range bounds are inclusive.
    var fi = new FileInfo(saveChunkPath);
    var dto = new ChunkResponse { wrote = fi.Length, expected = expected, where = "?" };
    string msg = $"at {crFrom}, wrote {dto.wrote} bytes (expected {dto.expected}) to {dto.where}";
    log.LogWarning(msg);
    return Ok(dto);
}
/// <summary>
/// Streams the request body straight to <paramref name="saveChunkPath"/>.
/// FIX: the original copied the body synchronously (Request.Body.CopyTo), which
/// ASP.NET Core rejects by default (AllowSynchronousIO == false), and it buffered
/// the entire chunk in a MemoryStream before writing. Copy asynchronously and
/// directly to the file instead — no per-chunk memory spike.
/// </summary>
private async Task streamToChunkFile_WAB(string saveChunkPath) {
    using (var fileStream = new FileStream(saveChunkPath, FileMode.Create, FileAccess.Write, FileShare.None, 4096, useAsync: true)) {
        await Request.Body.CopyToAsync(fileStream);
    }
}
// stream reader in the backgroundService:
public class MyMultiStream : Stream {
string[] filePaths;
FileStream curStream = null;
IEnumerator<string> i;
ILogger log;
QueueItem qItem;
// Builds a multi-file read stream over the given chunk files. Note: the
// streams_ argument is accepted for signature compatibility but is not used;
// files are opened lazily, one at a time, by getNextStream().
public MyMultiStream(string[] filePaths_, Stream[] streams_, ILogger log_, QueueItem qItem_) {
    filePaths = filePaths_;
    log = log_;
    qItem = qItem_;
    log.LogWarning($"filepaths has #items: {filePaths.Length}");
    i = ((IEnumerable<string>)filePaths).GetEnumerator();
    i.MoveNext(); // position the enumerator on the first file name
}
// Forward-only read stream: only Read is supported. FIX: unsupported members
// now throw NotSupportedException, as the Stream contract requires, instead of
// bare Exception (which callers cannot distinguish from real failures).
public override bool CanRead { get { return true; } }
public override bool CanWrite { get { return false; } }
public override bool CanSeek { get { return false; } }
public override long Length { get { throw new NotSupportedException("dont get length"); } }
public override long Position {
    get { throw new NotSupportedException("dont get Position"); }
    set { throw new NotSupportedException("dont set Position"); }
}
public override void SetLength(long value) { throw new NotSupportedException("dont set length"); }
public override long Seek(long offset, SeekOrigin origin) { throw new NotSupportedException("dont seek"); }
public override void Write(byte[] buffer, int offset, int count) { throw new NotSupportedException("dont write"); }
public override void Flush() { throw new NotSupportedException("dont flush"); }
// Diagnostic counters for matching stream opens against closes in the logs.
public static int openStreamCounter = 0;
public static int closedStreamCounter = 0;
string curFileName = "?"; // name of the file backing curStream (for logging)
// Opens the next chunk file named by the iterator. FileShare.ReadWrite|Delete
// is deliberate: it lets other code delete the file while we still hold it
// open. NOTE(review): assumes i.Current is valid — the constructor primed the
// iterator, and Read() only calls this after a successful MoveNext().
private FileStream getNextStream() {
string nextFileName = i.Current;
if (nextFileName == null) { throw new Exception("getNextStream should not be called past file list"); }
//tryDelete(nextFileName,log);
FileStream nextStream = new FileStream(
path:nextFileName,
mode: FileMode.Open,
access: FileAccess.Read,
share: FileShare.ReadWrite| FileShare.Delete,
bufferSize:4096, // apparently default.
options: 0
| FileOptions.Asynchronous
| FileOptions.SequentialScan
// | FileOptions.DeleteOnClose // (1) this ought to be possible, (2) we should fix this approach (3) if we can fix this, our issue is solved, and our code much simpler.
); // None); // ReadWrite); // None); // ReadWrite); //| FileShare.Read);
log.LogWarning($"TELLUS making new stream [{nextFileName}] opened:[{++openStreamCounter}] closed:[{closedStreamCounter}]");
curFileName = nextFileName;
++qItem.chunkCount;
return nextStream;
}
// Reads from the current chunk file; when it is exhausted, closes it and moves
// on to the next one. Returns 0 only when every file has been consumed (EOF
// for the consumer). May return fewer bytes than requested at file boundaries,
// which is legal Stream.Read behavior.
public override int Read(byte[] buffer, int offset, int count) {
int bytesRead = 0;
while (true) {
bytesRead = 0;
if (curStream == null) { curStream = getNextStream(); }
try {
bytesRead = curStream.Read(buffer, offset, count);
log.LogWarning($"..bytesRead:{bytesRead} [{Path.GetFileName(curFileName)}]"); // (only show a short name.)
} catch (Exception e) {
// NOTE(review): a read failure is logged and then treated like EOF for this
// file — the rest of the chunk is silently skipped. Confirm this is intended.
log.LogError($"failed reading [{curFileName}] [{e.Message}]",e);
}
if (bytesRead > 0) { break; }
// Current file is exhausted (or failed): release it and advance the iterator.
curStream.Close();
curStream.Dispose();
curStream = null;
log.LogWarning($"TELLUS closing stream [{curFileName}] opened:[{openStreamCounter}] closed:[{++closedStreamCounter}]");
//tryDelete(curFileName); Presumably we can't delete so soon.
bool moreFileNames = i.MoveNext();
log.LogWarning($"moreFileNames?{moreFileNames}");
if (!moreFileNames) {
break;
}
}
return bytesRead;
}
..
// Background worker operating multistream:
public class BackgroundChunkWorker: BackgroundService {
ILogger L;
ChunkUploadQueue q;
public readonly IServiceScopeFactory scopeFactory;
// DI constructor: stores the queue, logger and scope factory used by the loop.
public BackgroundChunkWorker(ILogger<int> log_, ChunkUploadQueue q_, IServiceScopeFactory scopeFactory_) {
    L = log_;
    q = q_;
    scopeFactory = scopeFactory_;
}
/// <summary>Entry point of the hosted service; delegates to the polling loop.</summary>
// FIX: modifier order normalized to the conventional "protected override".
protected override async Task ExecuteAsync(CancellationToken cancel) { await BackgroundProcessing(cancel); }
/// <summary>
/// Polls the upload queue once a second and processes each dequeued item.
/// FIX: on shutdown, Task.Delay throws TaskCanceledException, which the old
/// generic handler logged as a critical error; cancellation is now recognized
/// and ends the loop quietly.
/// </summary>
private async Task BackgroundProcessing(CancellationToken cancel) {
    while (!cancel.IsCancellationRequested) {
        try {
            await Task.Delay(1000, cancel);
            bool ok = q.q.TryDequeue(out var item);
            if (!ok) { continue; }
            L.LogInformation($"item found! {item}");
            await treatItemScope(item);
        } catch (OperationCanceledException) when (cancel.IsCancellationRequested) {
            break; // normal shutdown, not an error
        } catch (Exception ex) {
            L.LogCritical("An error occurred when processing. Exception: {#Exception}", ex);
        }
    }
}
// Resolves the integration service inside a fresh DI scope and delegates the work.
private async Task<bool> treatItemScope(QueueItem Qitem) {
    using var scope = scopeFactory.CreateScope();
    var ris = scope.ServiceProvider.GetRequiredService<IRevisionIntegrationService>();
    return await treatItem(Qitem, ris);
}
/// <summary>
/// Moves a queued session into the processing map and feeds its chunk files to
/// the revision integration service through a MyMultiStream.
/// FIX (root cause of the "file is being used by another process" errors): the
/// original opened a FileStream per chunk file here and never disposed them —
/// MyMultiStream ignores its streams argument and opens the files itself. The
/// orphaned handles kept the files locked until the GC ran their finalizers
/// (~a minute under server GC, ~1s under workstation GC, matching the observed
/// delays). We no longer open them at all. The pointless `await Task.Delay(0)`
/// was also removed.
/// </summary>
private async Task<bool> treatItem(QueueItem Qitem, IRevisionIntegrationService ris) {
    L.LogWarning($"TryAddValue from P {Qitem.sessionId}");
    bool addOK = q.p.TryAdd(Qitem.sessionId, Qitem);
    if (!addOK) {
        L.LogError($"why couldnt we add session {Qitem.sessionId} to processing-queue?");
        return false;
    }
    var startTime = DateTime.UtcNow;
    Guid revisionId = Qitem.revisionId;
    string[] filePaths = getFilePaths(Qitem.sessionId);
    MyMultiStream multiStream = new MyMultiStream(filePaths, Array.Empty<Stream>(), this.L, Qitem);
    BimRevisionStatus brs = await ris.UploadRevision(revisionId, multiStream, startTime);
    Qitem.status = brs;
    return true;
}
..
I'm using a library to encrypt files.
The library takes an input Stream and an output Stream.
I'd like to use the library to encrypt data and upload them to OneDrive.
I should use LargeFileUploadTask to upload the file to onedrive but I don't know if it is possible to avoid writing encrypted data to a temporary file before doing the actual upload.
Is there a way I can get a Stream to just write to while uploading the data to OneDrive?
// Encrypt `input` into `output`: the library reads plaintext from sourceStream
// and writes the ciphertext to destStream.
var encrypter = new StreamEncrypter("password");
using (var sourceStream = await OpenStream(input)) {
using (var destStream = await OpenStreamForWrite(output)) {
await encrypter.Encrypt(sourceStream, destStream);
}
}
Found a way, as suggested I implemented my own Stream. The size of the destination file must be known before the upload (in my scenario I can compute the final size because I'm using AES encryption).
For now I didn't bother to implement the Seek method (probably it gets called only when the upload is resumed because I see it called just once with 0 as the requested position).
The test progam:
// Configure the MSAL public client used to authenticate against Microsoft Graph.
var appOptions = new PublicClientApplicationOptions()
{
    ClientName = appName,
    ClientId = appId,
    TenantId = tenantId,
    RedirectUri = redirectUri
};
var app = PublicClientApplicationBuilder.CreateWithApplicationOptions(appOptions).Build();
// Persist the token cache on disk so the user is not prompted on every run.
var storageProperties = new StorageCreationPropertiesBuilder("UserTokenCache.txt", "./cache")
    .WithLinuxKeyring(
        "com.contoso.devtools.tokencache",
        MsalCacheHelper.LinuxKeyRingDefaultCollection,
        "MSAL token cache for all Contoso dev tool apps.",
        new KeyValuePair<string, string>("Version", "1"),
        new KeyValuePair<string, string>("ProductGroup", "MyApps"))
    .WithMacKeyChain(
        "myapp_msal_service",
        "myapp_msal_account")
    .Build();
var cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties);
cacheHelper.VerifyPersistence();
cacheHelper.RegisterCache(app.UserTokenCache);
// Try silent auth first; fall back to an interactive prompt when no cached token works.
var accounts = await app.GetAccountsAsync();
AuthenticationResult result;
try {
    result = await app.AcquireTokenSilent(scopes, accounts.FirstOrDefault())
        .ExecuteAsync();
} catch (MsalUiRequiredException) {
    result = await app.AcquireTokenInteractive(scopes)
        .ExecuteAsync();
}
var graphClient = new GraphServiceClient(new AuthenticationProvider(result.AccessToken));
// FIX: '#"c:\test\test.zip"' is not valid C# — a verbatim string literal uses '@'.
using (var fileStream = System.IO.File.OpenRead(@"c:\test\test.zip")) {
    UploadSession uploadSession = await graphClient.Me.Drive.Root.ItemWithPath("/test/test.zip").CreateUploadSession().Request().PostAsync();
    // Pump the source file through OneDriveStream, which in turn feeds LargeFileUploadTask.
    using (var oneDriveStream = new OneDriveStream(uploadSession, fileStream.Length)) {
        byte[] buffer = new byte[4096];
        int read;
        while ((read = await fileStream.ReadAsync(buffer, 0, buffer.Length)) > 0) {
            await oneDriveStream.WriteAsync(buffer, 0, read);
        }
        await oneDriveStream.WaitTask();
    }
}
The OneDriveStream class:
/// <summary>
/// A Stream bridging a producer (whoever calls Write) to the Graph
/// LargeFileUploadTask consumer (which calls Read), so data can be uploaded to
/// OneDrive without a temporary file. The final length must be known up front.
/// FIXES vs. the posted version:
///  - the constructor was named WriterStream, which does not compile in a class
///    named OneDriveStream;
///  - the hand-off buffer is now a Queue&lt;byte&gt; (List.RemoveAt(0) made Read O(n²));
///  - Dispose no longer calls Task.Dispose on a possibly-unfinished task, which
///    throws InvalidOperationException.
/// </summary>
internal class OneDriveStream : Stream
{
    private readonly Task? _writeTask;                 // the upload running in the background
    private long _pos;                                 // total bytes handed to the reader
    private readonly long _cacheSize = 1310720;        // upload slice size for LargeFileUploadTask
    private readonly Queue<byte> _buffer = new();      // producer/consumer hand-off buffer
    private readonly SemaphoreSlim _bufferSema = new(1, 1);

    public OneDriveStream(UploadSession session, long length)
    {
        Length = length;
        // Kick off the upload immediately; it blocks in Read until Write supplies data.
        _writeTask = new LargeFileUploadTask<DriveItem>(session, this, (int)_cacheSize).UploadAsync();
    }

    public override bool CanRead => true;
    public override bool CanSeek => true;
    public override bool CanWrite => true;
    public override long Length { get; }
    public override long Position {
        get => _pos;
        set => _pos = value;
    }

    protected override void Dispose(bool disposing)
    {
        // Only a completed task may be disposed; WaitTask() is the supported way to finish.
        if (_writeTask != null && _writeTask.IsCompleted)
            _writeTask.Dispose();
    }

    public override void Flush()
    {
        // Nothing to flush: bytes are handed to the uploader as soon as Read drains them.
    }

    // Called by LargeFileUploadTask. Blocks (politely) until the producer has
    // written enough bytes, or until the declared Length has been served.
    public override int Read(byte[] buffer, int offset, int count)
    {
        if (_pos >= Length)
            return 0;
        _bufferSema.Wait();
        int readCount = 0;
        for (int i = 0; i < count; i++) {
            if (_buffer.Count > 0) {
                buffer[offset + i] = _buffer.Dequeue();
                _pos++;
                readCount++;
                if (_pos >= Length)
                    break;
            } else {
                // Buffer empty: yield the lock so Write can refill it, then retry.
                _bufferSema.Release();
                Thread.Sleep(20);
                _bufferSema.Wait();
                i--;
            }
        }
        _bufferSema.Release();
        return readCount;
    }

    // Only ever observed with offset 0 when a session resumes; a full
    // implementation would need to reposition the producer. Kept as-is.
    public override long Seek(long offset, SeekOrigin origin)
    {
        return offset;
    }

    public override void SetLength(long value)
    {
        throw new NotSupportedException();
    }

    // Producer side: waits until the consumer has drained the previous batch,
    // then enqueues the new bytes.
    public override void Write(byte[] buffer, int offset, int count)
    {
        while (_buffer.Count > 0)
            Thread.Sleep(10);
        _bufferSema.Wait();
        for (int i = 0; i < count; i++) {
            _buffer.Enqueue(buffer[offset + i]);
        }
        _bufferSema.Release();
    }

    /// <summary>Awaits completion of the background upload.</summary>
    public async Task WaitTask()
    {
        if (_writeTask != null)
            await _writeTask;
    }
}
I'm returning a video file through IIS for a range request in a WCF service.
The end of the code looks like this:
WriteResponseHeaders(stuff);
while (remainingBytes > 0)
{
if (response.IsClientConnected) // response is a System.Web.HttpResponse
{
int chunkSize = stream.Read(buffer, 0, 10240 < remainingBytes ? 10240 : remainingBytes);
response.OutputStream.Write(buffer, 0, chunkSize);
remainingBytes -= chunkSize;
response.Flush();
}
else
{
return;
}
}
In Firefox, Internet Explorer and Opera it works correctly. In Chrome, the video will stop playing a while before the end. Fiddler shows a 504 error:
[Fiddler] ReadResponse() failed: The server did not return a response for this request. Server returned 16556397 bytes.
If I stick a breakpoint just after the loop, and let the program sit there until the video has progressed past its stopping point, Chrome will play the full video without any problem and Fiddler will show the response with all of the correct headers and such. The only code that gets executed between that breakpoint and the end of the call is to flush the log stream.
As a test, I stuck in:
while (response.IsClientConnected)
{
System.Threading.Thread.Sleep(1000);
}
after the loop and playback was fine in all browsers. My response also looked fine in Fiddler. Of course this has way too many problems to be a proper solution, but it seems to show me that this is an issue more of timing than of behaviour.
Why does allowing the code to progress past this point too soon cause a problem and how do I prevent it from doing so?
Try returning a Stream instead of writing to the response.OutputStream.
/// <summary>
/// REST-style WCF contract exposing a video resource as a raw byte stream.
/// </summary>
[ServiceContract]
public interface IStreamingService
{
/// <summary>Returns the video with the given id (GET /video?id={id}), body-only response.</summary>
[OperationContract]
[WebGet(BodyStyle=WebMessageBodyStyle.Bare, UriTemplate = "/video?id={id}")]
Stream GetVideo(string id);
}
// Minimal implementation: hand WCF the file stream and let the framework pump
// it to the client (WCF disposes the returned stream when the response completes).
public class StreamingService : IStreamingService
{
    public System.IO.Stream GetVideo(string id)
    {
        //WriteResponseHeaders(stuff);
        return File.OpenRead("c:\\Temp\\Video.mp4");
    }
}
Update:
If you want to support seeking you can either copy the chunk into a byte[] and return a MemoryStream or you could wrap your stream in a proxy that only returns a part of your full file.
/// <summary>
/// Read-only view over a window of an underlying stream, starting at
/// <c>offset</c> and spanning at most <c>length</c> bytes. Used to serve HTTP
/// Range requests without copying the slice into memory.
/// </summary>
public class PartialStream : Stream
{
    private Stream underlying;
    private long offset;   // window start within the underlying stream
    private long length;   // window size, clamped to the underlying stream's end

    public PartialStream(Stream underlying, long offset, long length)
    {
        this.underlying = underlying;
        this.offset = offset;
        // Clamp the window so it never reaches past the end of the source.
        if (offset + length > underlying.Length) {
            this.length = underlying.Length - offset;
        } else {
            this.length = length;
        }
        this.underlying.Seek(offset, SeekOrigin.Begin);
    }

    public override bool CanRead { get { return true; } }
    public override bool CanSeek { get { return false; } }
    public override bool CanWrite { get { return false; } }

    public override void Flush()
    {
        // FIX: Flush used to throw NotSupportedException. Per the Stream
        // contract, Flush on a read-only stream is a no-op; consumers (e.g.
        // CopyTo, the WCF response pump) may call it unconditionally.
    }

    public override long Length
    {
        get { return this.length; }
    }

    // Position is relative to the window, not the underlying stream.
    public override long Position
    {
        get
        {
            return this.underlying.Position - offset;
        }
        set
        {
            this.underlying.Position = offset + Math.Min(value, this.length);
        }
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        // FIX: the original added the *buffer* offset parameter to Position in
        // the bounds checks, truncating reads whenever the caller used a
        // non-zero buffer offset. Only the stream Position matters here.
        if (this.Position >= this.length)
            return 0;
        if (this.Position + count > this.length) {
            count = (int)(this.length - this.Position);
        }
        return underlying.Read(buffer, offset, count);
    }

    protected override void Dispose(bool disposing)
    {
        base.Dispose(disposing);
        this.underlying.Dispose();
    }

    // Write-side and seek members are unsupported by design.
    // FIX: NotSupportedException (contractual) instead of NotImplementedException.
    public override long Seek(long offset, SeekOrigin origin)
    {
        throw new NotSupportedException();
    }

    public override void SetLength(long value)
    {
        throw new NotSupportedException();
    }

    public override void Write(byte[] buffer, int offset, int count)
    {
        throw new NotSupportedException();
    }
}
And you have to respect the Range request header.
/// <summary>
/// Serves the video, honoring an optional Range request header: with a Range it
/// answers 206 with a capped (10 MB) PartialStream window; otherwise the whole file.
/// </summary>
public System.IO.Stream GetVideo(string id)
{
    RangeHeaderValue rangeHeader;
    bool hasRangeHeader = RangeHeaderValue.TryParse(
        WebOperationContext.Current.IncomingRequest.Headers["Range"],
        out rangeHeader);
    // FIX: guard against a parsed header with no ranges and against a suffix
    // range like "bytes=-500" where From is null — ".Ranges.First().From.Value"
    // used to throw in both cases. Those cases now start at byte 0.
    var firstRange = hasRangeHeader ? rangeHeader.Ranges.FirstOrDefault() : null;
    var response = WebOperationContext.Current.OutgoingResponse;
    Stream stream = File.OpenRead("c:\\Temp\\Video.mp4");
    var offset = firstRange != null ? (firstRange.From ?? 0) : 0;
    response.Headers.Add("Accept-Ranges", "bytes");
    response.ContentType = "video/mp4";
    if (hasRangeHeader) {
        response.StatusCode = System.Net.HttpStatusCode.PartialContent;
        var totalLength = stream.Length;
        // Serve at most 10 MB per request; PartialStream clamps at end-of-file.
        stream = new PartialStream(stream, offset, 10 * 1024 * 1024);
        var header = new ContentRangeHeaderValue(offset, offset + stream.Length - 1, totalLength);
        response.Headers.Add("Content-Range", header.ToString());
    }
    response.ContentLength = stream.Length;
    return stream;
}
I am successfully using VirtualFileDataObject code from Delay's blog, but i want to avoid streaming the entire file into memory.
I found this previously answered question on Stack Overflow Drag and Drop large virtual files from c# to Windows Explorer The question was answered by matthieu, by changing the signature of the SetData method.
Here is my problem, after changing the signature of the SetData method, other places that call it are still looking for the old signature.
Here is the original SetData;
// Registers a FILECONTENTS entry whose data is produced lazily: when the drop
// target asks for the content, GetData creates an HGLOBAL-backed IStream, lets
// `streamData` write the payload into it, and hands the raw IStream pointer
// back to COM.
public void SetData(short dataFormat, int index, Action<Stream> streamData)
{
_dataObjects.Add(
new DataObject
{
FORMATETC = new FORMATETC
{
cfFormat = dataFormat,
ptd = IntPtr.Zero,
dwAspect = DVASPECT.DVASPECT_CONTENT,
lindex = index,
tymed = TYMED.TYMED_ISTREAM
},
GetData = () =>
{
// Create IStream for data
var ptr = IntPtr.Zero;
var iStream = NativeMethods.CreateStreamOnHGlobal(IntPtr.Zero, true);
if (streamData != null)
{
// Wrap in a .NET-friendly Stream and call provided code to fill it
using (var stream = new IStreamWrapper(iStream))
{
streamData(stream);
}
}
// Return an IntPtr for the IStream
// GetComInterfaceForObject AddRefs the interface, so the RCW reference can be
// released here; the returned ptr keeps the COM object alive for the caller.
ptr = Marshal.GetComInterfaceForObject(iStream, typeof(IStream));
Marshal.ReleaseComObject(iStream);
return new Tuple<IntPtr, int>(ptr, NativeMethods.S_OK);
},
});
}
matthieu suggested to change it to;
public void SetData(short dataFormat, int index, Stream stream)
{
...
var iStream = new StreamWrapper(stream);
...
// Ensure the following line is commented out:
//Marshal.ReleaseComObject(iStream);
return new Tuple<IntPtr, int>(ptr, NativeMethods.S_OK);
...
}
After I make these changes the following call will not work; ( and this is where i need help)
How do i fix this call;
foreach (var fileDescriptor in fileDescriptors)
{
**SetData(FILECONTENTS, index, fileDescriptor.StreamContents);**
index++;
}
Basically, changing "Action streamData" to "Stream stream" is causing my problems. I am not sure how to call it after the changes are made.
All this code comes from Delays VirtualFileDataObject. I don't know if i should post it on here or not. But if you follow the link above it will take you to the blog so you can view it.
I am so close, just can't figure this last step out, thanks for taking a look
I've had exactly the same problem. Here is what I did to fix this issue (which as you say has not been fully addressed in the other answer)
1) Modify FileDescriptor's StreamContents property from this:
public Action<Stream> StreamContents { get; set; }
to this:
public Func<Stream> StreamContents { get; set; }
(instead of passing a Stream the client can write, we'll expect a Stream we can read from, which is exactly how Explorer works and what it expects)
2) Modify the SetData method overload from this:
public void SetData(short dataFormat, int index, Action<Stream> streamData)
to this:
public void SetData(short dataFormat, int index, Func<Stream> streamData)
3) change SetData code's GetData lambda to this:
// Lazy variant: ask the client for a readable Stream only when the drop target
// actually requests FILECONTENTS, then expose it to COM via ManagedIStream.
GetData = () =>
{
ManagedIStream istream = null;
if (streamData != null)
{
Stream stream = streamData();
if (stream != null)
{
istream = new ManagedIStream(stream);
}
}
// A null payload is reported as a null IStream pointer with S_OK.
IntPtr ptr = istream != null ? Marshal.GetComInterfaceForObject(istream, typeof(IStream)) : IntPtr.Zero;
return new Tuple<IntPtr, int>(ptr, NativeMethods.S_OK);
},
4) add this ManagedIStream class to the code (you can also delete the IStreamWrapper class completely)
// Minimal managed implementation of COM IStream over a .NET Stream — enough
// for Explorer's FILECONTENTS drag-and-drop protocol (Read/Seek/Stat, plus
// Write/SetSize for completeness). Members Explorer never calls throw
// NotImplementedException.
private class ManagedIStream : IStream
{
private Stream _stream;
public ManagedIStream(Stream stream)
{
_stream = stream;
}
public void Clone(out IStream ppstm)
{
throw new NotImplementedException();
}
public void Commit(int grfCommitFlags)
{
throw new NotImplementedException();
}
public void CopyTo(IStream pstm, long cb, IntPtr pcbRead, IntPtr pcbWritten)
{
throw new NotImplementedException();
}
public void LockRegion(long libOffset, long cb, int dwLockType)
{
throw new NotImplementedException();
}
// Reads up to cb bytes; reports the actual count through pcbRead when asked.
// NOTE(review): Stream.Read may return fewer than cb bytes before EOF for
// some stream types; confirm the consumer tolerates short reads.
public void Read(byte[] pv, int cb, IntPtr pcbRead)
{
int read = _stream.Read(pv, 0, cb);
if (pcbRead != IntPtr.Zero)
{
Marshal.WriteInt32(pcbRead, read);
}
}
public void Revert()
{
throw new NotImplementedException();
}
// dwOrigin maps 1:1 onto SeekOrigin (STREAM_SEEK_SET/CUR/END = 0/1/2).
public void Seek(long dlibMove, int dwOrigin, IntPtr plibNewPosition)
{
long newPos = _stream.Seek(dlibMove, (SeekOrigin)dwOrigin);
if (plibNewPosition != IntPtr.Zero)
{
Marshal.WriteInt64(plibNewPosition, newPos);
}
}
public void SetSize(long libNewSize)
{
_stream.SetLength(libNewSize);
}
// Reports the stream as STGTY_STREAM with its length and an access mode
// derived from the wrapped Stream's capabilities.
public void Stat(out System.Runtime.InteropServices.ComTypes.STATSTG pstatstg, int grfStatFlag)
{
const int STGTY_STREAM = 2;
pstatstg = new System.Runtime.InteropServices.ComTypes.STATSTG();
pstatstg.type = STGTY_STREAM;
pstatstg.cbSize = _stream.Length;
pstatstg.grfMode = 0;
if (_stream.CanRead && _stream.CanWrite)
{
const int STGM_READWRITE = 0x00000002;
pstatstg.grfMode |= STGM_READWRITE;
return;
}
if (_stream.CanRead)
{
const int STGM_READ = 0x00000000;
pstatstg.grfMode |= STGM_READ;
return;
}
if (_stream.CanWrite)
{
const int STGM_WRITE = 0x00000001;
pstatstg.grfMode |= STGM_WRITE;
return;
}
// A stream that is neither readable nor writable cannot be described.
throw new IOException();
}
public void UnlockRegion(long libOffset, long cb, int dwLockType)
{
throw new NotImplementedException();
}
// Writes cb bytes; reports the count through pcbWritten when asked.
public void Write(byte[] pv, int cb, IntPtr pcbWritten)
{
_stream.Write(pv, 0, cb);
if (pcbWritten != IntPtr.Zero)
{
Marshal.WriteInt32(pcbWritten, cb);
}
}
}
That's it. Now you can use the code like this (using the same sample as in the original article available here: http://dlaa.me/blog/post/9913083):
new VirtualFileDataObject.FileDescriptor
{
Name = "Alphabet.txt",
Length = 26,
ChangeTimeUtc = DateTime.Now.AddDays(-1),
StreamContents = () =>
{
var contents = Enumerable.Range('a', 26).Select(i => (byte)i).ToArray();
MemoryStream ms = new MemoryStream(contents); // don't dispose/using here, it would be too early
return ms;
}
};
I'm making a program which downloads files over http.
I've got it downloading, however I want to be able to pause the downloads, close the program and resume them again at a later date.
I know the location i'm downloading them from supports this.
I'm downloading the file through HttpWebResponse and reading the response into a Stream using GetResponseStream.
When i close the app and restart it, I'm stuck as to how resume the download. I've tried doing a seek on the stream but it states its not supported.
What would be the best way to do this?
If the server supports this you have to send the Range Http header with your request using the AddRange method:
request.AddRange(1024);
This will instruct the server to start sending the file after the 1st kilobyte. Then just read the response stream as normal.
To test if a server supports resuming you can send a HEAD request and test if it sends the Accept-Ranges: bytes header.
How about an HTTPRangeStream class?
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Text;

namespace Ionic.Kewl
{
    /// <summary>
    /// Read-only, seekable Stream over an HTTP resource: every Read issues one
    /// HTTP request with a Range header for exactly the bytes requested.
    /// Requires the server to support range requests and send Content-Length.
    /// NOTE: positions go through HttpWebRequest.AddRange(int, int), so
    /// resources larger than int.MaxValue (~2 GB) are not supported.
    /// </summary>
    public class HTTPRangeStream : Stream
    {
        private string url;
        private long length;
        private long position;
        private long totalBytesRead;
        private int totalReads;

        public HTTPRangeStream(string URL)
        {
            url = URL;
            // One initial request to learn the resource size; the response is
            // disposed (the original leaked it).
            HttpWebRequest request = (HttpWebRequest)HttpWebRequest.Create(url);
            using (HttpWebResponse result = (HttpWebResponse)request.GetResponse())
            {
                length = result.ContentLength;
            }
        }

        public long TotalBytesRead { get { return totalBytesRead; } }
        public long TotalReads { get { return totalReads; } }
        public override bool CanRead { get { return true; } }
        public override bool CanSeek { get { return true; } }
        public override bool CanWrite { get { return false; } }
        public override long Length { get { return length; } }

        public override bool CanTimeout
        {
            get
            {
                return base.CanTimeout;
            }
        }

        public override long Position
        {
            get
            {
                return position;
            }
            set
            {
                // FIX: conventional exception type for an out-of-range position.
                if (value < 0) throw new ArgumentOutOfRangeException("value");
                position = value;
            }
        }

        public override long Seek(long offset, SeekOrigin origin)
        {
            switch (origin)
            {
                case SeekOrigin.Begin:
                    position = offset;
                    break;
                case SeekOrigin.Current:
                    position += offset;
                    break;
                case SeekOrigin.End:
                    position = Length + offset;
                    break;
                default:
                    break;
            }
            return Position;
        }

        public override int Read(byte[] buffer, int offset, int count)
        {
            // FIXES vs. the original:
            //  - clamp the request to the end of the resource;
            //  - the Range header is inclusive, so ask for `count` bytes, not count+1;
            //  - honor the number of bytes the server actually delivered instead of
            //    assuming the whole range arrives in a single stream.Read call;
            //  - dispose the HttpWebResponse.
            if (position >= length) return 0;
            if (position + count > length) count = (int)(length - position);

            HttpWebRequest request = (HttpWebRequest)HttpWebRequest.Create(url);
            request.AddRange(Convert.ToInt32(position), Convert.ToInt32(position) + count - 1);
            int total = 0;
            using (HttpWebResponse result = (HttpWebResponse)request.GetResponse())
            using (Stream stream = result.GetResponseStream())
            {
                int n;
                while (total < count && (n = stream.Read(buffer, offset + total, count - total)) > 0)
                {
                    total += n;
                }
            }
            totalBytesRead += total;
            totalReads++;
            Position += total;
            return total;
        }

        public override void Write(byte[] buffer, int offset, int count)
        {
            throw new NotSupportedException();
        }

        public override void SetLength(long value)
        {
            throw new NotSupportedException();
        }

        public override void Flush()
        {
            // FIX: Flush on a read-only stream must be a no-op, not throw —
            // Stream.CopyTo and many consumers call it unconditionally.
        }
    }
}
Your solution is fine, but it will only work for the cases where the server sends a Content-Length header. This header will not be present in dynamically generated content.
Also, this solution sends a request for each Read. If the content changes on the server between the requests, then you will get inconsistent results.
I would improve upon this by storing the data locally — either on disk or in memory. Then you can seek into it all you want. There won't be any problem of inconsistency, and you need only one HttpWebRequest to download it.