Is there a way to convert a MimeKit.MimeMessage to HTML that can be rendered in a web browser? I'm not concerned with message attachments, but would like to be able to display the message body, complete with embedded images, in a browser. I'm new to MimeKit and couldn't locate anything in the API docs for this. Any info is appreciated.
EDIT: I didn't find a way to do this natively with MimeKit, but I combined it with the HtmlAgilityPack to parse the MimeMessage.HtmlBody and fix the inline images. This seems to work and I'll go with it unless someone has a better idea. For reference, here's the code:
//////////////////////////////////////////////////////////////////////////////////////////
// use MimeKit to parse the message
//////////////////////////////////////////////////////////////////////////////////////////
MimeKit.MimeMessage msg = MimeKit.MimeMessage.Load(stream);
//////////////////////////////////////////////////////////////////////////////////////////
// use HtmlAgilityPack to parse the resulting html in order to fix inline images
//////////////////////////////////////////////////////////////////////////////////////////
HtmlAgilityPack.HtmlDocument hdoc = new HtmlAgilityPack.HtmlDocument();
hdoc.LoadHtml(msg.HtmlBody);
// find all image nodes
var images = hdoc.DocumentNode.Descendants("img");
foreach (var img in images)
{
// check that this is an inline image
string cid = img.Attributes["src"].Value;
if (cid.StartsWith("cid:"))
{
// remove the cid part of the attribute
cid = cid.Remove(0, 4);
// find image object in MimeMessage
MimeKit.MimePart part = msg.BodyParts.FirstOrDefault(x => x.ContentId == cid) as MimeKit.MimePart;
if (part != null)
{
using (MemoryStream mstream = new MemoryStream())
{
// get the raw image content
part.ContentObject.WriteTo(mstream);
mstream.Flush();
byte[] imgbytes = mstream.ToArray();
// fix the image source by turning it into an embedded data: URI; note that this reuses
// the still-encoded content as-is, so it relies on the part's Content-Transfer-Encoding
// being base64 (the usual case for images)
img.Attributes["src"].Value = "data:" + part.ContentType.MimeType + ";" + part.ContentTransferEncoding.ToString().ToLower() + "," +
System.Text.Encoding.ASCII.GetString(imgbytes);
}
}
}
}
// write the resulting html to the output stream
hdoc.Save(outputStream);
Your solution is similar to the logic I used to use in MimeKit's MessageReader sample, but now MimeKit provides a better solution:
/// <summary>
/// Visits a MimeMessage and generates HTML suitable to be rendered by a browser control.
/// </summary>
class HtmlPreviewVisitor : MimeVisitor
{
List<MultipartRelated> stack = new List<MultipartRelated> ();
List<MimeEntity> attachments = new List<MimeEntity> ();
readonly string tempDir;
string body;
/// <summary>
/// Creates a new HtmlPreviewVisitor.
/// </summary>
/// <param name="tempDirectory">A temporary directory used for storing image files.</param>
public HtmlPreviewVisitor (string tempDirectory)
{
tempDir = tempDirectory;
}
/// <summary>
/// The list of attachments that were in the MimeMessage.
/// </summary>
public IList<MimeEntity> Attachments {
get { return attachments; }
}
/// <summary>
/// The HTML string that can be set on the BrowserControl.
/// </summary>
public string HtmlBody {
get { return body ?? string.Empty; }
}
protected override void VisitMultipartAlternative (MultipartAlternative alternative)
{
// walk the multipart/alternative children backwards from greatest level of faithfulness to the least faithful
for (int i = alternative.Count - 1; i >= 0 && body == null; i--)
alternative[i].Accept (this);
}
protected override void VisitMultipartRelated (MultipartRelated related)
{
var root = related.Root;
// push this multipart/related onto our stack
stack.Add (related);
// visit the root document
root.Accept (this);
// pop this multipart/related off our stack
stack.RemoveAt (stack.Count - 1);
}
// look up the image based on the img src url within our multipart/related stack
bool TryGetImage (string url, out MimePart image)
{
UriKind kind;
int index;
Uri uri;
if (Uri.IsWellFormedUriString (url, UriKind.Absolute))
kind = UriKind.Absolute;
else if (Uri.IsWellFormedUriString (url, UriKind.Relative))
kind = UriKind.Relative;
else
kind = UriKind.RelativeOrAbsolute;
try {
uri = new Uri (url, kind);
} catch {
image = null;
return false;
}
for (int i = stack.Count - 1; i >= 0; i--) {
if ((index = stack[i].IndexOf (uri)) == -1)
continue;
image = stack[i][index] as MimePart;
return image != null;
}
image = null;
return false;
}
// Save the image to our temp directory and return a "file://" url suitable for
// the browser control to load.
// Note: if you'd rather embed the image data into the HTML, you can construct a
// "data:" url instead.
string SaveImage (MimePart image, string url)
{
string fileName = url.Replace (':', '_').Replace ('\\', '_').Replace ('/', '_');
string path = Path.Combine (tempDir, fileName);
if (!File.Exists (path)) {
using (var output = File.Create (path))
image.ContentObject.DecodeTo (output);
}
return "file://" + path.Replace ('\\', '/');
}
// Replaces <img src=...> urls that refer to images embedded within the message with
// "file://" urls that the browser control will actually be able to load.
void HtmlTagCallback (HtmlTagContext ctx, HtmlWriter htmlWriter)
{
if (ctx.TagId == HtmlTagId.Image && !ctx.IsEndTag && stack.Count > 0) {
ctx.WriteTag (htmlWriter, false);
// replace the src attribute with a file:// URL
foreach (var attribute in ctx.Attributes) {
if (attribute.Id == HtmlAttributeId.Src) {
MimePart image;
string url;
if (!TryGetImage (attribute.Value, out image)) {
htmlWriter.WriteAttribute (attribute);
continue;
}
url = SaveImage (image, attribute.Value);
htmlWriter.WriteAttributeName (attribute.Name);
htmlWriter.WriteAttributeValue (url);
} else {
htmlWriter.WriteAttribute (attribute);
}
}
} else if (ctx.TagId == HtmlTagId.Body && !ctx.IsEndTag) {
ctx.WriteTag (htmlWriter, false);
// add and/or replace oncontextmenu="return false;"
foreach (var attribute in ctx.Attributes) {
if (attribute.Name.ToLowerInvariant () == "oncontextmenu")
continue;
htmlWriter.WriteAttribute (attribute);
}
htmlWriter.WriteAttribute ("oncontextmenu", "return false;");
} else {
// pass the tag through to the output
ctx.WriteTag (htmlWriter, true);
}
}
protected override void VisitTextPart (TextPart entity)
{
TextConverter converter;
if (body != null) {
// since we've already found the body, treat this as an attachment
attachments.Add (entity);
return;
}
if (entity.IsHtml) {
converter = new HtmlToHtml {
HtmlTagCallback = HtmlTagCallback
};
} else if (entity.IsFlowed) {
var flowed = new FlowedToHtml ();
string delsp;
if (entity.ContentType.Parameters.TryGetValue ("delsp", out delsp))
flowed.DeleteSpace = delsp.ToLowerInvariant () == "yes";
converter = flowed;
} else {
converter = new TextToHtml ();
}
string text = entity.Text;
body = converter.Convert (text);
}
protected override void VisitTnefPart (TnefPart entity)
{
// extract any attachments in the MS-TNEF part
attachments.AddRange (entity.ExtractAttachments ());
}
protected override void VisitMessagePart (MessagePart entity)
{
// treat message/rfc822 parts as attachments
attachments.Add (entity);
}
protected override void VisitMimePart (MimePart entity)
{
// realistically, if we've gotten this far, then we can treat this as an attachment
// even if the IsAttachment property is false.
attachments.Add (entity);
}
}
And then to use this custom HtmlPreviewVisitor class, you'd have a method something like this:
void Render (WebBrowser browser, MimeMessage message)
{
var tmpDir = Path.Combine (Path.GetTempPath (), message.MessageId);
var visitor = new HtmlPreviewVisitor (tmpDir);
Directory.CreateDirectory (tmpDir);
message.Accept (visitor);
browser.DocumentText = visitor.HtmlBody;
}
I know this seems like a lot of code, but it covers a lot more than just the simple cases. You'll notice that it also handles rendering text/plain as well as text/plain; format=flowed bodies when no HTML body is available, and that it correctly uses only the images that are part of the encapsulating multipart/related tree.
One way you could modify this code is to embed the images into the img tags instead of using a temp directory. To do that, you'd modify the SaveImage method to be something like this (be warned, this next segment of code is untested):
string SaveImage (MimePart image, string url)
{
using (var output = new MemoryStream ()) {
image.ContentObject.DecodeTo (output);
var buffer = output.GetBuffer ();
int length = (int) output.Length;
return string.Format ("data:{0};base64,{1}", image.ContentType.MimeType, Convert.ToBase64String (buffer, 0, length));
}
}
After upgrading CefSharp to version 105.3.390, the browser no longer displays local PDF files (displaying local HTML files still works).
The browser window is black and Windows opens a dialog: "You'll need a new app to open this chrome-extension link".
Since I am using a custom resource handler, all URLs look like "local://C:/path/to/file.pdf".
Settings:
var cefSettings = new CefSettings()
{
LogSeverity = LogSeverity.Verbose,
CachePath = SystemService.CefCacheDir,
LogFile = SystemService.CefSharpLogFile
};
CefCustomScheme localScheme = new CefCustomScheme
{
SchemeName = LocalSchemeHandlerFactory.SchemeName,
SchemeHandlerFactory = new LocalSchemeHandlerFactory(),
IsCSPBypassing = true
};
cefSettings.RegisterScheme(localScheme);
CefSharp.Cef.Initialize(cefSettings);
LocalSchemeHandlerFactory:
public class LocalSchemeHandlerFactory : ISchemeHandlerFactory
{
public const string SchemeName = "local";
public IResourceHandler Create(IBrowser browser, IFrame frame, string schemeName, IRequest request)
{
return new CustomSchemeHandler();
}
}
CustomSchemeHandler:
public class CustomSchemeHandler : ResourceHandler
{
public override CefReturnValue ProcessRequestAsync(IRequest request, ICallback callback)
{
string file = null;
var uri = new Uri(request.Url);
if (uri.Scheme == LocalSchemeHandlerFactory.SchemeName || uri.Scheme == SmbSchemeHandlerFactory.SchemeName)
{
if (uri.Scheme == LocalSchemeHandlerFactory.SchemeName)
{
var driveInfo = new DriveInfo(uri.Authority);
var path = uri.LocalPath.Substring(1);
file = Path.Combine(driveInfo.Name, path);
}
}
else
{
// handle invalid scheme
}
if (file == null)
return CefReturnValue.Cancel;
Task.Run(() =>
{
using (callback)
{
if (!File.Exists(file))
{
callback.Cancel();
return;
}
byte[] bytes = File.ReadAllBytes(file);
var stream = bytes != null ? new MemoryStream(bytes) : null;
if (stream == null)
{
callback.Cancel();
}
else
{
stream.Position = 0;
ResponseLength = stream.Length;
var fileExtension = Path.GetExtension(file);
MimeType = GetMimeType(fileExtension); // application/pdf
StatusCode = (int)HttpStatusCode.OK;
Stream = stream;
callback.Continue();
}
}
});
return CefReturnValue.ContinueAsync;
}
}
What am I missing?
UPDATE: Similar to this, when trying to open dev tools in CefSharp, a window opens: "You'll need a new app to open this dev-tools link".
I found it myself. The reason was an implementation of RequestHandler:
internal class BrowserRequestHandler : RequestHandler
{
protected override bool OnBeforeBrowse(IWebBrowser chromiumWebBrowser, IBrowser browser, IFrame frame, IRequest request, bool userGesture, bool isRedirect)
{
// delegate all non-local URLs to the OS default browser
if (!request.Url.StartsWith("file:") &&
!request.Url.StartsWith(LocalSchemeHandlerFactory.SchemeName))
{
Process.Start(request.Url);
return true;
}
return false;
}
}
But when opening a PDF file, an additional URL is requested: chrome-extension://mhjfbmdgcfjbbpaeojofohoefgiehjai/index.html (this is Chromium's built-in PDF viewer extension).
Following the logic above, that request was delegated to the OS, which caused the "You'll need a new app to open this chrome-extension link" window to appear.
After extending the if statement everything works fine:
if (!request.Url.StartsWith("chrome-extension") &&
!request.Url.StartsWith("file:") &&
!request.Url.StartsWith(LocalSchemeHandlerFactory.SchemeName))
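Putting it together, the corrected handler would look roughly like this (a sketch assembled from the snippets above; nothing new beyond the extended check):

using System.Diagnostics;
using CefSharp;
using CefSharp.Handler;

internal class BrowserRequestHandler : RequestHandler
{
    protected override bool OnBeforeBrowse(IWebBrowser chromiumWebBrowser, IBrowser browser, IFrame frame, IRequest request, bool userGesture, bool isRedirect)
    {
        // let chrome-extension:// (the built-in PDF viewer and dev tools), file: and the custom
        // "local" scheme be handled by CefSharp itself
        if (!request.Url.StartsWith("chrome-extension") &&
            !request.Url.StartsWith("file:") &&
            !request.Url.StartsWith(LocalSchemeHandlerFactory.SchemeName))
        {
            // delegate everything else to the OS default browser
            Process.Start(request.Url);
            return true;
        }
        return false;
    }
}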
I want to create an attribute which will perform the following tasks.
Validate the file type as per the file extension.
Validate the file type as per magic number/signature of file.
Validate the file length/size.
If the file is validated, then I want to store the file in the database as a Base64 string.
I tried it, but in my solution I have to read the file twice: first in the attribute to check the extension, magic number and size, and secondly to convert the file stream into a Base64 string. But Request.Content is a forward-only reader, so it throws an error when I try to read the file again.
Please see the code below
File Validator Filter
public class ValidateFileAttribute : ActionFilterAttribute
{
public override async Task OnActionExecutingAsync(HttpActionContext actionContext, CancellationToken cancellationToken)
{
if (actionContext.Request.Content.IsMimeMultipartContent())
{
var provider = await actionContext.Request.Content.ReadAsMultipartAsync(cancellationToken);
foreach (var content in provider.Contents)
{
//Here logic to check extension, magic number and length.
//If any error occurred then throw exception with HttpStatusCode
var fileName = content.Headers.ContentDisposition == null ? string.Empty : content.Headers.ContentDisposition.FileName;
var fileInBytes = await content.ReadAsByteArrayAsync();
var extention = fileName.Substring(fileName.LastIndexOf('.') + 1);
var validExtensions = new List<string>() { "pdf", "doc", "docx" };
if (!validExtensions.Contains(extention, StringComparer.OrdinalIgnoreCase))
{
//Return Exception
}
if (fileInBytes != null && fileInBytes.Any())
{
var magicNumber = BitConverter.ToString(fileInBytes).Substring(0, 11);
var validMagicNumbers = new List<string>() { "25-50-44-46", "D0-CF-11-E0", "50-4B-03-04" };
if (!validMagicNumbers.Contains(magicNumber, StringComparer.OrdinalIgnoreCase))
{
// Return Exception
}
}
if(fileInBytes != null && fileInBytes.Any() && fileInBytes.Length >= 3000000)
{
// Return Exception
}
}
}
}
}
Upload Action Method
[ValidateFile]
[Route("upload")]
[HttpPost]
public DocumentUploadResponse Upload()
{
if (Request.Content.IsMimeMultipartContent())
{
var provider = Request.Content.ReadAsMultipartAsync().Result;
// Getting error here..
foreach (var content in provider.Contents)
{
//Here logic to convert file stream into base 64 string.
//And store that string into Database.
var fileInBytes = content.ReadAsByteArrayAsync().Result;
var fileToStore = Convert.ToBase64String(fileInBytes);
/// Here goes Database code.....
}
}
}
Your help will be appreciated.
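A minimal sketch of one way around the forward-only limitation, assuming the attribute runs before the action and using a hypothetical property key: read the multipart content once in the filter, then pass the buffered provider to the action through Request.Properties instead of reading Request.Content a second time.

using System;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using System.Web.Http.Controllers;
using System.Web.Http.Filters;

public class ValidateAndBufferFileAttribute : ActionFilterAttribute
{
    // hypothetical key used to hand the already-read provider to the action
    public const string ProviderKey = "MultipartProvider";

    public override async Task OnActionExecutingAsync(HttpActionContext actionContext, CancellationToken cancellationToken)
    {
        if (!actionContext.Request.Content.IsMimeMultipartContent())
            return;

        // read the multipart body once; the default provider buffers the parts in memory
        var provider = await actionContext.Request.Content.ReadAsMultipartAsync(cancellationToken);

        // ... run the extension / magic-number / size checks on provider.Contents here ...

        // store the provider so the action never has to touch Request.Content again
        actionContext.Request.Properties[ProviderKey] = provider;
    }
}

// In the (async) action method, reuse the buffered parts instead of calling ReadAsMultipartAsync again:
// var provider = (MultipartMemoryStreamProvider)Request.Properties[ValidateAndBufferFileAttribute.ProviderKey];
// foreach (var content in provider.Contents)
// {
//     var fileInBytes = await content.ReadAsByteArrayAsync(); // memory-backed, so a second read is fine
//     var fileToStore = Convert.ToBase64String(fileInBytes);
// }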
The code below extracts a PNG (or an ICO file, commented out) which, when displayed by Paint, shows a small 16x16 icon as expected, BUT this icon file (either the PNG or the ICO) does not work as a TreeView icon. Other, larger PNG/ICO files do however work correctly.
public static bool GetURLIconFile(string webpageUrl, string IconFile)
{
//returns the icon for webpageURL in IconFile
string siteUrl = GetWebSite(webpageUrl); // just returns URL of site
var url = GetURLIcon("http://" + siteUrl);
if (url == null)
{
DeleteFile(IconFile);
return false;
}
try
{
HttpWebRequest w = (HttpWebRequest)HttpWebRequest.Create(url);
w.AllowAutoRedirect = true;
HttpWebResponse r = (HttpWebResponse)w.GetResponse();
System.Drawing.Image ico;
using (Stream s = r.GetResponseStream())
{
ico = System.Drawing.Image.FromStream(s);
ico.Save(IconFile, System.Drawing.Imaging.ImageFormat.Png); // for png
// ico.Save(IconFile, System.Drawing.Imaging.ImageFormat.Icon); // for ico
}
return true;
}
catch
{
DeleteFile(IconFile);
return false;
}
}
public static Uri GetURLIcon(string siteUrl)
{
// try looking for a /favicon.ico first
try
{
var url = new Uri(siteUrl);
var faviconUrl = new Uri(string.Format("{0}://{1}/favicon.ico", url.Scheme, url.Host));
try
{
using (var httpWebResponse = WebRequest.Create(faviconUrl).GetResponse() as HttpWebResponse)
{
if (httpWebResponse != null && httpWebResponse.StatusCode == HttpStatusCode.OK)
{
// Log("Found a /favicon.ico file for {0}", url);
return faviconUrl;
}
}
}
catch (WebException)
{
}
// otherwise parse the html and look for <link rel='icon' href='' /> using html agility pack
var htmlDocument = new HtmlWeb().Load(url.ToString());
var links = htmlDocument.DocumentNode.SelectNodes("//link");
if (links != null)
{
foreach (var linkTag in links)
{
var rel = GetAttr(linkTag, "rel");
if (rel == null)
continue;
if (rel.Value.IndexOf("icon", StringComparison.InvariantCultureIgnoreCase) >= 0)
{
var href = GetAttr(linkTag, "href");
if (href == null)
continue;
Uri absoluteUrl;
if (Uri.TryCreate(href.Value, UriKind.Absolute, out absoluteUrl))
{
// Log("Found an absolute favicon url {0}", absoluteUrl);
return absoluteUrl;
}
var expandedUrl = new Uri(string.Format("{0}://{1}{2}", url.Scheme, url.Host, href.Value));
//Log("Found a relative favicon url for {0} and expanded it to {1}", url, expandedUrl);
return expandedUrl;
}
}
}
// Log("Could not find a favicon for {0}", url);
return null;
}
catch
{
return null;
}
}
Firstly, I have found the cause of my problem. It was caused by another problem.
The attached code does work correctly to get the icon of a web page.
In answer to the question about TreeView icons: no, they are not directly supported by TreeViews, BUT
icons are supported by StackPanels, and StackPanels can be TreeView items, hence a TreeView can support icons.
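For example, a minimal WPF code-behind sketch of that idea (treeView, iconFile and label are placeholder names, not identifiers from the code above):

using System;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media.Imaging;

// Adds a node with a 16x16 icon to a TreeView by using a StackPanel as the item header.
void AddIconNode(TreeView treeView, string iconFile, string label)
{
    var icon = new Image
    {
        Source = new BitmapImage(new Uri(iconFile)), // iconFile: the file written by GetURLIconFile
        Width = 16,
        Height = 16
    };
    var header = new StackPanel { Orientation = Orientation.Horizontal };
    header.Children.Add(icon);
    header.Children.Add(new TextBlock { Text = label, Margin = new Thickness(4, 0, 0, 0) });
    treeView.Items.Add(new TreeViewItem { Header = header });
}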
using .NET SDK v.1.5.21.0
I'm trying to upload a large file (63Mb) and I'm following the example at:
http://docs.aws.amazon.com/AmazonS3/latest/dev/LLuploadFileDotNet.html
but using a helper instead of the whole code, and using jQuery File Upload:
https://github.com/blueimp/jQuery-File-Upload/blob/master/basic-plus.html
what I have is:
string bucket = "mybucket";
long totalSize = long.Parse(context.Request.Headers["X-File-Size"]),
maxChunkSize = long.Parse(context.Request.Headers["X-File-MaxChunkSize"]),
uploadedBytes = long.Parse(context.Request.Headers["X-File-UloadedBytes"]),
partNumber = uploadedBytes / maxChunkSize + 1,
fileSize = partNumber * inputStream.Length;
bool lastPart = inputStream.Length < maxChunkSize;
// http://docs.aws.amazon.com/AmazonS3/latest/dev/LLuploadFileDotNet.html
if (partNumber == 1) // initialize upload
{
iView.Utilities.Amazon_S3.S3MultipartUpload.InitializePartToCloud(fileName, bucket);
}
try
{
// upload part
iView.Utilities.Amazon_S3.S3MultipartUpload.UploadPartToCloud(fs, fileName, bucket, (int)partNumber, uploadedBytes, maxChunkSize);
if (lastPart)
// wrap it up and go home
iView.Utilities.Amazon_S3.S3MultipartUpload.CompletePartToCloud(fileName, bucket);
}
catch (System.Exception ex)
{
// Houston, we have a problem!
//Console.WriteLine("Exception occurred: {0}", exception.Message);
iView.Utilities.Amazon_S3.S3MultipartUpload.AbortPartToCloud(fileName, bucket);
}
and
public static class S3MultipartUpload
{
private static string accessKey = System.Configuration.ConfigurationManager.AppSettings["AWSAccessKey"];
private static string secretAccessKey = System.Configuration.ConfigurationManager.AppSettings["AWSSecretKey"];
private static AmazonS3 client = Amazon.AWSClientFactory.CreateAmazonS3Client(accessKey, secretAccessKey);
public static InitiateMultipartUploadResponse initResponse;
public static List<UploadPartResponse> uploadResponses;
public static void InitializePartToCloud(string destinationFilename, string destinationBucket)
{
// 1. Initialize.
uploadResponses = new List<UploadPartResponse>();
InitiateMultipartUploadRequest initRequest =
new InitiateMultipartUploadRequest()
.WithBucketName(destinationBucket)
.WithKey(destinationFilename.TrimStart('/'));
initResponse = client.InitiateMultipartUpload(initRequest);
}
public static void UploadPartToCloud(Stream fileStream, string destinationFilename, string destinationBucket, int partNumber, long uploadedBytes, long maxChunkedBytes)
{
// 2. Upload Parts.
UploadPartRequest request = new UploadPartRequest()
.WithBucketName(destinationBucket)
.WithKey(destinationFilename.TrimStart('/'))
.WithUploadId(initResponse.UploadId)
.WithPartNumber(partNumber)
.WithPartSize(maxChunkedBytes)
.WithFilePosition(uploadedBytes)
.WithInputStream(fileStream) as UploadPartRequest;
uploadResponses.Add(client.UploadPart(request));
}
public static void CompletePartToCloud(string destinationFilename, string destinationBucket)
{
// Step 3: complete.
CompleteMultipartUploadRequest compRequest =
new CompleteMultipartUploadRequest()
.WithBucketName(destinationBucket)
.WithKey(destinationFilename.TrimStart('/'))
.WithUploadId(initResponse.UploadId)
.WithPartETags(uploadResponses);
CompleteMultipartUploadResponse completeUploadResponse =
client.CompleteMultipartUpload(compRequest);
}
public static void AbortPartToCloud(string destinationFilename, string destinationBucket)
{
// abort.
client.AbortMultipartUpload(new AbortMultipartUploadRequest()
.WithBucketName(destinationBucket)
.WithKey(destinationFilename.TrimStart('/'))
.WithUploadId(initResponse.UploadId));
}
}
My maxChunkSize is 6Mb (6 * 1024 * 1024), as I have read that the minimum allowed part size is 5Mb...
Why am I getting the "Your proposed upload is smaller than the minimum allowed size" exception? What am I doing wrong?
The error is:
<Error>
<Code>EntityTooSmall</Code>
<Message>Your proposed upload is smaller than the minimum allowed size</Message>
<ETag>d41d8cd98f00b204e9800998ecf8427e</ETag>
<MinSizeAllowed>5242880</MinSizeAllowed>
<ProposedSize>0</ProposedSize>
<RequestId>C70E7A23C87CE5FC</RequestId>
<HostId>pmhuMXdRBSaCDxsQTHzucV5eUNcDORvKY0L4ZLMRBz7Ch1DeMh7BtQ6mmfBCLPM2</HostId>
<PartNumber>1</PartNumber>
</Error>
How can I get ProposedSize if I'm passing the stream and stream length?
Here is a working solution for the latest Amazon SDK (as of today: v.1.5.37.0).
Amazon S3 Multipart Upload works like:
Initialize the request using client.InitiateMultipartUpload(initRequest)
Send chunks of the file (loop until the end) using client.UploadPart(request)
Complete the request using client.CompleteMultipartUpload(compRequest)
If anything goes wrong, remember to dispose the client and the request, as well as fire the abort command using client.AbortMultipartUpload(abortMultipartUploadRequest)
I keep the client in Session, as we need it for each chunk upload; likewise, keep hold of the ETags, which are needed to complete the process.
You can see an example and a simple way of doing this in the Amazon docs themselves. I ended up having a class do everything, plus I have integrated it with the lovely jQuery File Upload plugin (handler code below as well).
The S3MultipartUpload class is as follows:
public class S3MultipartUpload : IDisposable
{
string accessKey = System.Configuration.ConfigurationManager.AppSettings.Get("AWSAccessKey");
string secretAccessKey = System.Configuration.ConfigurationManager.AppSettings.Get("AWSSecretKey");
AmazonS3 client;
public string OriginalFilename { get; set; }
public string DestinationFilename { get; set; }
public string DestinationBucket { get; set; }
public InitiateMultipartUploadResponse initResponse;
public List<PartETag> uploadPartETags;
public string UploadId { get; private set; }
public S3MultipartUpload(string destinationFilename, string destinationBucket)
{
if (client == null)
{
System.Net.WebRequest.DefaultWebProxy = null; // disable proxy to make upload quicker
client = Amazon.AWSClientFactory.CreateAmazonS3Client(accessKey, secretAccessKey, new AmazonS3Config()
{
RegionEndpoint = Amazon.RegionEndpoint.EUWest1,
CommunicationProtocol = Protocol.HTTP
});
this.OriginalFilename = destinationFilename.TrimStart('/');
this.DestinationFilename = string.Format("{0:yyyy}{0:MM}{0:dd}{0:HH}{0:mm}{0:ss}{0:fffff}_{1}", DateTime.UtcNow, this.OriginalFilename);
this.DestinationBucket = destinationBucket;
this.InitializePartToCloud();
}
}
private void InitializePartToCloud()
{
// 1. Initialize.
uploadPartETags = new List<PartETag>();
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest();
initRequest.BucketName = this.DestinationBucket;
initRequest.Key = this.DestinationFilename;
// make it public
initRequest.AddHeader("x-amz-acl", "public-read");
initResponse = client.InitiateMultipartUpload(initRequest);
}
public void UploadPartToCloud(Stream fileStream, long uploadedBytes, long maxChunkedBytes)
{
int partNumber = uploadPartETags.Count() + 1; // current part
// 2. Upload Parts.
UploadPartRequest request = new UploadPartRequest();
request.BucketName = this.DestinationBucket;
request.Key = this.DestinationFilename;
request.UploadId = initResponse.UploadId;
request.PartNumber = partNumber;
request.PartSize = fileStream.Length;
//request.FilePosition = uploadedBytes // remove this line?
request.InputStream = fileStream; // as UploadPartRequest;
var up = client.UploadPart(request);
uploadPartETags.Add(new PartETag() { ETag = up.ETag, PartNumber = partNumber });
}
public string CompletePartToCloud()
{
// Step 3: complete.
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest();
compRequest.BucketName = this.DestinationBucket;
compRequest.Key = this.DestinationFilename;
compRequest.UploadId = initResponse.UploadId;
compRequest.PartETags = uploadPartETags;
string r = "Something went badly wrong";
using (CompleteMultipartUploadResponse completeUploadResponse = client.CompleteMultipartUpload(compRequest))
r = completeUploadResponse.ResponseXml;
return r;
}
public void AbortPartToCloud()
{
// abort.
client.AbortMultipartUpload(new AbortMultipartUploadRequest()
{
BucketName = this.DestinationBucket,
Key = this.DestinationFilename,
UploadId = initResponse.UploadId
});
}
public void Dispose()
{
if (client != null) client.Dispose();
if (initResponse != null) initResponse.Dispose();
}
}
I use DestinationFilename as the destination file so I can avoid name collisions, but I keep the OriginalFilename as I need it later.
Using jQuery File Upload Plugin, all works inside a Generic Handler, and the process is something like this:
// Upload partial file
private void UploadPartialFile(string fileName, HttpContext context, List<FilesStatus> statuses)
{
if (context.Request.Files.Count != 1)
throw new HttpRequestValidationException("Attempt to upload chunked file containing more than one fragment per request");
var inputStream = context.Request.Files[0].InputStream;
string contentRange = context.Request.Headers["Content-Range"]; // "bytes 0-6291455/14130271"
int fileSize = int.Parse(contentRange.Split('/')[1]),
maxChunkSize = int.Parse(context.Request.Headers["X-Max-Chunk-Size"]),
uploadedBytes = int.Parse(contentRange.Replace("bytes ", "").Split('-')[0]);
iView.Utilities.AWS.S3MultipartUpload s3Upload = null;
try
{
// ######################################################################################
// 1. Initialize Amazon S3 Client
if (uploadedBytes == 0)
{
HttpContext.Current.Session["s3-upload"] = new iView.Utilities.AWS.S3MultipartUpload(fileName, awsBucket);
s3Upload = (iView.Utilities.AWS.S3MultipartUpload)HttpContext.Current.Session["s3-upload"];
string msg = System.String.Format("Upload started: {0} ({1:N0}Mb)", s3Upload.DestinationFilename, (fileSize / 1024));
this.Log(msg);
}
// cast current session object
if (s3Upload == null)
s3Upload = (iView.Utilities.AWS.S3MultipartUpload)HttpContext.Current.Session["s3-upload"];
// ######################################################################################
// 2. Send Chunks
s3Upload.UploadPartToCloud(inputStream, uploadedBytes, maxChunkSize);
// ######################################################################################
// 3. Complete Upload
if (uploadedBytes + maxChunkSize > fileSize)
{
string completeRequest = s3Upload.CompletePartToCloud();
this.Log(completeRequest); // log S3 response
s3Upload.Dispose(); // dispose all objects
HttpContext.Current.Session["s3-upload"] = null; // we don't need this anymore
}
}
catch (System.Exception ex)
{
if (ex.InnerException != null)
while (ex.InnerException != null)
ex = ex.InnerException;
this.Log(string.Format("{0}\n\n{1}", ex.Message, ex.StackTrace)); // log error
s3Upload.AbortPartToCloud(); // abort current upload
s3Upload.Dispose(); // dispose all objects
statuses.Add(new FilesStatus(ex.Message));
return;
}
statuses.Add(new FilesStatus(s3Upload.DestinationFilename, fileSize, ""));
}
Keep in mind that to have a Session object inside a Generic Handler, you need to implement IRequiresSessionState so your handler will look like:
public class UploadHandlerSimple : IHttpHandler, IRequiresSessionState
Inside fileupload.js (under _initXHRData) I have added an extra header called X-Max-Chunk-Size so I can pass this to Amazon and calculate if it's the last part of the uploaded file.
Feel free to comment and make smart edits for everyone to use.
I guess you didn't set the content-length of the part inside the UploadPartToCloud() function.
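With the old fluent API used in the question, that would mean making sure the part size reflects the chunk actually being sent; an untested sketch (reusing only the builder methods already shown above):

// inside UploadPartToCloud: size the part from the stream that is being uploaded
UploadPartRequest request = new UploadPartRequest()
    .WithBucketName(destinationBucket)
    .WithKey(destinationFilename.TrimStart('/'))
    .WithUploadId(initResponse.UploadId)
    .WithPartNumber(partNumber)
    .WithPartSize(fileStream.Length) // the actual number of bytes in this chunk, not the configured maximum
    .WithInputStream(fileStream) as UploadPartRequest;

uploadResponses.Add(client.UploadPart(request));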
I need to export (save to hard drive) my Lotus Notes emails.
I figured out the way how to save attachments to HDD, but I can't figure out the way of how to save the whole email.
The code below shows how I export attachments. Can you suggest how can I modify it to save emails?
PS- I am new to programming.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Domino;
using System.Collections;
namespace ExportLotusAttachments
{
class Class1
{
public void ScanForEmails()
{
String textBox1 = "c:\\1";
NotesSession session = new NotesSession();
session.Initialize("");
NotesDbDirectory dir = null;
dir = session.GetDbDirectory("");
NotesDatabase db = null;
db = dir.OpenMailDatabase();
NotesDatabase NDb = dir.OpenMailDatabase(); //Database connection
//ArrayList that will hold names of the folders
ArrayList LotusViews2 = new ArrayList();
foreach (NotesView V in NDb.Views)
{
if (V.IsFolder && !(V.Name.Equals("($All)")))
{
NotesView getS = V;
LotusViews2.Add(getS.Name);
}
}
foreach (String obj in LotusViews2)
{
NotesDocument NDoc;
NotesView nInboxDocs = NDb.GetView(obj);
NDoc = nInboxDocs.GetFirstDocument();
String pAttachment;
while (NDoc != null)
{
if (NDoc.HasEmbedded && NDoc.HasItem("$File"))
{
object[] AllDocItems = (object[])NDoc.Items;
foreach (object CurItem in AllDocItems)
{
NotesItem nItem = (NotesItem)CurItem;
if (IT_TYPE.ATTACHMENT == nItem.type)
{
String path = textBox1;
pAttachment = ((object[])nItem.Values)[0].ToString();
if (!System.IO.Directory.Exists(path))
{
System.IO.Directory.CreateDirectory(textBox1);
}
try
{
NDoc.GetAttachment(pAttachment).ExtractFile(path + pAttachment);
}
catch { }
}
}
}
NDoc = nInboxDocs.GetNextDocument(NDoc);
}
}
}
}
}
This post by Bob Babalan explains how to export Lotus documents using Java. The same principle should work in C# or VB. The document is converted into MIME and written to disk.
Or, in version 8.5.3 (I think it started with 8.5.1), you can just drag and drop it from the mail file to the file system.
I know it is a bit late, but this is what I did (based on Bob Babalan's approach).
Bob's solution helped me a lot to understand NotesMIMEEntities, but in his solution he only traversed the MIME tree to the second "layer". This version traverses multiple layers.
public static void GetMIME(StreamWriter writer, NotesMIMEEntity mimeEntity)
{
try
{
string contentType = null;
string headers = null;
string content = null;
string preamble = null;
MIME_ENCODING encoding;
contentType = mimeEntity.ContentType;
headers = mimeEntity.Headers;
encoding = mimeEntity.Encoding;
// message envelope. If no MIME-Version header, add one
if (!headers.Contains("MIME-Version:"))
writer.WriteLine("MIME-Version: 1.0");
writer.WriteLine(headers);
// for multipart, usually no main-msg content...
content = mimeEntity.ContentAsText;
if (content != null && content.Trim().Length > 0)
writer.WriteLine(content);
writer.Flush();
if (contentType.StartsWith("multipart"))
{
preamble = mimeEntity.Preamble;
NotesMIMEEntity mimeChild = mimeEntity.GetFirstChildEntity();
while (mimeChild != null)
{
GetMimeChild(writer, mimeChild);
mimeChild = mimeChild.GetNextSibling();
}
}
writer.WriteLine(mimeEntity.BoundaryEnd);
writer.Flush();
}
catch (Exception ex)
{
Logging.Log(ex.ToString());
}
}
private static void GetMimeChild(StreamWriter writer, NotesMIMEEntity mimeEntity)
{
string contentType = null;
string headers = null;
string content = null;
string preamble = null;
MIME_ENCODING encoding;
contentType = mimeEntity.ContentType;
headers = mimeEntity.Headers;
encoding = mimeEntity.Encoding;
if (encoding == MIME_ENCODING.ENC_IDENTITY_BINARY)
{
mimeEntity.EncodeContent(MIME_ENCODING.ENC_BASE64);
headers = mimeEntity.Headers;
}
preamble = mimeEntity.Preamble;
writer.Write(mimeEntity.BoundaryStart);
if (!mimeEntity.BoundaryStart.EndsWith("\n")) // make sure the boundary ends with a line break
writer.WriteLine("");
writer.WriteLine(headers);
writer.WriteLine();
writer.Write(mimeEntity.ContentAsText);
if (contentType.StartsWith("multipart"))
{
preamble = mimeEntity.Preamble;
NotesMIMEEntity mimeChild = mimeEntity.GetFirstChildEntity();
while (mimeChild != null)
{
GetMimeChild(writer, mimeChild);
mimeChild = mimeChild.GetNextSibling();
}
}
writer.Write(mimeEntity.BoundaryEnd);
writer.Flush();
}
I would call these methods like this, to save the EML file to a given path:
using (FileStream fs = new FileStream (path,FileMode.Create,FileAccess.ReadWrite,FileShare.None))
{
using (StreamWriter writer = new StreamWriter(fs))
{
NotesMIMEEntity mimeEntity = notesDocument.GetMIMEEntity();
if (mimeEntity != null)
GetMIME(writer, mimeEntity);
}
}