Encoder SDK 4 - Push to Publishing Point - C#

I'm coding an application in C# using the EC4 SP2 SDK.
I want to publish my file to a media server publishing point. I've searched and found two examples regarding setting up and authenticating on publishing points, but they are either from older SDKs or don't work (and are for console apps). Basically, my application encodes nothing, as if it had nothing to encode.
At a breakpoint in debug mode I can see the correct properties for the source file and for the server.
The encoding process takes 0 seconds. I checked the server event logs and I get a warning: "The security system has received an authentication request that could not be decoded." I just don't have the knowledge to dig any deeper than this. Any help would be appreciated.
This is the piece of code:
private void broadcastSourceFileToMediaServer2()
{
    using (LiveJob job = new LiveJob())
    {
        string filetoencode = @"c:\temp\niceday.wmv";
        LiveFileSource filesource = job.AddFileSource(filetoencode);
        filesource.PlaybackMode = FileSourcePlaybackMode.Loop;
        job.ActivateSource(filesource);
        job.ApplyPreset(LivePresets.VC1Broadband4x3);
        //don't know which one is good to use
        job.AcquireCredentials += new EventHandler<AcquireCredentialsEventArgs>(job_AcquireCredentials);
        _myUserName = "indes";
        _pw = PullPW("indes");
        Uri url = new Uri("http://192.168.1.74:8080/live");
        PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
        pubpoint.PublishingPoint = url;
        pubpoint.UserName = _myUserName;
        pubpoint.Password = _pw;
        job.PublishFormats.Add(pubpoint);
        job.PreConnectPublishingPoint();
        job.StartEncoding();
        statusBox.Text = job.NumberOfEncodedSamples.ToString();
        job.StopEncoding();
        job.Dispose();
    }
}
public static string _myUserName { get; set; }
public static SecureString _pw { get; set; }

// encode the password to send
private static SecureString PullPW(string pw)
{
    SecureString s = new SecureString();
    foreach (char c in pw) s.AppendChar(c);
    return s;
}

static void job_AcquireCredentials(object sender, AcquireCredentialsEventArgs e)
{
    e.UserName = _myUserName;
    e.Password = _pw;
    e.Modes = AcquireCredentialModes.None;
}

Progress:
I managed to authenticate (at least get a positive audit event) on the server.
I changed from this:
//don't know which one is good to use
job.AcquireCredentials += new EventHandler<AcquireCredentialsEventArgs>(job_AcquireCredentials);
_myUserName = "indes";
_pw = PullPW("indes");
Uri url = new Uri("http://192.168.1.74:8080/live");
PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
pubpoint.PublishingPoint = url;
pubpoint.UserName = _myUserName;
pubpoint.Password = _pw;
To this:
job.AcquireCredentials += new EventHandler<AcquireCredentialsEventArgs>(job_AcquireCredentials);
_myUserName = @"mediaservername\user";
_pw = PullPW("user_password");
Uri url = new Uri("http://192.168.1.74:8080/live");
PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
pubpoint.PublishingPoint = url;
As you can see, I had to include the domain (either the domain or the computer name) before the username. This changed the failed audit events on the server, so I could eliminate the manual credentials pubpoint.UserName and pubpoint.Password.
Now I'm just dealing with a lack-of-output-format exception. On to it.
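In case it helps anyone hitting the same exception: a minimal sketch of what I'd try, assuming the exception means the job has no output format because ApplyPreset was dropped while rearranging the credentials code (same source and server as above; the preset is just an example):
LiveJob job = new LiveJob();
LiveFileSource filesource = job.AddFileSource(@"c:\temp\niceday.wmv");
filesource.PlaybackMode = FileSourcePlaybackMode.Loop;
job.ActivateSource(filesource);
// the preset supplies the output format; without it the publish
// format has nothing to push
job.ApplyPreset(LivePresets.VC1Broadband4x3);
job.AcquireCredentials += job_AcquireCredentials;
_myUserName = @"mediaservername\user";
_pw = PullPW("user_password");
PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
pubpoint.PublishingPoint = new Uri("http://192.168.1.74:8080/live");
job.PublishFormats.Add(pubpoint);
job.PreConnectPublishingPoint();
job.StartEncoding();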

How about using Smooth Streaming? I managed to get my project going, but I didn't get much further. Look below at the part that has the Publish switch case; ignore the file portion.
internal bool StartStream()
{
    Busy = true;
    // Instantiates a new job for encoding
    //
    //*************************************** Live Stream Archive ******************************
    if (blnRecordFromFile)
    {
        // Sets up publishing format for file archival type
        FileArchivePublishFormat fileOut = new FileArchivePublishFormat();
        // job.ApplyPreset(LivePresets.VC1512kDSL16x9);
        // Gets timestamp and edits it for filename
        string timeStamp = DateTime.Now.ToString();
        timeStamp = timeStamp.Replace("/", "-");
        timeStamp = timeStamp.Replace(":", ".");
        // Sets file path and name
        string path = "C:\\output\\";
        string filename = "Capture" + timeStamp + ".ismv";
        if (!Directory.Exists(path))
            Directory.CreateDirectory(path);
        fileOut.OutputFileName = Path.Combine(path, filename);
        // Adds the format to the job. You can add additional formats as well, such as
        // publishing streams or broadcasting from a port
        job.PublishFormats.Add(fileOut);
    }
    //****************************** END OF Stream PORTION ****************************************
    ////////////////////////////////////////////////////////////////////////////////////////////////////
    //*************************************** Process Files or Live Stream ******************************
    if (blnRecordFromFile)
    {
        // Note: the job has to be created before the preset is applied;
        // in the original order the new LiveJob() discarded the preset.
        job = new LiveJob();
        job.ApplyPreset(LivePresets.VC1IISSmoothStreaming720pWidescreen);
        // Verifies all information is entered
        if (string.IsNullOrWhiteSpace(sourcePath) || string.IsNullOrWhiteSpace(destinationPath))
            return false;
        job.Status += new EventHandler<EncodeStatusEventArgs>(StreamStatus);
        LiveFileSource fileSource;
        try
        {
            // Sets file to active source and checks if it is valid
            fileSource = job.AddFileSource(sourcePath);
        }
        catch (InvalidMediaFileException)
        {
            return false;
        }
        // Sets to loop media for streaming
        // fileSource.PlaybackMode = FileSourcePlaybackMode.Loop;
        // Makes this file the active source. Multiple files can be added
        // and cued to move to each other at their ends
        job.ActivateSource(fileSource);
    }
    //****************************** END OF FILE PORTION ****************************************
    // Sets up variable for format data
    switch (publishType)
    {
        case Output.Archive:
            // Verifies destination path exists and if not creates it
            try
            {
                if (!Directory.Exists(destinationPath))
                    Directory.CreateDirectory(destinationPath);
            }
            catch (IOException)
            {
                return false;
            }
            FileArchivePublishFormat archiveFormat = new FileArchivePublishFormat();
            // Gets the location of the old extension and removes it
            string filename = Path.GetFileNameWithoutExtension(sourcePath);
            // Sets the archive path and file name
            archiveFormat.OutputFileName = Path.Combine(destinationPath, filename + ".ismv");
            job.PublishFormats.Add(archiveFormat);
            break;
        case Output.Publish:
            // Sets up streaming of media to publishing point
            job = new LiveJob();
            // Acquires audio and video devices
            Collection<EncoderDevice> devices = EncoderDevices.FindDevices(EncoderDeviceType.Video);
            EncoderDevice video = devices.Count > 0 ? devices[0] : null;
            // for (int i = 0; i < devices.Count; ++i)
            //     devices[i].Dispose();
            devices.Clear();
            devices = EncoderDevices.FindDevices(EncoderDeviceType.Audio);
            EncoderDevice audio = devices.Count > 0 ? devices[0] : null;
            for (int i = 1; i < devices.Count; ++i)
                devices[i].Dispose();
            devices.Clear();
            // Checks for a/v devices
            if (video != null && audio != null)
            {
                //job.ApplyPreset(Preset.FromFile(@"C:\Tempura\LivePreset3.xml"));
                job.ApplyPreset(LivePresets.H264IISSmoothStreamingLowBandwidthStandard);
                job.OutputFormat.VideoProfile.SmoothStreaming = true;
                deviceSource = job.AddDeviceSource(video, audio);
                // Make this source the active one
                job.ActivateSource(deviceSource);
            }
            else
            {
                error = true;
            }
            PushBroadcastPublishFormat publishFormat = new PushBroadcastPublishFormat();
            try
            {
                // checks the path for a valid publishing point
                publishFormat.PublishingPoint = new Uri(destinationPath);
            }
            catch (UriFormatException)
            {
                return false;
            }
            // Adds the publishing format to the job
            try
            {
                // job.ApplyPreset(LivePresets.VC1IISSmoothStreaming480pWidescreen);
                job.PublishFormats.Add(publishFormat);
                job.PreConnectPublishingPoint();
            }
            catch (Exception e)
            {
                MessageBox.Show(e.StackTrace);
            }
            break;
        default:
            return false;
    }
    job.StartEncoding();
    return true;
}

Sadly I don't have enough rep to comment, so I have to write this as an answer.
Since you are starting a live job, you should not call job.StopEncoding() right after StartEncoding() if you want to stream. Usually you would use an event to stop the encoding. If you start encoding and immediately stop it, it is only logical that you have no output, or only a very small one.
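A minimal sketch of what that could look like, keeping the job in a field and stopping it from a separate handler (the control names are made up, and the source/preset are taken from the question):
private LiveJob _job; // keep the job alive for the whole broadcast

private void startButton_Click(object sender, EventArgs e)
{
    _job = new LiveJob();
    LiveFileSource source = _job.AddFileSource(@"c:\temp\niceday.wmv");
    source.PlaybackMode = FileSourcePlaybackMode.Loop;
    _job.ActivateSource(source);
    _job.ApplyPreset(LivePresets.VC1Broadband4x3);
    PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
    pubpoint.PublishingPoint = new Uri("http://192.168.1.74:8080/live");
    _job.PublishFormats.Add(pubpoint);
    _job.PreConnectPublishingPoint();
    _job.StartEncoding(); // returns immediately; encoding continues in the background
}

private void stopButton_Click(object sender, EventArgs e)
{
    // stop only when the broadcast should actually end
    _job.StopEncoding();
    _job.Dispose();
}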

I changed your code to the following and it seems to work well. I guess your problem is that you disposed the instance of the LiveJob class. You have to keep the instance alive until it has finished encoding the whole stream, so changing the using part and removing the StopEncoding and Dispose calls will be OK.
private void broadcastSourceFileToMediaServer2()
{
    LiveJob job = new LiveJob();
    string filetoencode = @"c:\temp\niceday.wmv";
    LiveFileSource filesource = job.AddFileSource(filetoencode);
    filesource.PlaybackMode = FileSourcePlaybackMode.Loop;
    job.ActivateSource(filesource);
    job.ApplyPreset(LivePresets.VC1Broadband4x3);
    //don't know which one is good to use
    job.AcquireCredentials += new EventHandler<AcquireCredentialsEventArgs>(job_AcquireCredentials);
    _myUserName = "indes";
    _pw = PullPW("indes");
    Uri url = new Uri("http://192.168.1.74:8080/live");
    PushBroadcastPublishFormat pubpoint = new PushBroadcastPublishFormat();
    pubpoint.PublishingPoint = url;
    pubpoint.UserName = _myUserName;
    pubpoint.Password = _pw;
    job.PublishFormats.Add(pubpoint);
    job.PreConnectPublishingPoint();
    job.StartEncoding();
    statusBox.Text = job.NumberOfEncodedSamples.ToString();
}

public static string _myUserName { get; set; }
public static SecureString _pw { get; set; }

// encode the password to send
private static SecureString PullPW(string pw)
{
    SecureString s = new SecureString();
    foreach (char c in pw) s.AppendChar(c);
    return s;
}

static void job_AcquireCredentials(object sender, AcquireCredentialsEventArgs e)
{
    e.UserName = _myUserName;
    e.Password = _pw;
    e.Modes = AcquireCredentialModes.None;
}

Related

How to have an AWS Lambda/Rekognition Function return an array of object keys

This feels like a simple question and I feel like I am overthinking it. I am doing an AWS project that will compare face(s) on an image to a database (S3 bucket) of other faces. So far, I have a Lambda function for the CompareFaces request, a class library which invokes the function, and a UWP app that inputs the image file and outputs a result. It has worked so far based on boolean (true or false) functions, but now I want it to instead return which face(s) are recognized via an array. I'm struggling to implement this.
Below is my Lambda function. I have adjusted the task to be an Array instead of a bool and changed the return to be an array. At the bottom, I have created a global variable class with a testing array so I could attempt to reference the array elsewhere.
public class Function
{
    //Function
    public async Task<Array> FunctionHandler(string input, ILambdaContext context)
    {
        //number of matched faces
        int matched = 0;
        //Client setup
        var rekognitionclient = new AmazonRekognitionClient();
        var s3client = new AmazonS3Client();
        //Create list of target images
        ListObjectsRequest list = new ListObjectsRequest
        {
            BucketName = "bucket2"
        };
        ListObjectsResponse listre = await s3client.ListObjectsAsync(list);
        //loop over the list
        foreach (Amazon.S3.Model.S3Object obj in listre.S3Objects)
        {
            //face request with input and obj.Key images
            var comparefacesrequest = new CompareFacesRequest
            {
                SourceImage = new Image
                {
                    S3Object = new S3Objects
                    {
                        Bucket = "bucket1",
                        Name = input
                    }
                },
                TargetImage = new Image
                {
                    S3Object = new S3Objects
                    {
                        Bucket = "bucket2",
                        Name = obj.Key
                    }
                },
            };
            //compare with confidence of 95 (subject to change) to current target image
            var detectresponse = await rekognitionclient.CompareFacesAsync(comparefacesrequest);
            detectresponse.FaceMatches.ForEach(match =>
            {
                ComparedFace face = match.Face;
                if (match.Similarity > 95)
                {
                    //if face detected, raise matched
                    matched++;
                    for (int i = 0; i < Globaltest.testingarray.Length; i++)
                    {
                        if (Globaltest.testingarray[i] == "test")
                        {
                            Globaltest.testingarray[i] = obj.Key;
                        }
                    }
                }
            });
        }
        //Return the testing array (note: both branches currently return the same thing)
        if (matched > 0)
        {
            return Globaltest.testingarray;
        }
        return Globaltest.testingarray;
    }
}

public static class Globaltest
{
    public static string[] testingarray = { "test", "test", "test" };
}
Next is my invoke request in my class library. It has so far been based on the Lambda outputting a boolean result, but I thought, "hey, it is parsing the result, it should be fine, right?" I do convert the result to a string, as there is no GetArray, from what I know.
public async Task<bool> IsFace(string filePath, string fileName)
{
    await UploadS3(filePath, fileName);
    AmazonLambdaClient client = new AmazonLambdaClient(accessKey, secretKey, Amazon.RegionEndpoint.USWest2);
    InvokeRequest ir = new InvokeRequest();
    ir.InvocationType = InvocationType.RequestResponse;
    ir.FunctionName = "ImageTesting";
    ir.Payload = "\"" + fileName + "\"";
    var result = await client.InvokeAsync(ir);
    var strResponse = Encoding.ASCII.GetString(result.Payload.ToArray());
    if (bool.TryParse(strResponse, out bool result2))
    {
        return result2;
    }
    return false;
}
Finally, here is the section of my UWP app where I perform the function. I am referencing the Lambda client via "using Lambdaclienttest" (the name of the Lambda project; this is the only place I use the reference). When I run my project, I do still get a face detected when I should, but Globaltest.testingarray[0] is still equal to "test".
var Facedetector = new FaceDetector(Credentials.accesskey, Credentials.secretkey);
try
{
    var result = await Facedetector.IsFace(filepath, filename);
    if (result)
    {
        textBox1.Text = "There is a face detected";
        textBox2.Text = Globaltest.testingarray[0];
    }
    else
    {
        textBox1.Text = "Try Again";
    }
}
catch
{
    textBox1.Text = "Please use a photo";
}
Does anyone have any suggestions?

AmazonSQSClient not refreshing AWSCredentials when Credentials File is updated

When my AWS credentials file (see the docs) is updated by an external process, the AmazonSQSClient doesn't re-read it, and SendMessageAsync fails with a security/token error.
We use a custom PowerShell script to refresh the local AWS credentials file periodically. The script works fine; the file is refreshed prior to the credentials expiring on AWS. However, if my app is running when the file is refreshed, the new credentials are not re-read from the file: the client shows that the previous credentials are still in use.
The AWS docs list several AWSCredentials providers, but none of them seem to be the correct choice... I think.
Restarting the app works: the new credentials are read correctly and messages are sent until the next time the credentials file is updated.
using (var client = new AmazonSQSClient(Amazon.RegionEndpoint.EUWest1))
{
    return client.SendMessageAsync(request);
}
I don't think there is a way for a running app to pick up the default credentials being refreshed in the credentials file. There is a solution for Node.js that loads credentials from a JSON file. You can create a similar solution in C#. You could also run a local DB to store credentials, so that whenever the credentials file is updated, the DB table or JSON file is also updated. You will need to use the access key and secret key in your SQS client constructor, as opposed to using default credentials.
// Load these from a JSON file or DB.
var accessKey = "";
var secretKey = "";
using (var client = new AmazonSQSClient(accessKey, secretKey, Amazon.RegionEndpoint.EUWest1))
{
    return client.SendMessageAsync(request);
}
The following works "OK", but I've only tested it with one profile, and the file watcher is not as timely as you'd like, so I'd recommend you wrap your usage inside a retry mechanism.
// Usage..
var credentials = new AwsCredentialsFile();
using (var client = new AmazonSQSClient(credentials, Amazon.RegionEndpoint.EUWest1))
{
    return client.SendMessageAsync(request);
}

public class AwsCredentialsFile : AWSCredentials
{
    // https://docs.aws.amazon.com/sdk-for-net/v2/developer-guide/net-dg-config-creds.html#creds-file
    private const string DefaultProfileName = "default";
    private static ConcurrentDictionary<string, ImmutableCredentials> _credentials = new ConcurrentDictionary<string, ImmutableCredentials>(StringComparer.OrdinalIgnoreCase);
    private static FileSystemWatcher _watcher = BuildFileSystemWatcher();
    private readonly System.Text.Encoding _encoding;
    private readonly string _profileName;

    public AwsCredentialsFile()
        : this(AwsCredentialsFile.DefaultProfileName, System.Text.Encoding.UTF8)
    {
    }

    public AwsCredentialsFile(string profileName)
        : this(profileName, System.Text.Encoding.UTF8)
    {
    }

    public AwsCredentialsFile(string profileName, System.Text.Encoding encoding)
    {
        _profileName = profileName;
        _encoding = encoding;
    }

    private static FileSystemWatcher BuildFileSystemWatcher()
    {
        var watcher = new FileSystemWatcher
        {
            Path = Path.GetDirectoryName(GetDefaultCredentialsFilePath()),
            NotifyFilter = NotifyFilters.LastWrite,
            Filter = "credentials"
        };
        // when the file changes, drop the cache so the next call re-reads it
        watcher.Changed += (object source, FileSystemEventArgs e) => { _credentials?.Clear(); };
        watcher.EnableRaisingEvents = true;
        return watcher;
    }

    public static string GetDefaultCredentialsFilePath()
    {
        return System.Environment.ExpandEnvironmentVariables(@"C:\Users\%USERNAME%\.aws\credentials");
    }

    public static (string AccessKey, string SecretAccessKey, string Token) ReadCredentialsFromFile(string profileName, System.Text.Encoding encoding)
    {
        var profile = $"[{profileName}]";
        string awsAccessKeyId = null;
        string awsSecretAccessKey = null;
        string token = null;
        var lines = File.ReadAllLines(GetDefaultCredentialsFilePath(), encoding);
        for (int i = 0; i < lines.Length; i++)
        {
            var text = lines[i];
            if (text.Equals(profile, StringComparison.OrdinalIgnoreCase))
            {
                awsAccessKeyId = lines[i + 1].Replace("aws_access_key_id = ", string.Empty);
                awsSecretAccessKey = lines[i + 2].Replace("aws_secret_access_key = ", string.Empty);
                if (lines.Length > i + 3) // the session token line may be absent (was ">=", an off-by-one)
                {
                    token = lines[i + 3].Replace("aws_session_token = ", string.Empty);
                }
                break;
            }
        }
        var result = (AccessKey: awsAccessKeyId, SecretAccessKey: awsSecretAccessKey, Token: token);
        return result;
    }

    public override ImmutableCredentials GetCredentials()
    {
        if (_credentials.TryGetValue(_profileName, out ImmutableCredentials value))
        {
            return value;
        }
        else
        {
            var (AccessKey, SecretAccessKey, Token) = ReadCredentialsFromFile(_profileName, _encoding);
            var credentials = new ImmutableCredentials(AccessKey, SecretAccessKey, Token);
            _credentials.TryAdd(_profileName, credentials);
            return credentials;
        }
    }
}
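And a hedged sketch of the retry wrapper mentioned above (the helper name and backoff are mine, not part of the class; it needs System, System.Threading.Tasks, and Amazon.Runtime):
// Hypothetical retry helper: if a send fails while the watcher has not
// yet cleared the cached credentials, wait briefly and try again.
public static async Task<T> RetryOnStaleCredentials<T>(Func<Task<T>> action, int maxAttempts = 3)
{
    for (int attempt = 1; ; attempt++)
    {
        try
        {
            return await action();
        }
        catch (AmazonServiceException) when (attempt < maxAttempts)
        {
            await Task.Delay(TimeSpan.FromSeconds(attempt)); // simple linear backoff
        }
    }
}

// Usage:
// var response = await RetryOnStaleCredentials(() => client.SendMessageAsync(request));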

WebICE Integration using QuickFIX

I am a newbie to the FIX protocol and QuickFIX programming. I am seeking help with getting a Trade Capture Report from ICE. I have googled for samples/tutorials on using QuickFIX/n to get the trade report, but I have not been able to get sufficient output from them.
My problem is getting the Trade Capture Report, or deal information. For this I tried using the TradeCaptureReportRequest, TradeCaptureReportRequestAck, and TradeCaptureReport classes, but somehow it's not working.
A simple how-to for extracting the information would be a great help.
Thanking everyone out there in advance.
OK, I am posting this as an answer because it's going to be way too long for a comment. Please keep in mind that I have written custom constants, message types, etc. (I wrote my acceptor server as well, so I'm not restricted by ICE constants/enums). You will need to determine what fields are required by ICE and make changes - this will not be easy to copy/paste...
First, you need to make sure you have all required files in place and referenced. I create a folder called "fix" in my project and copy all FIX files into it. These need to be (at least one) FixXXX.xml file; if you're using FIX50SP1 or SP2, you also need FIXT11.xml. Along with the .xml files, you need an initiator.cfg file (assuming you're making an initiator, not a server; otherwise this will need to be acceptor.cfg, but again, since it sounds like you're trying to connect to ICE, initiator is the correct usage). Finally, you will need QuickFix.dll. My tree looks as below:
I am not going to go through the XML files - you will need to just learn that - it is very confusing and takes time, especially if you're using FIXT11.XML along with SP1 or SP2.
Your initiator.cfg should be similar to below:
# default settings for sessions
[DEFAULT]
FileStorePath=store
FileLogPath=log
ConnectionType=initiator
ReconnectInterval=60
SenderCompID=[Enter yours]
ResetOnLogon=Y
ResetOnLogout=Y
ResetOnDisconnect=Y
[SESSION]
BeginString=FIXT.1.1
TargetCompID=[Enter ICE Acceptor]
DefaultApplVerID=FIX.5.0
StartTime=12:30:00
EndTime=21:30:00
# override default setting for ReconnectInterval
ReconnectInterval=30
HeartBtInt=30
SocketConnectPort=[From ICE]
# (optional) only listen for incoming connections on a specific host
#SocketConnectHost=127.0.0.1
SocketConnectHost=[ICE Ip Address- from your documentation/registration]
DataDictionary=..\..\fix\FIX50.xml
TransportDataDictionary=..\..\fix\FIXT11.xml
OK, assuming that you have QuickFix.dll imported and referenced and your initiator.cfg properly configured, it's actually fairly simple:
Create a class that handles everything. Ignore AddToLB; that is a testing function.
public class TCT_Fix : Control, IApplication
{
    private readonly string username = [removed]
    private readonly string password = [removed]
    public string InitiatorID;
    SessionID sessionID;
    public bool running;
    SessionSettings settings;
    IMessageStoreFactory storeFactory;
    ILogFactory logFactory;
    SocketInitiator initiator;
    public event EventHandler AddToLB;
    public event EventHandler AddToAdmin;

    public void StopIt()
    {
        if (sessionID == null) return;
        try
        {
            Session.LookupSession(sessionID).Disconnect("Stopping");
            settings.Remove(sessionID);
            settings = null;
            initiator.Dispose();
            settings = new SessionSettings(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "fix", "initiator.cfg"));
            storeFactory = new FileStoreFactory(settings);
            logFactory = new FileLogFactory(settings);
            initiator = new SocketInitiator(
                this,
                storeFactory,
                settings,
                logFactory);
        }
        catch { }
    }

    public void FromApp(QuickFix.Message msg, SessionID sessionID)
    {
        var sMsg = "FROM APP: " + msg.ToString();
        AddToLB(sMsg, null);
        if (msg.Header.GetField(35) == "TC") //Cash
        {
            DateTime dtTdate;
            float fPrice;
            int Qty;
            int OrdType;
            bool BPisBuyer;
            DateTime.TryParse(msg.GetField(CustomConstants.TDATE), out dtTdate);
            string BPSide = msg.GetField(CustomConstants.BP_SIDE);
            float.TryParse(msg.GetField(CustomConstants.F_PRICE), out fPrice);
            int.TryParse(msg.GetField(CustomConstants.QTY), out Qty);
            string TCTReference = msg.GetField(CustomConstants.TCT_REF);
            string BPAcct = msg.GetField(CustomConstants.BP_COMPANY);
            int.TryParse(msg.GetField(CustomConstants.ORDER_TYPE), out OrdType);
            string ExecBkr = msg.GetField(CustomConstants.EXEC_BKR);
            string CounterParty = msg.GetField(CustomConstants.COUNTER_PARTY);
            BPisBuyer = msg.GetField(CustomConstants.IS_BUYER) == "Y";
            string BPTrader = msg.GetField(CustomConstants.BP_TRADER);
            string CounterTrader = msg.GetField(CustomConstants.COUNTER_TRADER);
            string Grade = msg.GetField(CustomConstants.GRADE);
            string Location = msg.GetField(CustomConstants.LOCATION);
            string CycDt = msg.GetField(CustomConstants.CYCLE_DATE);
            string DelMo = msg.GetField(CustomConstants.DELIVER_MONTH);
            string Terms = msg.GetField(CustomConstants.TERMS);
            string Payment = msg.GetField(CustomConstants.PAYMENT);
            string Origin = msg.GetField(CustomConstants.ORIGIN);
            string NumOfCyc = msg.GetField(CustomConstants.NUM_OF_CYCLES);
            string Via = msg.GetField(CustomConstants.VIA);
            string MoveMo = msg.GetField(CustomConstants.MOVE_MONTH);
            string Comment = msg.GetField(CustomConstants.COMMENT);
        }
        else if (msg.Header.GetField(35) == "TE") //EFP
        {
            DateTime dtTdate;
            float fPrice;
            int Qty;
            int OrdType;
            bool BPisBuyer;
            bool IsWater;
            DateTime.TryParse(msg.GetField(CustomConstants.TDATE), out dtTdate);
            string BPSide = msg.GetField(CustomConstants.BP_SIDE);
            float.TryParse(msg.GetField(CustomConstants.F_PRICE), out fPrice);
            int.TryParse(msg.GetField(CustomConstants.QTY), out Qty);
            string TCTReference = msg.GetField(CustomConstants.TCT_REF);
            string BPAcct = msg.GetField(CustomConstants.BP_COMPANY);
            int.TryParse(msg.GetField(CustomConstants.ORDER_TYPE), out OrdType);
            string ExecBkr = msg.GetField(CustomConstants.EXEC_BKR);
            string CounterParty = msg.GetField(CustomConstants.COUNTER_PARTY);
            BPisBuyer = msg.GetField(CustomConstants.IS_BUYER) == "Y";
            string BPTrader = msg.GetField(CustomConstants.BP_TRADER);
            string CounterTrader = msg.GetField(CustomConstants.COUNTER_TRADER);
            string Grade = msg.GetField(CustomConstants.GRADE);
            string Location = msg.GetField(CustomConstants.LOCATION);
            string CycDt = msg.GetField(CustomConstants.CYCLE_DATE);
            string DelMo = msg.GetField(CustomConstants.DELIVER_MONTH);
            string Terms = msg.GetField(CustomConstants.TERMS);
            string Payment = msg.GetField(CustomConstants.PAYMENT);
            string Origin = msg.GetField(CustomConstants.ORIGIN);
            string NumOfCyc = msg.GetField(CustomConstants.NUM_OF_CYCLES);
            string Via = msg.GetField(CustomConstants.VIA);
            string MoveMo = msg.GetField(CustomConstants.MOVE_MONTH);
            string Comment = msg.GetField(CustomConstants.COMMENT);
            IsWater = msg.GetField(CustomConstants.ISWATER) == "Y";
            string BPFloorBkr = msg.GetField(CustomConstants.BP_FLOOR_BKR);
            string CounterFloorBkr = msg.GetField(CustomConstants.COUNTER_FLOOR_BKR);
            string Diff = msg.GetField(CustomConstants.DIFFERENCE);
            string MercMo = msg.GetField(CustomConstants.MERC_MO);
            string MercPr = msg.GetField(CustomConstants.MERC_PRICE);
        }
        else if (msg.Header.GetField(35) == "TI") //Index
        {
            DateTime dtTdate;
            float fPrice;
            int Qty;
            int OrdType;
            bool BPisBuyer;
            bool IsWater;
            DateTime.TryParse(msg.GetField(CustomConstants.TDATE), out dtTdate);
            string BPSide = msg.GetField(CustomConstants.BP_SIDE);
            float.TryParse(msg.GetField(CustomConstants.F_PRICE), out fPrice);
            int.TryParse(msg.GetField(CustomConstants.QTY), out Qty);
            string TCTReference = msg.GetField(CustomConstants.TCT_REF);
            string BPAcct = msg.GetField(CustomConstants.BP_COMPANY);
            int.TryParse(msg.GetField(CustomConstants.ORDER_TYPE), out OrdType);
            string ExecBkr = msg.GetField(CustomConstants.EXEC_BKR);
            string CounterParty = msg.GetField(CustomConstants.COUNTER_PARTY);
            BPisBuyer = msg.GetField(CustomConstants.IS_BUYER) == "Y";
            string BPTrader = msg.GetField(CustomConstants.BP_TRADER);
            string CounterTrader = msg.GetField(CustomConstants.COUNTER_TRADER);
            string Grade = msg.GetField(CustomConstants.GRADE);
            string Location = msg.GetField(CustomConstants.LOCATION);
            string CycDt = msg.GetField(CustomConstants.CYCLE_DATE);
            string DelMo = msg.GetField(CustomConstants.DELIVER_MONTH);
            string Terms = msg.GetField(CustomConstants.TERMS);
            string Payment = msg.GetField(CustomConstants.PAYMENT);
            string Origin = msg.GetField(CustomConstants.ORIGIN);
            string NumOfCyc = msg.GetField(CustomConstants.NUM_OF_CYCLES);
            string Via = msg.GetField(CustomConstants.VIA);
            string MoveMo = msg.GetField(CustomConstants.MOVE_MONTH);
            string Comment = msg.GetField(CustomConstants.COMMENT);
            IsWater = msg.GetField(CustomConstants.ISWATER) == "Y";
            string BPFloorBkr = msg.GetField(CustomConstants.BP_FLOOR_BKR);
            string CounterFloorBkr = msg.GetField(CustomConstants.COUNTER_FLOOR_BKR);
            string Diff = msg.GetField(CustomConstants.DIFFERENCE);
            string MercMo = msg.GetField(CustomConstants.MERC_MO);
            string MercPr = msg.GetField(CustomConstants.MERC_PRICE);
        }
    }

    public void OnCreate(SessionID sessionID)
    {
        AddToAdmin("SESSION CREATED: " + sessionID.ToString(), null);
    }

    public void OnLogout(SessionID sessionID)
    {
        AddToAdmin("LOGOUT: " + this.sessionID.ToString(), null);
    }

    public void OnLogon(SessionID sessionID)
    {
        this.sessionID = sessionID;
        AddToAdmin("LOG ON: " + this.sessionID.ToString(), null);
    }

    public void FromAdmin(QuickFix.Message msg, SessionID sessionID)
    {
        AddToAdmin("FROM ADMIN: " + msg.ToString(), null);
    }

    public void ToAdmin(QuickFix.Message msg, SessionID sessionID)
    {
        if (msg.Header.GetField(35) == "A")
        {
            msg.SetField(new QuickFix.Fields.Username(username));
            msg.SetField(new QuickFix.Fields.Password(password));
        }
        AddToAdmin("TO ADMIN: " + msg.ToString(), null);
    }

    public void ToApp(QuickFix.Message msg, SessionID sessionID)
    {
        AddToLB("TO APP: " + msg.ToString(), null);
    }

    public void GetTestMessage(string msgType)
    {
        if (sessionID == null) return;
        QuickFix.FIX50.TestMessage msg = new QuickFix.FIX50.TestMessage();
        msg.TestType = msgType;
        msg.Header.SetField(new QuickFix.Fields.MsgType("TEST"));
        msg.SetField(new QuickFix.Fields.StringField(CustomConstants.TEST_TYPE, msgType));
        Session.SendToTarget(msg, sessionID);
    }

    public TCT_Fix()
    {
        settings = new SessionSettings(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "fix", "initiator.cfg"));
        storeFactory = new FileStoreFactory(settings);
        logFactory = new FileLogFactory(settings);
        initiator = new SocketInitiator(
            this,
            storeFactory,
            settings,
            logFactory);
    }

    public TCT_Fix(ref string initID)
    {
        InitiatorID = initID;
        settings = new SessionSettings(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "fix", "initiator.cfg"));
        storeFactory = new FileStoreFactory(settings);
        logFactory = new FileLogFactory(settings);
        initiator = new SocketInitiator(
            this,
            storeFactory,
            settings,
            logFactory);
    }

    public void RunIt()
    {
        if (running) return;
        if (initiator.IsStopped)
        {
            try
            {
                initiator.Start(); // This can throw an error due to the current setup. I would recommend making the connection,
                                   // pulling data, and then closing the connection (polling) to ensure the initiator clears the
                                   // log files.
                                   // Reference http://lists.quickfixn.com/pipermail/quickfixn-quickfixn.com/2013q1/000747.html
                                   // 2013 issue, still unresolved... Restart app
            }
            catch (Exception ex)
            {
                if (MessageBox.Show("Error restarting initiator. Program will close due to file access. This is a QuickFIX bug, not an issue with this program. Please restart." + Environment.NewLine + Environment.NewLine +
                    "Reference: http://lists.quickfixn.com/pipermail/quickfixn-quickfixn.com/2013q1/000747.html for more information. Click OK to copy the link to the clipboard. Click \"X\" to ignore.") == DialogResult.OK)
                {
                    Clipboard.SetText("http://lists.quickfixn.com/pipermail/quickfixn-quickfixn.com/2013q1/000747.html");
                }
                throw new Exception(ex.ToString());
            }
        }
        running = true;
    }
}
Finally, to make it stand out (this is actually in the block above as well), you construct a message similar to below, keeping in mind that your ICE message will have certain required fields that my TestMessage does not. I cannot give code from production, though - sorry.
public void GetTestMessage(string msgType)
{
    if (sessionID == null) return;
    QuickFix.FIX50.TestMessage msg = new QuickFix.FIX50.TestMessage();
    msg.TestType = msgType;
    msg.Header.SetField(new QuickFix.Fields.MsgType("TEST"));
    msg.SetField(new QuickFix.Fields.StringField(CustomConstants.TEST_TYPE, msgType));
    Session.SendToTarget(msg, sessionID);
}
The learning curve is substantial. You will just need to keep playing around until you get it. Once you get it down, though, it makes sense. Stick with it. Let me know if you need anything else. A minimal usage sketch follows.
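The driver code below is illustrative only (AddToLB/AddToAdmin are the plain EventHandler events from the class, which my code invokes with the message text as the sender):
// Hypothetical usage of the TCT_Fix class above
var fix = new TCT_Fix();
fix.AddToAdmin += (sender, e) => Console.WriteLine(sender); // admin traffic log
fix.AddToLB += (sender, e) => Console.WriteLine(sender);    // app traffic log
fix.RunIt();                 // starts the initiator; logon happens asynchronously
// ... once OnLogon has fired, request a (custom) test message:
fix.GetTestMessage("TC");
fix.StopIt();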

EntityTooSmall in CompleteMultipartUploadResponse

Using .NET SDK v1.5.21.0.
I'm trying to upload a large file (63 MB) and I'm following the example at:
http://docs.aws.amazon.com/AmazonS3/latest/dev/LLuploadFileDotNet.html
But using a helper instead of the whole code, and using jQuery File Upload:
https://github.com/blueimp/jQuery-File-Upload/blob/master/basic-plus.html
What I have is:
string bucket = "mybucket";
long totalSize = long.Parse(context.Request.Headers["X-File-Size"]),
     maxChunkSize = long.Parse(context.Request.Headers["X-File-MaxChunkSize"]),
     uploadedBytes = long.Parse(context.Request.Headers["X-File-UloadedBytes"]),
     partNumber = uploadedBytes / maxChunkSize + 1,
     fileSize = partNumber * inputStream.Length;
bool lastPart = inputStream.Length < maxChunkSize;
// http://docs.aws.amazon.com/AmazonS3/latest/dev/LLuploadFileDotNet.html
if (partNumber == 1) // initialize upload
{
    iView.Utilities.Amazon_S3.S3MultipartUpload.InitializePartToCloud(fileName, bucket);
}
try
{
    // upload part
    iView.Utilities.Amazon_S3.S3MultipartUpload.UploadPartToCloud(inputStream, fileName, bucket, (int)partNumber, uploadedBytes, maxChunkSize);
    if (lastPart)
        // wrap it up and go home
        iView.Utilities.Amazon_S3.S3MultipartUpload.CompletePartToCloud(fileName, bucket);
}
catch (System.Exception ex)
{
    // Houston, we have a problem!
    //Console.WriteLine("Exception occurred: {0}", ex.Message);
    iView.Utilities.Amazon_S3.S3MultipartUpload.AbortPartToCloud(fileName, bucket);
}
and
public static class S3MultipartUpload
{
    private static string accessKey = System.Configuration.ConfigurationManager.AppSettings["AWSAccessKey"];
    private static string secretAccessKey = System.Configuration.ConfigurationManager.AppSettings["AWSSecretKey"];
    private static AmazonS3 client = Amazon.AWSClientFactory.CreateAmazonS3Client(accessKey, secretAccessKey);
    public static InitiateMultipartUploadResponse initResponse;
    public static List<UploadPartResponse> uploadResponses;

    public static void InitializePartToCloud(string destinationFilename, string destinationBucket)
    {
        // 1. Initialize.
        uploadResponses = new List<UploadPartResponse>();
        InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest()
            .WithBucketName(destinationBucket)
            .WithKey(destinationFilename.TrimStart('/'));
        initResponse = client.InitiateMultipartUpload(initRequest);
    }

    public static void UploadPartToCloud(Stream fileStream, string destinationFilename, string destinationBucket, int partNumber, long uploadedBytes, long maxChunkedBytes)
    {
        // 2. Upload Parts.
        UploadPartRequest request = new UploadPartRequest()
            .WithBucketName(destinationBucket)
            .WithKey(destinationFilename.TrimStart('/'))
            .WithUploadId(initResponse.UploadId)
            .WithPartNumber(partNumber)
            .WithPartSize(maxChunkedBytes)
            .WithFilePosition(uploadedBytes)
            .WithInputStream(fileStream) as UploadPartRequest;
        uploadResponses.Add(client.UploadPart(request));
    }

    public static void CompletePartToCloud(string destinationFilename, string destinationBucket)
    {
        // Step 3: complete.
        CompleteMultipartUploadRequest compRequest =
            new CompleteMultipartUploadRequest()
            .WithBucketName(destinationBucket)
            .WithKey(destinationFilename.TrimStart('/'))
            .WithUploadId(initResponse.UploadId)
            .WithPartETags(uploadResponses);
        CompleteMultipartUploadResponse completeUploadResponse =
            client.CompleteMultipartUpload(compRequest);
    }

    public static void AbortPartToCloud(string destinationFilename, string destinationBucket)
    {
        // abort.
        client.AbortMultipartUpload(new AbortMultipartUploadRequest()
            .WithBucketName(destinationBucket)
            .WithKey(destinationFilename.TrimStart('/'))
            .WithUploadId(initResponse.UploadId));
    }
}
My maxChunkSize is 6 MB (6 * 1024 * 1024), as I have read that the minimum is 5 MB...
Why am I getting the "Your proposed upload is smaller than the minimum allowed size" exception? What am I doing wrong?
The error is:
<Error>
<Code>EntityTooSmall</Code>
<Message>Your proposed upload is smaller than the minimum allowed size</Message>
<ETag>d41d8cd98f00b204e9800998ecf8427e</ETag>
<MinSizeAllowed>5242880</MinSizeAllowed>
<ProposedSize>0</ProposedSize>
<RequestId>C70E7A23C87CE5FC</RequestId>
<HostId>pmhuMXdRBSaCDxsQTHzucV5eUNcDORvKY0L4ZLMRBz7Ch1DeMh7BtQ6mmfBCLPM2</HostId>
<PartNumber>1</PartNumber>
</Error>
How can I get ProposedSize if I'm passing the stream and stream length?
Here is a working solution for the latest Amazon SDK (as of today: v1.5.37.0).
Amazon S3 multipart upload works like this:
1. Initialize the request using client.InitiateMultipartUpload(initRequest)
2. Send chunks of the file (loop until the end) using client.UploadPart(request)
3. Complete the request using client.CompleteMultipartUpload(compRequest)
If anything goes wrong, remember to dispose the client and request, as well as fire the abort command using client.AbortMultipartUpload(abortMultipartUploadRequest).
I keep the client in Session, as we need it for each chunk upload as well, and keep hold of the ETags that are now used to complete the process.
You can see an example and a simple way of doing this in the Amazon docs themselves. I ended up having a class to do everything, plus I have integrated it with the lovely jQuery File Upload plugin (handler code below as well).
The S3MultipartUpload class is as follows:
public class S3MultipartUpload : IDisposable
{
    string accessKey = System.Configuration.ConfigurationManager.AppSettings.Get("AWSAccessKey");
    string secretAccessKey = System.Configuration.ConfigurationManager.AppSettings.Get("AWSSecretKey");
    AmazonS3 client;
    public string OriginalFilename { get; set; }
    public string DestinationFilename { get; set; }
    public string DestinationBucket { get; set; }
    public InitiateMultipartUploadResponse initResponse;
    public List<PartETag> uploadPartETags;
    public string UploadId { get; private set; }

    public S3MultipartUpload(string destinationFilename, string destinationBucket)
    {
        if (client == null)
        {
            System.Net.WebRequest.DefaultWebProxy = null; // disable proxy to make upload quicker
            client = Amazon.AWSClientFactory.CreateAmazonS3Client(accessKey, secretAccessKey, new AmazonS3Config()
            {
                RegionEndpoint = Amazon.RegionEndpoint.EUWest1,
                CommunicationProtocol = Protocol.HTTP
            });
            this.OriginalFilename = destinationFilename.TrimStart('/');
            this.DestinationFilename = string.Format("{0:yyyy}{0:MM}{0:dd}{0:HH}{0:mm}{0:ss}{0:fffff}_{1}", DateTime.UtcNow, this.OriginalFilename);
            this.DestinationBucket = destinationBucket;
            this.InitializePartToCloud();
        }
    }

    private void InitializePartToCloud()
    {
        // 1. Initialize.
        uploadPartETags = new List<PartETag>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest();
        initRequest.BucketName = this.DestinationBucket;
        initRequest.Key = this.DestinationFilename;
        // make it public
        initRequest.AddHeader("x-amz-acl", "public-read");
        initResponse = client.InitiateMultipartUpload(initRequest);
    }

    public void UploadPartToCloud(Stream fileStream, long uploadedBytes, long maxChunkedBytes)
    {
        int partNumber = uploadPartETags.Count() + 1; // current part
        // 2. Upload Parts.
        UploadPartRequest request = new UploadPartRequest();
        request.BucketName = this.DestinationBucket;
        request.Key = this.DestinationFilename;
        request.UploadId = initResponse.UploadId;
        request.PartNumber = partNumber;
        request.PartSize = fileStream.Length;
        //request.FilePosition = uploadedBytes // remove this line?
        request.InputStream = fileStream; // as UploadPartRequest;
        var up = client.UploadPart(request);
        uploadPartETags.Add(new PartETag() { ETag = up.ETag, PartNumber = partNumber });
    }

    public string CompletePartToCloud()
    {
        // Step 3: complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest();
        compRequest.BucketName = this.DestinationBucket;
        compRequest.Key = this.DestinationFilename;
        compRequest.UploadId = initResponse.UploadId;
        compRequest.PartETags = uploadPartETags;
        string r = "Something went badly wrong";
        using (CompleteMultipartUploadResponse completeUploadResponse = client.CompleteMultipartUpload(compRequest))
            r = completeUploadResponse.ResponseXml;
        return r;
    }

    public void AbortPartToCloud()
    {
        // abort.
        client.AbortMultipartUpload(new AbortMultipartUploadRequest()
        {
            BucketName = this.DestinationBucket,
            Key = this.DestinationFilename,
            UploadId = initResponse.UploadId
        });
    }

    public void Dispose()
    {
        if (client != null) client.Dispose();
        if (initResponse != null) initResponse.Dispose();
    }
}
I use DestinationFilename as the destination file so I can avoid name collisions, but I keep the OriginalFilename as I need it later.
Using the jQuery File Upload plugin, everything works inside a generic handler, and the process is something like this:
// Upload partial file
private void UploadPartialFile(string fileName, HttpContext context, List<FilesStatus> statuses)
{
    if (context.Request.Files.Count != 1)
        throw new HttpRequestValidationException("Attempt to upload chunked file containing more than one fragment per request");
    var inputStream = context.Request.Files[0].InputStream;
    string contentRange = context.Request.Headers["Content-Range"]; // "bytes 0-6291455/14130271"
    int fileSize = int.Parse(contentRange.Split('/')[1]),
        maxChunkSize = int.Parse(context.Request.Headers["X-Max-Chunk-Size"]),
        uploadedBytes = int.Parse(contentRange.Replace("bytes ", "").Split('-')[0]);
    iView.Utilities.AWS.S3MultipartUpload s3Upload = null;
    try
    {
        // ######################################################################################
        // 1. Initialize Amazon S3 Client
        if (uploadedBytes == 0)
        {
            HttpContext.Current.Session["s3-upload"] = new iView.Utilities.AWS.S3MultipartUpload(fileName, awsBucket);
            s3Upload = (iView.Utilities.AWS.S3MultipartUpload)HttpContext.Current.Session["s3-upload"];
            string msg = System.String.Format("Upload started: {0} ({1:N0}Mb)", s3Upload.DestinationFilename, (fileSize / 1024)); // note: fileSize / 1024 is KB, not MB
            this.Log(msg);
        }
        // cast current session object
        if (s3Upload == null)
            s3Upload = (iView.Utilities.AWS.S3MultipartUpload)HttpContext.Current.Session["s3-upload"];
        // ######################################################################################
        // 2. Send Chunks
        s3Upload.UploadPartToCloud(inputStream, uploadedBytes, maxChunkSize);
        // ######################################################################################
        // 3. Complete Upload
        if (uploadedBytes + maxChunkSize > fileSize)
        {
            string completeRequest = s3Upload.CompletePartToCloud();
            this.Log(completeRequest); // log S3 response
            s3Upload.Dispose(); // dispose all objects
            HttpContext.Current.Session["s3-upload"] = null; // we don't need this anymore
        }
    }
    catch (System.Exception ex)
    {
        if (ex.InnerException != null)
            while (ex.InnerException != null)
                ex = ex.InnerException;
        this.Log(string.Format("{0}\n\n{1}", ex.Message, ex.StackTrace)); // log error
        s3Upload.AbortPartToCloud(); // abort current upload
        s3Upload.Dispose(); // dispose all objects
        statuses.Add(new FilesStatus(ex.Message));
        return;
    }
    statuses.Add(new FilesStatus(s3Upload.DestinationFilename, fileSize, ""));
}
Keep in mind that to have a Session object inside a generic handler, you need to implement IRequiresSessionState, so your handler will look like:
public class UploadHandlerSimple : IHttpHandler, IRequiresSessionState
Inside fileupload.js (under _initXHRData) I have added an extra header called X-Max-Chunk-Size so I can pass this to Amazon and calculate whether it's the last part of the uploaded file.
Feel free to comment and make smart edits for everyone to use.
I guess you didn't set the content length of the part inside the UploadPartToCloud() function.
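To illustrate against the question's code: UploadPartToCloud always declares maxChunkedBytes as the part size, so the final, shorter chunk advertises more bytes than the stream actually contains. A sketch of sizing the part from the stream itself, as the working class above does (old-style SDK v1.x fluent API from the question; WithFilePosition dropped, matching the answer's code):
// Sketch: declare the part's true content length instead of the fixed chunk size.
UploadPartRequest request = new UploadPartRequest()
    .WithBucketName(destinationBucket)
    .WithKey(destinationFilename.TrimStart('/'))
    .WithUploadId(initResponse.UploadId)
    .WithPartNumber(partNumber)
    .WithPartSize(fileStream.Length) // was maxChunkedBytes
    .WithInputStream(fileStream) as UploadPartRequest;
uploadResponses.Add(client.UploadPart(request));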

How do I implement a C# Thrift service and consume it with a Silverlight client?

I'm currently looking at Thrift to use as an RPC framework for our apps (mostly written in C# and Silverlight). I've come as far as implementing a service and consuming it from a C# console app (using a socket as transport).
For the C# server-side code, my code looked like this (basically copying the tutorials included with the source code):
MyServiceHandler handler = new MyServiceHandler();
MyService.Processor processor = new MyService.Processor(handler);
TServerTransport serverTransport = new TServerSocket(9090);
TServer server = new TSimpleServer(processor, serverTransport);
server.Serve();
For the client side code it looked like:
TTransport transport = new TSocket("localhost", 9090);
TProtocol protocol = new TBinaryProtocol(transport);
MyService.Client client = new MyService.Client(protocol);
transport.Open();
client.SomeServiceCall();
However, we will be consuming the service from a Silverlight client, and unfortunately there is no support for sockets in Silverlight for Thrift. I assume I'm forced to use HTTP communication between the client and the service, using Thrift's C# THttpClient and THttpHandler classes? I could not find any examples of how to do this out there; can anyone point me in the right direction? Some example server- and client-side code would be appreciated.
It seems that this issue was already addressed by this guy. According to this JIRA, the fix is available in Thrift 0.9. You can either try this snapshot (note that, as it's not a final release, it might not be stable) or you can apply this patch to the 0.8 release.
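For reference, a client-side sketch of what the HTTP transport looks like, assuming a Thrift 0.9 build where THttpClient is available for C# (the URL is a placeholder for wherever your THttpHandler is hosted):
// Hypothetical Silverlight/C# client over HTTP instead of sockets
TTransport transport = new THttpClient(new Uri("http://localhost:9090/myservice"));
TProtocol protocol = new TBinaryProtocol(transport);
MyService.Client client = new MyService.Client(protocol);
transport.Open();
client.SomeServiceCall();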
I believe by now you will have understood: there is no direct way of communicating from Silverlight to the Cassandra database, either using Thrift or any other clients.
I have one simple option related to this: write a Silverlight-enabled web service and consume it from the client.
For example, on the server side you can have a web service which does insert/update/read etc., like this. I just managed to pull out some code which we use for our project. Hope this helps.
using Apache.Cassandra;
using Thrift.Protocol;
using Thrift.Transport;

namespace CassandraWebLibrary
{
    public class MyDb
    {
        String _host;
        int _port;
        String _keyspace;
        bool _isConnected;
        TTransport _transport = null;
        Apache.Cassandra.Cassandra.Client _client = null;
        String columnFamily = "ColumnFamilyName";

        public MyDb(String host, int port, String keyspace) // constructor renamed to match the class
        {
            _host = host;
            _port = port;
            _keyspace = keyspace;
            _isConnected = false;
        }

        public bool Connect()
        {
            try
            {
                _transport = new TFramedTransport(new TSocket(_host, _port));
                TProtocol protocol = new TBinaryProtocol(_transport);
                _client = new Apache.Cassandra.Cassandra.Client(protocol);
                _transport.Open();
                _client.set_keyspace(_keyspace);
                _isConnected = true;
            }
            catch (Exception ex)
            {
                log.Error(ex.ToString());
            }
            return _isConnected;
        }

        public bool Close()
        {
            if (_transport.IsOpen)
                _transport.Close();
            _isConnected = false;
            return true;
        }

        public bool InsertData(/* send your data as parameters here */)
        {
            try
            {
                List<Column> list = new List<Column>();
                string strKey = keyvalue;
                #region Inserting into column family
                List<Byte> valbytes = new List<byte>(BitConverter.GetBytes(value)); // You might have to pad this with more bytes to make it 8 bytes long
                Column doublecolumn1 = new Column()
                {
                    Name = Encoding.UTF8.GetBytes("column1"),
                    Timestamp = timestampvalue,
                    Value = valbytes.ToArray()
                };
                list.Add(doublecolumn1);
                Column stringcolumn2 = new Column()
                {
                    Name = Encoding.UTF8.GetBytes("column2"),
                    Timestamp = timestampvalue,
                    Value = Encoding.UTF8.GetBytes("StringValue")
                };
                list.Add(stringcolumn2);
                Column timecolumn3 = new Column()
                {
                    Name = Encoding.UTF8.GetBytes("column3"),
                    Timestamp = timestampvalue,
                    Value = BitConverter.GetBytes(DateTime.Now.Ticks)
                };
                list.Add(timecolumn3);
                #endregion
                ColumnParent columnParent = new ColumnParent();
                columnParent.Column_family = columnFamily;
                Byte[] key = Encoding.UTF8.GetBytes(strKey);
                foreach (Column column in list)
                {
                    try
                    {
                        _client.insert(key, columnParent, column, ConsistencyLevel.QUORUM);
                    }
                    catch (Exception e)
                    {
                        log.Error(e.ToString());
                    }
                }
                return true;
            }
            catch (Exception ex)
            {
                log.Error(ex.ToString());
                return false;
            }
        }

        public List<YourReturnObject> GetData(/* parameters */)
        {
            try
            {
                ColumnParent columnParent = new ColumnParent();
                columnParent.Column_family = columnFamily;
                DateTime curdate = startdate;
                List<IndexExpression> indExprList = new List<IndexExpression>(); // declaration was missing in the original snippet
                IndexExpression indExprsecondkey = new IndexExpression();
                indExprsecondkey.Column_name = Encoding.UTF8.GetBytes("column");
                indExprsecondkey.Op = IndexOperator.EQ;
                List<Byte> valbytes = PadLeftBytes((int)yourid, 8);
                indExprsecondkey.Value = valbytes.ToArray();
                indExprList.Add(indExprsecondkey);
                IndexClause indClause = new IndexClause()
                {
                    Expressions = indExprList,
                    Count = 1000,
                    Start_key = Encoding.UTF8.GetBytes("")
                };
                SlicePredicate slice = new SlicePredicate()
                {
                    Slice_range = new SliceRange()
                    {
                        // Start and Finish cannot be null
                        Start = new byte[0],
                        Finish = new byte[0],
                        Count = 1000,
                        Reversed = false
                    }
                };
                List<KeySlice> keyslices = _client.get_indexed_slices(columnParent, indClause, slice, ConsistencyLevel.ONE);
                foreach (KeySlice ks in keyslices)
                {
                    foreach (ColumnOrSuperColumn cl in ks.Columns) // inner loop was missing in the original snippet
                    {
                        String stringcolumnvalue = Encoding.UTF8.GetString(cl.Column.Value);
                        double doublevalue = BitConverter.ToDouble(cl.Column.Value, 0);
                        long timeticks = BitConverter.ToInt64(cl.Column.Value, 0);
                        DateTime dtcolumntime = new DateTime(timeticks);
                    }
                }
            }
            catch (Exception ex)
            {
                log.Error(ex.ToString());
            }
            return yourdatalist;
        }
    }
}
Now the above class can be used by your web service, which in turn will be used by Silverlight. By the way, you'll have to take care of other Silverlight issues, like the size of data to be downloaded from the server/web service, etc.
FYI, our client service of Cassandra runs on port 9160.
