I'm looking for resources, or for anyone who has written a scheduler in WCF.
What I want to achieve is basically what you can see in OGame, Travian, or dozens of other text-based browser games.
A player clicks and sends a task to the server to construct a building, a unit, or something else for him.
From what I've figured out, I need to run some kind of scheduler on the server that gathers all tasks and tracks them until some period of time passes (2 minutes, 10 minutes, 3 days, etc.), and after that period ends the service should perform an action, like sending data to the database.
Well, I've been trying to make something very simple that I could start from, and I ended up with this:
public class BuildingScheduler
{
public int TaskID { get; set; }
public string UserName { get; set; }
public DateTime TaskEnd { get; set; }
public string BuildingName { get; set; }
public bool TaskDone { get; set; }
public DateTime RemainingTime { get; set; }
TestDBModelContainer _ctx;
TestData _testData;
public IDuplexClient Client { get; set; }
public BuildingScheduler()
{
TaskDone = false;
}
public void MakeBuilding()
{
while (DateTime.Now <= TaskEnd)
{
//Client.DisplayMessage(DateTime.Now.ToString());
RemainingTime = DateTime.Now;
}
_testData = new TestData { DataName = BuildingName, Created = DateTime.Now };
_ctx = new TestDBModelContainer();
_ctx.TestDataSet.AddObject(_testData);
_ctx.SaveChanges();
TaskDone = true;
//Client.DisplayMessage("Building completed!");
}
}
static List<UserChannel> _userChannels = new List<UserChannel>();
static List<BuildingScheduler> _buildingSchedules = new List<BuildingScheduler>();
List<BuildingScheduler> _buildingSchedulesToRemove = new List<BuildingScheduler>();
[OperationContract]
public void DoWork()
{
IDuplexClient client = OperationContext.Current.GetCallbackChannel<IDuplexClient>();
UserChannel userChannel = new UserChannel { Client = client, ClientName = "TestClient" };
string clientName = (from p in _userChannels
where p.ClientName == "TestClient"
select p.ClientName).FirstOrDefault();
lock (((ICollection)_userChannels).SyncRoot)
{
if (clientName == null)
{
_userChannels.Add(userChannel);
}
}
CheckBuilding();
}
private void CheckBuilding()
{
BuildingScheduler bs = (from p in _buildingSchedules
where p.UserName == "TestClient"
select p).FirstOrDefault();
IDuplexClient client = (from p in _userChannels
where p.ClientName == "TestClient"
select p.Client).FirstOrDefault();
if (bs != null)
{
client.DisplayMessage(bs.RemainingTime);
}
}
private void StartBuilding()
{
foreach (BuildingScheduler bs in _buildingSchedules)
{
if (bs.TaskDone == false)
{
bs.MakeBuilding();
}
else if (bs.TaskDone == true)
{
_buildingSchedulesToRemove.Add(bs);
}
}
    // Drain the list of completed schedules.
    while (_buildingSchedulesToRemove.Count > 0)
    {
        BuildingScheduler bs = _buildingSchedulesToRemove.First(p => p.TaskDone);
        _buildingSchedules.Remove(bs);
        _buildingSchedulesToRemove.Remove(bs);
    }
CheckBuilding();
}
[OperationContract]
public void MakeBuilding(string name)
{
BuildingScheduler _buildng = new BuildingScheduler();
//_buildng.Client = client;
_buildng.TaskID = 1;
_buildng.UserName = "TestClient";
_buildng.TaskEnd = DateTime.Now.AddSeconds(50);
_buildng.BuildingName = name;
_buildingSchedules.Add(_buildng);
StartBuilding();
}
I have hardcoded most values for testing.
InstanceContextMode is set to PerCall.
Anyway, this code is working, at least up to a point. If we ignore a zillion exceptions from Entity Framework, I can add tasks from multiple clients and they are added to the DB in order from newest to oldest (or shortest to longest?).
The point is, the user CAN'T track his tasks. When I change the page I don't see how much time remains until the task is done. This code essentially does not provide time tracking, because I got rid of it as it wasn't working anyway.
I guess I should store my tasks in some persistent data storage, and every time the user checks the page the newest state should be drawn from that storage and sent to him.
I'm not an expert, but I think the best option here is to store all data in memory until the task is done.
Any relational database will probably be too slow if I have to constantly update records with the newest state of each task.
I know I should just synchronize a client-side timer with the server rather than constantly stream the time from the server. But the big question here is how to get the newest state of task progress when the user comes back to the page after 3 seconds or 3 hours?
Any advice on how to make this work as expected?
And by the way, I'm using polling duplex and Silverlight.
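For what it's worth, a minimal sketch of one answer to that last question: persist only the task's end time, and derive the remaining time on demand whenever the user loads the page, so it is correct whether he returns after 3 seconds or 3 hours (TaskRecord and its storage are hypothetical placeholders here):
public class TaskRecord
{
    public int TaskID { get; set; }
    public string UserName { get; set; }
    public DateTime TaskEnd { get; set; } // the only time value that needs storing
}

public static class TaskProgress
{
    // Remaining time is derived, never stored, so no per-tick
    // updates of the record are required.
    public static TimeSpan GetRemaining(TaskRecord task)
    {
        TimeSpan remaining = task.TaskEnd - DateTime.UtcNow;
        return remaining > TimeSpan.Zero ? remaining : TimeSpan.Zero;
    }
}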
It sounds like you are using WCF to do something it wasn't designed for (but I understand the need). I would suggest you look at Workflow Foundation (WF) as a possible solution to your problem. Here is a good explanation:
http://msdn.microsoft.com/library/dd851337.aspx
Here is also a good intro video:
http://channel9.msdn.com/Blogs/mwink/Introduction-to-Workflow-Services-building-WCF-Services-with-WF
Workflow can consume WCF services, and it is designed to work over time. It holds data in state until something changes (regardless of time) without consuming excess resources. It also allows for persistence and parallel processes.
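For illustration, a minimal console sketch of the Delay-based idea (using plain WorkflowInvoker rather than a hosted workflow service; the activity contents are placeholders):
using System;
using System.Activities;
using System.Activities.Statements;

class Program
{
    static void Main()
    {
        // A Delay activity suspends the workflow until the duration elapses;
        // with a persistence store configured, the instance can be unloaded
        // from memory while it waits.
        Activity buildWorkflow = new Sequence
        {
            Activities =
            {
                new WriteLine { Text = "Build started" },
                new Delay { Duration = TimeSpan.FromMinutes(2) },
                new WriteLine { Text = "Building completed!" }
            }
        };

        WorkflowInvoker.Invoke(buildWorkflow);
    }
}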
Related
So I am trying to take an IAsyncEnumerable method and relay its results in PowerShell. I understand that PowerShell does not have async support, and generally people use Task.GetAwaiter().GetResult() as the means to get their result.
However, this approach does not work (or at least I don't know how to implement it) for IAsyncEnumerable methods.
My specific case is a little more sophisticated, but let's take this example:
namespace ServerMetadataCache.Client.Powershell
{
[Cmdlet(VerbsDiagnostic.Test,"SampleCmdlet")]
[OutputType(typeof(FavoriteStuff))]
public class TestSampleCmdletCommand : PSCmdlet
{
[Parameter(
Mandatory = true,
Position = 0,
ValueFromPipeline = true,
ValueFromPipelineByPropertyName = true)]
public int FavoriteNumber { get; set; } = default!;
[Parameter(
Position = 1,
ValueFromPipelineByPropertyName = true)]
[ValidateSet("Cat", "Dog", "Horse")]
public string FavoritePet { get; set; } = "Dog";
private async IAsyncEnumerable<int> InternalMethodAsync()
{
for (int i = 1; i <= 10; i++)
{
await Task.Delay(1000);//Simulate waiting for data to come through.
yield return i;
}
}
protected override void EndProcessing()
{
//this is the issue... how can I tell powershell to read/process results of
//InternalMethodAsync()? Regularly trying to read the method doesn't work
//and neither does wrapping the method with a task and using GetAwaiter().GetResult()
}
public class FavoriteStuff
{
public int FavoriteNumber { get; set; } = default!;
public string FavoritePet { get; set; } = default!;
}
    }
}
This cmdlet is of course a dummy that just takes in an integer and either "Dog", "Cat" or "Horse", but my bigger concern is how to process InternalMethodAsync() in the cmdlet. The challenge is getting around the IAsyncEnumerable.
Make an async wrapper method that takes a concurrent collection - like a ConcurrentQueue<int> - as a parameter and fills it with the items from the IAsyncEnumerable<int>, then start reading from it before the task completes:
private async Task InternalMethodExchangeAsync(IProducerConsumerCollection<int> outputCollection)
{
    await foreach (var item in InternalMethodAsync())
        outputCollection.TryAdd(item);
}
Then in EndProcessing():
protected override void EndProcessing()
{
var resultQueue = new ConcurrentQueue<int>();
var task = InternalMethodExchangeAsync(resultQueue);
// Task still running? Let's try reading from the queue!
while(!task.IsCompleted)
if(resultQueue.TryDequeue(out int preliminaryResult))
WriteObject(preliminaryResult);
// Process remaining items
while(resultQueue.Count > 0)
if(resultQueue.TryDequeue(out int trailingResult))
WriteObject(trailingResult);
}
This question really helped me figure it out.
It goes back to Task.GetAwaiter().GetResult(), which works on regular tasks. You need to process the enumerator's results as tasks (which I was trying to do earlier, but not in the right way). So, based on the InternalMethodAsync() in my question, one could process the results like so:
private void processDummy(){
var enumerator = InternalMethodAsync().GetAsyncEnumerator();
while (enumerator.MoveNextAsync().AsTask().GetAwaiter().GetResult()){
Console.WriteLine(enumerator.Current);
}
}
The AsTask() was the key piece here.
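Applying the same pattern inside the cmdlet's EndProcessing might look like this (a sketch; the explicit disposal step is added here for completeness):
protected override void EndProcessing()
{
    var enumerator = InternalMethodAsync().GetAsyncEnumerator();
    try
    {
        // Block synchronously on each MoveNextAsync and emit items as they arrive.
        while (enumerator.MoveNextAsync().AsTask().GetAwaiter().GetResult())
            WriteObject(enumerator.Current);
    }
    finally
    {
        enumerator.DisposeAsync().AsTask().GetAwaiter().GetResult();
    }
}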
I'm currently implementing a MongoDB database for caching.
I've made a very generic client, with the save method working like this:
public virtual void SaveAndOverwriteExistingCollection<T>(string collectionKey, T[] data)
{
if (data == null || !data.Any())
return;
var collection = Connector.MongoDatabase.GetCollection<T>(collectionKey.ToString());
var filter = new FilterDefinitionBuilder<T>().Empty;
var operations = new List<WriteModel<T>>
{
new DeleteManyModel<T>(filter),
};
operations.AddRange(data.Select(t => new InsertOneModel<T>(t)));
try
{
collection.BulkWrite(operations, new BulkWriteOptions { IsOrdered = true});
}
    catch (MongoBulkWriteException)
    {
        throw; // rethrow, preserving the original stack trace
    }
}
With our other clients, calls to this method look similar to this:
public Person[] Get(bool bypassCache = false)
{
Person[] people = null;
if (!bypassCache)
people = base.Get<Person>(DefaultCollectionKeys.People.CreateCollectionKey());
if (people.SafeAny())
return people;
people = Client<IPeopleService>.Invoke(s => s.Get());
base.SaveAndOverwriteExistingCollection(DefaultCollectionKeys.People.CreateCollectionKey(), people);
return people;
}
After we've persisted data to the backend, we reload the cache from MongoDB by calling our Get methods, passing the argument true. So we reload all of the data.
This works fine for most use cases. But considering that we are using a web-garden solution (multiple processes) for the same application, this leads to concurrency issues. If I save and reload the cache while another user is reloading the page, it sometimes throws an E11000 duplicate key error:
Command createIndexes failed: E11000 duplicate key error collection:
cache.Person index: Id_1_Name_1_Email_1 dup
key: { : 1, : "John Doe", : "foo@bar.com" }.
Considering that this is a web garden with multiple IIS processes running, locking won't do much good. And considering that bulk writes should be thread-safe, I'm a bit puzzled. I've looked into upserting the data, but changing our clients to be type-specific and updating each field would take too long and feels like unnecessary work. Therefore I'm looking for a very generic solution.
UPDATE
Removed the Insert and Delete and changed it to a collection of ReplaceOneModel. I'm currently experiencing an issue where only the last element in a collection is persisted.
public virtual void SaveAndOverwriteExistingCollection<T>(string collectionKey, T[] data)
{
if (data == null || !data.Any())
return;
var collection = Connector.MongoDatabase.GetCollection<T>(collectionKey.ToString());
var filter = new FilterDefinitionBuilder<T>().Empty;
var operations = new List<WriteModel<T>>();
operations.AddRange(data.Select(t => new ReplaceOneModel<T>(filter, t) { IsUpsert = true }));
try
{
collection.BulkWrite(operations, new BulkWriteOptions { IsOrdered = true });
}
    catch (MongoBulkWriteException)
    {
        throw; // rethrow, preserving the original stack trace
    }
}
I just passed in a collection of 811 items, and only the last one can be found in the collection after this method has executed.
Example of a DTO being persisted:
public class TranslationSetting
{
[BsonId(IdGenerator = typeof(GuidGenerator))]
public object ObjectId { get; set; }
public string LanguageCode { get; set; }
public string SettingKey { get; set; }
public string Text { get; set; }
}
With this index:
string TranslationSettings()
{
var indexBuilder = new IndexKeysDefinitionBuilder<TranslationSetting>()
.Ascending(_ => _.SettingKey)
.Ascending(_ => _.LanguageCode);
return MongoDBClient.CreateIndex(DefaultCollectionKeys.TranslationSettings, indexBuilder);
}
I'm nearing the end of a project for which I'm trying to use DDD, but have discovered a glaring bug that I'm not sure how to easily solve.
Here is my entity - I've reduced it for simplicity:
public class Contribution : Entity
{
protected Contribution()
{
this.Parts = new List<ContributionPart>();
}
internal Contribution(Guid id)
{
this.Id = id;
this.Parts = new List<ContributionPart>();
}
public Guid Id { get; private set; }
protected virtual IList<ContributionPart> Parts { get; private set; }
public void UploadParts(string path, IEnumerable<long> partLengths)
{
if (this.Parts.Count > 0)
{
throw new InvalidOperationException("Parts have already been uploaded.");
}
long startPosition = 0;
int partNumber = 1;
foreach (long partLength in partLengths)
{
this.Parts.Add(new ContributionPart(this.Id, partNumber, partLength));
this.Commands.Add(new UploadContributionPartCommand(this.Id, partNumber, path, startPosition, partLength));
startPosition += partLength;
partNumber++;
}
}
public void SetUploadResult(int partNumber, string etag)
{
if (etag == null)
{
throw new ArgumentNullException(nameof(etag));
}
ContributionPart part = this.Parts.SingleOrDefault(p => p.PartNumber == partNumber);
if (part == null)
{
throw new ContributionPartNotFoundException(this.Id, partNumber);
}
part.SetUploadResult(etag);
if (this.Parts.All(p => p.IsUploaded))
{
IEnumerable<PartUploadedResult> results = this.Parts.Select(p => new PartUploadedResult(p.PartNumber, p.ETag));
this.Events.Add(new ContributionUploaded(this.Id, results));
}
}
}
My bug occurs in the SetUploadResult method. Basically, multiple threads are performing uploads concurrently and then call SetUploadResult at the end of the upload. But because the entity was loaded a few seconds beforehand, each thread calls SetUploadResult on a different instance of the entity, and so the test this.Parts.All(p => p.IsUploaded) will never evaluate to true.
I'm not sure how to easily resolve this. The idea behind adding multiple UploadContributionPartCommands to the Commands collection was so that each ContributionPart could be uploaded in parallel - my CommandBus ensures this - but with each part uploaded in parallel, it causes problems for my entity logic.
I think you can refactor the Contribution so that it does not handle the SetUploadResult itself. That decouples the Contribution entity, and the side effects of the SetUploadResult are isolated, keeping the technical concern out of the Contribution domain model.
Create a dispatcher class that contains what the SetUploadResult is doing.
Once the Contribution entity has finished carrying out its logic, the thread of execution returns to the application service. It is at this point that the events from the entity can be fed into the dispatcher.
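A rough sketch of that shape (the repository and dispatcher abstractions are assumptions, not part of the original code):
public class ContributionAppService
{
    private readonly IContributionRepository _repository; // assumed persistence abstraction
    private readonly IEventDispatcher _dispatcher;        // assumed dispatcher abstraction

    public ContributionAppService(IContributionRepository repository, IEventDispatcher dispatcher)
    {
        _repository = repository;
        _dispatcher = dispatcher;
    }

    public void SetUploadResult(Guid contributionId, int partNumber, string etag)
    {
        Contribution contribution = _repository.Get(contributionId);
        contribution.SetUploadResult(partNumber, etag);
        _repository.Save(contribution);

        // Only after the entity has finished its logic are the recorded
        // events handed to the dispatcher, keeping side effects out of
        // the domain model.
        foreach (var domainEvent in contribution.Events)
            _dispatcher.Dispatch(domainEvent);
    }
}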
If they are long-running processes, you can add them to a collection of tasks and run them asynchronously. Then you can simply await until all the tasks are done. You can search on SO for how to do this.
var results = await Task.WhenAll(task1, task2, ..., taskN);
If several threads may call the SetUploadResult method simultaneously and you have a race condition, you should protect the critical section using a synchronization mechanism such as a lock: https://msdn.microsoft.com/en-us/library/c5kehkcz.aspx.
If you make the lock field static it will be shared across all instances of your entity type, e.g.:
private static readonly object _lock = new object();
public void SetUploadResult(int partNumber, string etag)
{
if (etag == null)
{
throw new ArgumentNullException(nameof(etag));
}
ContributionPart part = this.Parts.SingleOrDefault(p => p.PartNumber == partNumber);
if (part == null)
{
throw new ContributionPartNotFoundException(this.Id, partNumber);
}
part.SetUploadResult(etag);
lock (_lock) //Only one thread at a time can enter this critical section.
//The second thread will wait here until the first thread leaves the critical section.
{
if (this.Parts.All(p => p.IsUploaded))
{
IEnumerable<PartUploadedResult> results = this.Parts.Select(p => new PartUploadedResult(p.PartNumber, p.ETag));
this.Events.Add(new ContributionUploaded(this.Id, results));
}
}
}
Update - solved
The final solution differs a bit from Brandon's suggestion, but his answer put me on the right track.
class State
{
public int Offset { get; set; }
public HashSet<string> UniqueImageUrls = new HashSet<string>();
}
public IObservable<TPicture> GetPictures(ref object _state)
{
var localState = (State) _state ?? new State();
_state = localState;
return Observable.Defer(()=>
{
return Observable.Defer(() => Observable.Return(GetPage(localState.Offset)))
.SubscribeOn(TaskPoolScheduler.Default)
.Do(x=> localState.Offset += 20)
.Repeat()
.TakeWhile(x=> x.Count > 0)
.SelectMany(x=> x)
.Where(x=> !localState.UniqueImageUrls.Contains(x.ImageUrl))
.Do(x=> localState.UniqueImageUrls.Add(x.ImageUrl));
});
}
IList<TPicture> GetPage(int offset)
{
...
return result;
}
Original Question
I'm currently struggling with the following problem. The PictureProvider implementation shown below works with an offset variable used for paging the results of a backend service that provides the actual data. What I would like to implement is an elegant solution that makes the current offset available to the consumer of the observable, to allow resuming the observable sequence at a later time at the correct offset. Resuming is already accounted for by the initialState argument to GetPictures().
Recommendations for improving the code in a more Rx-like fashion would be welcome as well. I'm actually not so sure the Task.Run() stuff is appropriate here.
public class PictureProvider :
IPictureProvider<Picture>
{
#region IPictureProvider implementation
public IObservable<Picture> GetPictures(object initialState)
{
return Observable.Create<Picture>((IObserver<Picture> observer) =>
{
var state = new ProducerState(initialState);
ProducePictures(observer, state);
return state;
});
}
#endregion
void ProducePictures(IObserver<Picture> observer, ProducerState state)
{
Task.Run(() =>
{
try
{
while(!state.Terminate.WaitOne(0))
{
var page = GetPage(state.Offset);
if(page.Count == 0)
{
observer.OnCompleted();
break;
}
else
{
foreach(var picture in page)
observer.OnNext(picture);
state.Offset += page.Count;
}
}
}
catch (Exception ex)
{
observer.OnError(ex);
}
state.TerminateAck.Set();
});
}
IList<Picture> GetPage(int offset)
{
var result = new List<Picture>();
... boring web service call here
return result;
}
public class ProducerState :
IDisposable
{
public ProducerState(object initialState)
{
Terminate = new ManualResetEvent(false);
TerminateAck = new ManualResetEvent(false);
if(initialState != null)
Offset = (int) initialState;
}
public ManualResetEvent Terminate { get; private set; }
public ManualResetEvent TerminateAck { get; private set; }
public int Offset { get; set; }
#region IDisposable implementation
public void Dispose()
{
Terminate.Set();
TerminateAck.WaitOne();
Terminate.Dispose();
TerminateAck.Dispose();
}
#endregion
}
}
I suggest refactoring your interface to yield the state as part of the data. Now the client has what they need to resubscribe where they left off.
Also, once you start using Rx, you should find that using synchronization primitives like ManualResetEvent are rarely necessary. If you refactor your code so that retrieving each page is its own Task, then you can eliminate all of that synchronization code.
Also, if you are calling a "boring web service" in GetPage, then just make it async. This gets rid of the need to call Task.Run among other benefits.
Here is a refactored version, using .NET 4.5 async/await syntax. It could also be done without async/await. I also added an alternative GetPageAsync method that uses Observable.Start, just in case you really cannot convert your web service call to be asynchronous.
/// <summary>A set of pictures</summary>
public struct PictureSet
{
public int Offset { get; private set; }
public IList<Picture> Pictures { get; private set; }
/// <summary>Clients will use this property if they want to pick up where they left off</summary>
public int NextOffset { get { return Offset + Pictures.Count; } }
public PictureSet(int offset, IList<Picture> pictures)
:this() { Offset = offset; Pictures = pictures; }
}
public class PictureProvider : IPictureProvider<PictureSet>
{
public IObservable<PictureSet> GetPictures(int offset = 0)
{
        // use Defer() so we capture a copy of offset for each
        // observer that subscribes (so multiple observers do not
        // update each other's offset)
        return Observable.Defer<PictureSet>(() =>
        {
            var localOffset = offset;
            // Use FromAsync so GetPageAsync() is re-executed on every
            // subscription (Repeat() resubscribes after each page).
            // localOffset is advanced after each page completes so that
            // the next call to GetPageAsync() uses the next offset.
            return Observable.FromAsync(() => GetPageAsync(localOffset))
                .Select(pictures =>
                {
                    var s = new PictureSet(localOffset, pictures);
                    localOffset += pictures.Count;
                    return s;
                })
                .Repeat()
                .TakeWhile(pictureSet => pictureSet.Pictures.Count > 0);
});
}
private async Task<IList<Picture>> GetPageAsync(int offset)
{
var data = await BoringWebServiceCallAsync(offset);
        return data.Pictures.ToList();
}
    // this version uses Observable.Start() (which runs the call on the
    // task pool, much like Task.Run) in case you cannot convert your
    // web service call to be asynchronous (use it in place of the method above)
    private IObservable<IList<Picture>> GetPageAsync(int offset)
    {
        return Observable.Start<IList<Picture>>(() =>
        {
            var result = new List<Picture>();
            // ... boring web service call here
            return result;
        });
    }
}
Clients just need to add a SelectMany call to get their IObservable<Picture>. They can choose to store the pictureSet.NextOffset if they wish.
pictureProvider
.GetPictures()
.SelectMany(pictureSet => pictureSet.Pictures)
.Subscribe(picture => whatever);
Instead of thinking about how to save the subscription state, I would think about how to replay the state of the inputs (i.e. I'd try to create a serializable ReplaySubject that, on resume, would just resubscribe and catch back up to the current state).
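A bare-bones sketch of that idea, reusing GetPictures/PictureSet from the answer above (Rx's built-in ReplaySubject is not serializable, so persisting its buffer across sessions remains the open part):
var replay = new ReplaySubject<Picture>();

// Pump the live sequence into the subject; it buffers everything it sees.
pictureProvider
    .GetPictures()
    .SelectMany(pictureSet => pictureSet.Pictures)
    .Subscribe(replay);

// A subscriber arriving later first receives the buffered history,
// then continues with live items.
replay.Subscribe(picture => Console.WriteLine(picture));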
I am currently writing an IRC bot. I'd like to avoid excess flood, so I decided to create a message queue that would send the next message every X milliseconds, but my attempt failed. Line 43:
unset.Add((string)de.Key);
throws an OutOfMemoryException. I have absolutely no idea what I'm doing wrong.
Perhaps I should also explain the general idea behind this (possibly complicated) way of queuing.
Firstly, the main Hashtable, queueht, stores ConcurrentQueue<string> instances, with the targets of the messages serving as keys. I would like the bot to iterate through the hashtable, sending one message from each queue (and removing the key once its queue is emptied). I couldn't think of a suitable way to work on the hashtable itself, so I decided to create another queue, ConcurrentQueue<string> queue, which stores the keys in the order they should be used when emptying the queues.
Assuming a hypothetical situation with several hundred items in a queue (which might be possible), any new request would be delayed by Lord knows how long (the built-in delay between messages plus latency), so I have the Add() method rebuild queue. I create a deep copy of queueht (or so I hope) and generate a new queue based on this disposable copy, getting rid of it in the process.
I assume my train of thought and/or code is horribly wrong, since I have nearly no experience with threading, with collections more complicated than simple arrays, or with OOP habits and conventions whatsoever. I would really appreciate a solution to my problem with an explanation. Thanks in advance!
EDIT: Posting the entire class.
class SendQueue
{
Hashtable queueht;
ConcurrentQueue<string> queue;
Timer tim;
IRCBot host;
public SendQueue(IRCBot host)
{
this.host = host;
this.tim = new Timer();
this.tim.Elapsed += new ElapsedEventHandler(this.SendNewMsg);
this.queueht = new Hashtable();
this.queue = new ConcurrentQueue<string>();
}
public void Add(string target, string msg)
{
try
{
this.queueht.Add(target, new ConcurrentQueue<string>());
}
finally
{
((ConcurrentQueue<string>)this.queueht[target]).Enqueue(msg);
}
Hashtable ht = new Hashtable(queueht);
List<string> unset = new List<string>();
while (ht.Count > 0)
{
foreach (DictionaryEntry de in ht)
{
ConcurrentQueue<string> cq = (ConcurrentQueue<string>)de.Value;
string res;
if (cq.TryDequeue(out res))
this.queue.Enqueue((string)de.Key);
else
unset.Add((string)de.Key);
}
}
if (unset.Count > 0)
foreach (string item in unset)
ht.Remove(item);
}
private void SendNewMsg(object sender, ElapsedEventArgs e)
{
string target;
if (queue.TryDequeue(out target))
{
string message;
if (((ConcurrentQueue<string>)queueht[target]).TryDequeue(out message))
this.host.Say(target, message);
}
}
}
EDIT2: I am aware that the while (ht.Count > 0) will execute indefinitely. It's just a part left over from a previous version, which looked like this:
while (ht.Count > 0)
{
foreach (DictionaryEntry de in ht)
{
ConcurrentQueue<string> cq = (ConcurrentQueue<string>)de.Value;
string res;
if (cq.TryDequeue(out res))
this.queue.Enqueue((string)de.Key);
else
ht.Remove((string)de.Key);
}
}
But the collection cannot be modified while it's being enumerated (and I found that out the hard way), so it no longer looks like that. I just forgot to change the condition of the while. The usual snapshot-based workaround is sketched below.
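For reference, a small sketch of that workaround: enumerate a snapshot of the keys (requires using System.Linq) so the Hashtable itself can be modified inside the loop:
// Iterate over a copied list of keys, not the live Hashtable,
// so Remove() inside the loop is safe.
foreach (string key in ht.Keys.Cast<string>().ToList())
{
    var cq = (ConcurrentQueue<string>)ht[key];
    string res;
    if (cq.TryDequeue(out res))
        this.queue.Enqueue(key);
    else
        ht.Remove(key);
}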
I took the liberty of trying TheThing's solution. While it seems to fulfil its purpose, it doesn't send any messages... Here's its final form:
class User
{
public User(string username)
{
this.Username = username;
this.RequestQueue = new Queue<string>();
}
public User(string username, string message)
: this(username)
{
this.RequestQueue.Enqueue(message);
}
public string Username { get; set; }
public Queue<string> RequestQueue { get; private set; }
}
class SendQueue
{
Timer tim;
IRCBot host;
public bool shouldRun = false;
public Dictionary<string, User> Users; //Dictionary of users currently being processed
public ConcurrentQueue<User> UserQueue; //List of order for which users should be processed
public SendQueue(IRCBot launcher)
{
this.Users = new Dictionary<string, User>();
this.UserQueue = new ConcurrentQueue<User>();
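//NB: the dueTime of Timeout.Infinite below means the timer never starts firing (see the EDIT at the end).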
this.tim = new Timer(WorkerTick, null, Timeout.Infinite, 450);
this.host = launcher;
}
public void Add(string username, string request)
{
lock (this.UserQueue) //For threadsafety
{
if (this.Users.ContainsKey(username))
{
//The user is in the user list. That means he has previously sent request that are awaiting to be processed.
//As such, we can safely add his new message at the end of HIS request list.
this.Users[username].RequestQueue.Enqueue(request); //Add users new message at the end of the list
return;
}
//User is not in the user list. Means it's his first request. Create him in the user list and add his message
var user = new User(username, request);
this.Users.Add(username, user); //Create the user and his message
this.UserQueue.Enqueue(user); //Add the user to the last of the precessing users.
}
}
public void WorkerTick(object sender)
{
if (shouldRun)
{
//This tick runs every 400ms and processes next message to be sent.
lock (this.UserQueue) //For threadsafety
{
User user;
if (this.UserQueue.TryDequeue(out user)) //Pop the next user to be processed.
{
string message = user.RequestQueue.Dequeue(); //Pop his request
this.host.Say(user.Username, message);
if (user.RequestQueue.Count > 0) //If user has more messages waiting to be processed
{
this.UserQueue.Enqueue(user); //Add him at the end of the userqueue
}
else
{
this.Users.Remove(user.Username); //User has no more messages, we can safely remove him from the user list
}
}
}
}
}
}
I tried switching to ConcurrentQueue, which should work as well (though in a more thread-safe way; not that I know anything about thread safety). I also tried switching to System.Threading.Timer, but that didn't help either. I ran out of ideas long ago.
EDIT: Being a complete and utter idiot, I didn't set the time for the Timer to start. Replacing the bool flag with a Start() method that changes the timer's dueTime and interval made it work. Problem solved.
From what I can best understand, you want to be able to queue users in order, along with each of their requests.
Meaning, if one user sends, say, 1000 requests, others can still send theirs, and the bot serves one request from each user in a FIFO manner.
If so, then what you need is something similar to this:
class User
{
public User(string username)
{
this.Username = username;
this.RequestQueue = new Queue<string>();
}
public User(string username, string message)
: this(username)
{
this.RequestQueue.Enqueue(message);
}
public string Username { get; set; }
public Queue<string> RequestQueue { get; private set; }
}
///......................
public class MyClass
{
public MyClass()
{
this.Users = new Dictionary<string, User>();
this.UserQueue = new Queue<User>();
}
public Dictionary<string, User> Users; //Dictionary of users currently being processed
public Queue<User> UserQueue; //List of order for which users should be processed
public void OnMessageRecievedFromIrcChannel(string username, string request)
{
lock (this.UserQueue) //For threadsafety
{
if (this.Users.ContainsKey(username))
{
//The user is in the user list. That means he has previously sent request that are awaiting to be processed.
//As such, we can safely add his new message at the end of HIS request list.
this.Users[username].RequestQueue.Enqueue(request); //Add users new message at the end of the list
return;
}
//User is not in the user list. Means it's his first request. Create him in the user list and add his message
var user = new User(username, request);
this.Users.Add(username, user); //Create the user and his message
this.UserQueue.Enqueue(user); //Add the user to the last of the precessing users.
}
}
//**********************************
public void WorkerTick()
{
//This tick runs every 400ms and processes next message to be sent.
lock (this.UserQueue) //For threadsafety
{
            if (this.UserQueue.Count == 0) //Nothing waiting to be processed this tick
                return;
            var user = this.UserQueue.Dequeue(); //Pop the next user to be processed.
var message = user.RequestQueue.Dequeue(); //Pop his request
/////PROCESSING MESSAGE GOES HERE
if (user.RequestQueue.Count > 0) //If user has more messages waiting to be processed
{
this.UserQueue.Enqueue(user); //Add him at the end of the userqueue
}
else
{
this.Users.Remove(user.Username); //User has no more messages, we can safely remove him from the user list
}
}
}
}
Basically, we have a queue of users. We pop the next user, process his first request, and add him to the end of the user queue if he has more requests waiting to be processed.
Hope this clears up the intended functionality. For the record, the code above is more pseudocode than functional code xD
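A possible way to wire it up (a hedged sketch; the 400ms interval and the IRC hookup are assumptions):
var dispatcher = new MyClass();

// Fire WorkerTick roughly every 400ms; each tick sends at most one message.
var timer = new System.Threading.Timer(_ => dispatcher.WorkerTick(), null, 0, 400);

// From the IRC message handler:
// dispatcher.OnMessageRecievedFromIrcChannel(username, request);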
From what I can see, you never escape from the while loop, since you never remove items from the temporary hashtable ht until after it. Thus, the count will always be > 0.
Try this:
class User
{
public User(string username)
{
this.Username = username;
        this._requestQueue = new Queue<string>();
}
private static readonly TimeSpan _minPostThreshold = new TimeSpan(0,0,5); //five seconds
public void PostMessage(string message)
{
var lastMsgTime = _lastMessageTime;
_lastMessageTime = DateTime.Now;
if (lastMsgTime != default(DateTime))
{
if ((_lastMessageTime - lastMsgTime) < _minPostThreshold)
{
return;
}
}
_requestQueue.Enqueue(message);
}
public string NextMessage
{
get
{
if (!HasMessages)
{
return null;
}
return _requestQueue.Dequeue();
}
}
public bool HasMessages
{
get{return _requestQueue.Count > 0;}
}
public string Username { get; set; }
    private readonly Queue<string> _requestQueue;
private DateTime _lastMessageTime;
}
class SendQueue
{
Timer tim;
IRCBot host;
public bool shouldRun = false;
public Dictionary<string, User> Users; //Dictionary of users currently being processed
private Queue<User> _postQueue = new Queue<User>();
public SendQueue(IRCBot launcher)
{
this.Users = new Dictionary<string, User>();
        this.tim = new Timer(WorkerTick, null, 0, 450); //dueTime of 0 starts the timer immediately (Timeout.Infinite would never fire)
this.host = launcher;
}
public void Add(string username, string request)
{
User targetUser;
lock (Users) //For threadsafety
{
if (!Users.TryGetValue(username, out targetUser))
{
//User is not in the user list. Means it's his first request. Create him in the user list and add his message
targetUser = new User(username);
Users.Add(username, targetUser); //Create the user and his message
}
targetUser.PostMessage(request);
}
lock(_postQueue)
{
_postQueue.Enqueue(targetUser);
}
}
public void WorkerTick(object sender)
{
if (shouldRun)
{
User nextUser = null;
lock(_postQueue)
{
if (_postQueue.Count > 0)
{
                nextUser = _postQueue.Dequeue();
}
}
if (nextUser != null)
{
host.Say(nextUser.Username, nextUser.NextMessage);
}
}
}
}
UPDATE: changed after better understanding the requirements.
This provides both per-user flood control and overall throttling. It is also much simpler.
Note this was written on the fly and hasn't even been compiled, and there are probably some threading issues around User instances that need to be considered, but it should work.