I have a set of huge tasks to be performed in C#. Each calculation will produce resultant data which I want to write into a file (I am using SQLite). Currently I am doing this in a sequential way like this [Task1 -> FileSaving1], [Task2 -> FileSaving2], and so on.
But my priority is to complete all the calculations first, so i want to run the calculation in parallel in one thread and the file saving to be done in another. FileSaving thread will be signalled everytime when the calculation is over and the data is ready to be written. FileSaving can be sequential or Parallel.
How to achieve this in C#? I am using .Net 4.0.
Please provide me with some example if possible.
You can use a BlockingCollection<T> to help with this.
The tricky thing is that you want several threads processing work items, but they can produce their output in a random order so you need to multiplex the output when writing it (assuming you want to write the data in the same order as it would have been written if you used the old single-threaded solution).
I wrote a class to do this a while back.
It assumes that you can encapsulate each "work item" in an instance of a class. Those instances are added to a work queue; then multiple threads (via Task) can remove work items from the work queue, process them, and then output them to a priority queue.
Finally, another thread can remove the completed work items from the completed queue, being careful to multiplex them so that it removes the items in the same order as they were originally added to the work queue.
This implementation creates and manages the threads for you. You need to tell it how many worker threads to use, and supply it delegates to provide new work items (Read()), process each work item (Process()) and output each work item (Write()).
Only the Process() delegate is called by multiple threads.
Note that if you don't care about the order, you can avoid all this stuff and pretty much use BlockingCollection directly.
Here's the code:
// Runs a three-stage pipeline: one reader thread supplies work items, several
// worker tasks process them concurrently, and one multiplexor thread writes
// the results out in the original input order.
public sealed class ParallelWorkProcessor<T> where T: class // T is the work item type.
{
public delegate T Read(); // Called by only one thread.
public delegate T Process(T block); // Called simultaneously by multiple threads.
public delegate void Write(T block); // Called by only one thread.
// read:       supplies work items; returning null signals end of input.
// process:    transforms one item; must be thread-safe (runs on many threads at once).
// write:      consumes processed items in original input order; called by one thread.
// numWorkers: number of processing tasks; <= 0 means Environment.ProcessorCount.
public ParallelWorkProcessor(Read read, Process process, Write write, int numWorkers = 0)
{
_read = read;
_process = process;
_write = write;
numWorkers = (numWorkers > 0) ? numWorkers : Environment.ProcessorCount;
// Caps the number of items in flight (input + output combined) so the
// reader cannot run arbitrarily far ahead of the writer.
_workPool = new SemaphoreSlim(numWorkers*2);
_inputQueue = new BlockingCollection<WorkItem>(numWorkers);
_outputQueue = new ConcurrentPriorityQueue<int, T>();
_workers = new Task[numWorkers];
startWorkers();
// NOTE(review): fire-and-forget — an exception thrown by _read() would go
// unobserved. Consider keeping this Task and monitoring it.
Task.Factory.StartNew(enqueueWorkItems);
_multiplexor = Task.Factory.StartNew(multiplex);
}
// Starts one processing task per worker.
private void startWorkers()
{
for (int i = 0; i < _workers.Length; ++i)
{
_workers[i] = Task.Factory.StartNew(processBlocks);
}
}
// Reader loop: tags each item with a monotonically increasing index so the
// multiplexor can restore the original order, then feeds it to the workers.
private void enqueueWorkItems()
{
int index = 0;
while (true)
{
T data = _read();
if (data == null) // Signals end of input.
{
_inputQueue.CompleteAdding();
_outputQueue.Enqueue(index, null); // Special sentinel WorkItem; its key is (index of last item + 1).
break;
}
_workPool.Wait(); // Throttles the reader when too many items are already in flight.
_inputQueue.Add(new WorkItem(data, index++));
}
}
// Writer loop: drains the priority queue, emitting items strictly in index
// order; out-of-order completions simply wait in the queue until their turn.
private void multiplex()
{
int index = 0; // Next required index.
int last = int.MaxValue;
while (index != last)
{
KeyValuePair<int, T> workItem;
_outputQueue.WaitForNewItem(); // There will always be at least one item - the sentinel item.
while ((index != last) && _outputQueue.TryPeek(out workItem))
{
if (workItem.Value == null) // The sentinel item has a null value to indicate that it's the sentinel.
{
last = workItem.Key; // The sentinel's key is the index of the last block + 1.
}
else if (workItem.Key == index) // Is this block the next one that we want?
{
// Even if new items are added to the queue while we're here, the new items will be lower priority.
// Therefore it is safe to assume that the item we will dequeue now is the same one we peeked at.
_outputQueue.TryDequeue(out workItem);
Contract.Assume(workItem.Key == index); // This *must* be the case.
_workPool.Release(); // Allow the enqueuer to queue another work item.
_write(workItem.Value);
++index;
}
else // If it's not the block we want, we know we'll get a new item at some point.
{
_outputQueue.WaitForNewItem();
}
}
}
}
// Worker loop: runs on several tasks at once; completion order is arbitrary,
// hence the priority queue keyed by each item's original index.
private void processBlocks()
{
foreach (var block in _inputQueue.GetConsumingEnumerable())
{
var processedData = _process(block.Data);
_outputQueue.Enqueue(block.Index, processedData);
}
}
// Blocks until the multiplexor has written the last item, or the timeout expires.
// Returns true if all output was written within the timeout.
public bool WaitForFinished(int maxMillisecondsToWait) // Can be Timeout.Infinite.
{
return _multiplexor.Wait(maxMillisecondsToWait);
}
// Pairs a work item with the input index used to restore output ordering.
private sealed class WorkItem
{
public WorkItem(T data, int index)
{
Data = data;
Index = index;
}
public T Data { get; private set; }
public int Index { get; private set; }
}
private readonly Task[] _workers; // Processing tasks.
private readonly Task _multiplexor; // Ordered-output writer task.
private readonly SemaphoreSlim _workPool; // Limits items in flight to numWorkers * 2.
private readonly BlockingCollection<WorkItem> _inputQueue; // Reader -> workers.
private readonly ConcurrentPriorityQueue<int, T> _outputQueue; // Workers -> multiplexor, keyed by input index.
private readonly Read _read;
private readonly Process _process;
private readonly Write _write;
}
Here's the test code for it:
namespace Demo
{
// Console harness that drives ParallelWorkProcessor<byte[]> with 200 fake
// work items and prints the pipeline's progress as it runs.
public static class Program
{
private static void Main(string[] args)
{
_rng = new Random(34324); // Fixed seed keeps runs repeatable.
const int threadCount = 8;
_maxBlocks = 200;
_numBlocks = _maxBlocks;
ThreadPool.SetMinThreads(threadCount + 2, 4); // Kludge to prevent slow thread startup.
var timer = Stopwatch.StartNew();
var pipeline = new ParallelWorkProcessor<byte[]>(read, process, write, threadCount);
pipeline.WaitForFinished(Timeout.Infinite);
Console.WriteLine("\n\nFinished in " + timer.Elapsed + "\n\n");
}
// Supplies the next input block, or null once _maxBlocks items have been produced.
private static byte[] read()
{
int remaining = _numBlocks--; // Post-decrement: one credit is consumed per call.
if (remaining == 0)
{
return null;
}
var block = new byte[128];
block[0] = (byte)(_maxBlocks - _numBlocks); // Sequence number 1..200.
Console.WriteLine("Supplied input: " + block[0]);
return block;
}
// Simulates variable-cost processing; item 10 is made pathologically slow to
// exercise the output-reordering logic.
private static byte[] process(byte[] data)
{
if (data[0] == 10) // Hack for test purposes. Make it REALLY slow for this item!
{
Console.WriteLine("Delaying a call to process() for 5s for ID 10");
Thread.Sleep(5000);
}
Thread.Sleep(10 + _rng.Next(50));
Console.WriteLine("Processed: " + data[0]);
return data;
}
// Output stage: should print the sequence numbers in ascending order.
private static void write(byte[] data)
{
Console.WriteLine("Received output: " + data[0]);
}
private static Random _rng; // Shared RNG for simulated work durations.
private static int _numBlocks; // Remaining items to emit from read().
private static int _maxBlocks; // Total number of items to produce.
}
}
This also requires a ConcurrentPriorityQueue implementation from here.
I had to modify that slightly, so here's my modified version:
using System;
using System.Collections;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
namespace ConsoleApplication1
{
/// <summary>Provides a thread-safe priority queue data structure.</summary>
/// <typeparam name="TKey">Specifies the type of keys used to prioritize values.</typeparam>
/// <typeparam name="TValue">Specifies the type of elements in the queue.</typeparam>
[SuppressMessage("Microsoft.Naming", "CA1711:IdentifiersShouldNotHaveIncorrectSuffix")]
[SuppressMessage("Microsoft.Naming", "CA1710:IdentifiersShouldHaveCorrectSuffix")]
[DebuggerDisplay("Count={Count}")]
public sealed class ConcurrentPriorityQueue<TKey, TValue> :
IProducerConsumerCollection<KeyValuePair<TKey,TValue>>
where TKey : IComparable<TKey>
{
/// <summary>Initializes a new instance of the ConcurrentPriorityQueue class.</summary>
public ConcurrentPriorityQueue() {}
/// <summary>Initializes a new instance of the ConcurrentPriorityQueue class that contains elements copied from the specified collection.</summary>
/// <param name="collection">The collection whose elements are copied to the new ConcurrentPriorityQueue.</param>
[SuppressMessage("Microsoft.Design", "CA1006:DoNotNestGenericTypesInMemberSignatures")]
public ConcurrentPriorityQueue(IEnumerable<KeyValuePair<TKey, TValue>> collection)
{
if (collection == null) throw new ArgumentNullException("collection");
foreach (var item in collection) _minHeap.Insert(item);
}
/// <summary>Adds the key/value pair to the priority queue.</summary>
/// <param name="priority">The priority of the item to be added.</param>
/// <param name="value">The item to be added.</param>
public void Enqueue(TKey priority, TValue value)
{
Enqueue(new KeyValuePair<TKey, TValue>(priority, value));
}
/// <summary>Adds the key/value pair to the priority queue.</summary>
/// <param name="item">The key/value pair to be added to the queue.</param>
public void Enqueue(KeyValuePair<TKey, TValue> item)
{
lock (_syncLock)
{
_minHeap.Insert(item);
// Signal while still holding the lock so a waiter cannot observe the
// event before the item is actually in the heap.
_newItem.Set();
}
}
/// <summary>Waits for a new item to appear.</summary>
/// <remarks>
/// Backed by an AutoResetEvent, so several Enqueue calls may coalesce into a
/// single wake-up; callers should drain the queue after each wait rather than
/// assume one wait per enqueued item.
/// </remarks>
public void WaitForNewItem()
{
_newItem.WaitOne();
}
/// <summary>Attempts to remove and return the next prioritized item in the queue.</summary>
/// <param name="result">
/// When this method returns, if the operation was successful, result contains the object removed. If
/// no object was available to be removed, the value is unspecified.
/// </param>
/// <returns>
/// true if an element was removed and returned from the queue successfully; otherwise, false.
/// </returns>
public bool TryDequeue(out KeyValuePair<TKey, TValue> result)
{
result = default(KeyValuePair<TKey, TValue>);
lock (_syncLock)
{
if (_minHeap.Count > 0)
{
result = _minHeap.Remove();
return true;
}
}
return false;
}
/// <summary>Attempts to return the next prioritized item in the queue.</summary>
/// <param name="result">
/// When this method returns, if the operation was successful, result contains the object.
/// The queue was not modified by the operation.
/// </param>
/// <returns>
/// true if an element was returned from the queue successfully; otherwise, false.
/// </returns>
public bool TryPeek(out KeyValuePair<TKey, TValue> result)
{
result = default(KeyValuePair<TKey, TValue>);
lock (_syncLock)
{
if (_minHeap.Count > 0)
{
result = _minHeap.Peek();
return true;
}
}
return false;
}
/// <summary>Empties the queue.</summary>
public void Clear() { lock(_syncLock) _minHeap.Clear(); }
/// <summary>Gets whether the queue is empty.</summary>
public bool IsEmpty { get { return Count == 0; } }
/// <summary>Gets the number of elements contained in the queue.</summary>
public int Count
{
get { lock (_syncLock) return _minHeap.Count; }
}
/// <summary>Copies the elements of the collection to an array, starting at a particular array index.</summary>
/// <param name="array">
/// The one-dimensional array that is the destination of the elements copied from the queue.
/// </param>
/// <param name="index">
/// The zero-based index in array at which copying begins.
/// </param>
/// <remarks>The elements will not be copied to the array in any guaranteed order.</remarks>
public void CopyTo(KeyValuePair<TKey, TValue>[] array, int index)
{
lock (_syncLock) _minHeap.Items.CopyTo(array, index);
}
/// <summary>Copies the elements stored in the queue to a new array.</summary>
/// <returns>A new array containing a snapshot of elements copied from the queue.</returns>
public KeyValuePair<TKey, TValue>[] ToArray()
{
lock (_syncLock)
{
// Clone the heap, then repeatedly Remove() from the clone so the
// snapshot comes out in priority order without mutating this queue.
var clonedHeap = new MinBinaryHeap(_minHeap);
var result = new KeyValuePair<TKey, TValue>[_minHeap.Count];
for (int i = 0; i < result.Length; i++)
{
result[i] = clonedHeap.Remove();
}
return result;
}
}
/// <summary>Attempts to add an item in the queue.</summary>
/// <param name="item">The key/value pair to be added.</param>
/// <returns>
/// true if the pair was added; otherwise, false.
/// </returns>
bool IProducerConsumerCollection<KeyValuePair<TKey, TValue>>.TryAdd(KeyValuePair<TKey, TValue> item)
{
Enqueue(item);
return true;
}
/// <summary>Attempts to remove and return the next prioritized item in the queue.</summary>
/// <param name="item">
/// When this method returns, if the operation was successful, result contains the object removed. If
/// no object was available to be removed, the value is unspecified.
/// </param>
/// <returns>
/// true if an element was removed and returned from the queue successfully; otherwise, false.
/// </returns>
bool IProducerConsumerCollection<KeyValuePair<TKey, TValue>>.TryTake(out KeyValuePair<TKey, TValue> item)
{
return TryDequeue(out item);
}
/// <summary>Returns an enumerator that iterates through the collection.</summary>
/// <returns>An enumerator for the contents of the queue.</returns>
/// <remarks>
/// The enumeration represents a moment-in-time snapshot of the contents of the queue. It does not
/// reflect any updates to the collection after GetEnumerator was called. The enumerator is safe to
/// use concurrently with reads from and writes to the queue.
/// </remarks>
public IEnumerator<KeyValuePair<TKey, TValue>> GetEnumerator()
{
var arr = ToArray();
return ((IEnumerable<KeyValuePair<TKey, TValue>>)arr).GetEnumerator();
}
/// <summary>Returns an enumerator that iterates through a collection.</summary>
/// <returns>An IEnumerator that can be used to iterate through the collection.</returns>
IEnumerator IEnumerable.GetEnumerator() { return GetEnumerator(); }
/// <summary>Copies the elements of the collection to an array, starting at a particular array index.</summary>
/// <param name="array">
/// The one-dimensional array that is the destination of the elements copied from the queue.
/// </param>
/// <param name="index">
/// The zero-based index in array at which copying begins.
/// </param>
void ICollection.CopyTo(Array array, int index)
{
lock (_syncLock) ((ICollection)_minHeap.Items).CopyTo(array, index);
}
/// <summary>
/// Gets a value indicating whether access to the ICollection is synchronized with the SyncRoot.
/// </summary>
bool ICollection.IsSynchronized { get { return true; } }
/// <summary>
/// Gets an object that can be used to synchronize access to the collection.
/// </summary>
object ICollection.SyncRoot { get { return _syncLock; } }
/// <summary>Implements a binary heap that prioritizes smaller values.</summary>
/// <remarks>Not thread-safe on its own; all access is guarded by _syncLock above.</remarks>
private sealed class MinBinaryHeap
{
private readonly List<KeyValuePair<TKey, TValue>> _items;
/// <summary>Initializes an empty heap.</summary>
public MinBinaryHeap()
{
_items = new List<KeyValuePair<TKey, TValue>>();
}
/// <summary>Initializes a heap as a copy of another heap instance.</summary>
/// <param name="heapToCopy">The heap to copy.</param>
/// <remarks>Key/Value values are not deep cloned.</remarks>
public MinBinaryHeap(MinBinaryHeap heapToCopy)
{
_items = new List<KeyValuePair<TKey, TValue>>(heapToCopy.Items);
}
/// <summary>Empties the heap.</summary>
public void Clear() { _items.Clear(); }
/// <summary>Adds an item to the heap.</summary>
public void Insert(KeyValuePair<TKey,TValue> entry)
{
// Add the item to the list, making sure to keep track of where it was added.
_items.Add(entry);
int pos = _items.Count - 1;
// If the new item is the only item, we're done.
if (pos == 0) return;
// Otherwise, perform log(n) operations, walking up the tree, swapping
// where necessary based on key values
while (pos > 0)
{
// Get the next position to check
int nextPos = (pos-1) / 2;
// Extract the entry at the next position
var toCheck = _items[nextPos];
// Compare that entry to our new one. If our entry has a smaller key, move it up.
// Otherwise, we're done.
if (entry.Key.CompareTo(toCheck.Key) < 0)
{
_items[pos] = toCheck;
pos = nextPos;
}
else break;
}
// Make sure we put this entry back in, just in case
_items[pos] = entry;
}
/// <summary>Returns the entry at the top of the heap.</summary>
public KeyValuePair<TKey, TValue> Peek()
{
// Returns the first item
if (_items.Count == 0) throw new InvalidOperationException("The heap is empty.");
return _items[0];
}
/// <summary>Removes the entry at the top of the heap.</summary>
public KeyValuePair<TKey, TValue> Remove()
{
// Get the first item and save it for later (this is what will be returned).
if (_items.Count == 0) throw new InvalidOperationException("The heap is empty.");
KeyValuePair<TKey, TValue> toReturn = _items[0];
// Remove the first item if there will only be 0 or 1 items left after doing so.
if (_items.Count <= 2) _items.RemoveAt(0);
// A reheapify will be required for the removal
else
{
// Remove the first item and move the last item to the front.
_items[0] = _items[_items.Count - 1];
_items.RemoveAt(_items.Count - 1);
// Start reheapify
int current = 0, possibleSwap = 0;
// Keep going until the tree is a heap
while (true)
{
// Get the positions of the node's children
int leftChildPos = 2 * current + 1;
int rightChildPos = leftChildPos + 1;
// Should we swap with the left child?
if (leftChildPos < _items.Count)
{
// Get the two entries to compare (node and its left child)
var entry1 = _items[current];
var entry2 = _items[leftChildPos];
// If the child has a lower key than the parent, set that as a possible swap
if (entry2.Key.CompareTo(entry1.Key) < 0) possibleSwap = leftChildPos;
}
else break; // if can't swap this, we're done
// Should we swap with the right child? Note that now we check with the possible swap
// position (which might be current and might be left child).
if (rightChildPos < _items.Count)
{
// Get the two entries to compare (node and its left child)
var entry1 = _items[possibleSwap];
var entry2 = _items[rightChildPos];
// If the child has a lower key than the parent, set that as a possible swap
if (entry2.Key.CompareTo(entry1.Key) < 0) possibleSwap = rightChildPos;
}
// Now swap current and possible swap if necessary
if (current != possibleSwap)
{
var temp = _items[current];
_items[current] = _items[possibleSwap];
_items[possibleSwap] = temp;
}
else break; // if nothing to swap, we're done
// Update current to the location of the swap
current = possibleSwap;
}
}
// Return the item from the heap
return toReturn;
}
/// <summary>Gets the number of objects stored in the heap.</summary>
public int Count { get { return _items.Count; } }
internal List<KeyValuePair<TKey, TValue>> Items { get { return _items; } }
}
private readonly AutoResetEvent _newItem = new AutoResetEvent(false); // Signaled by Enqueue; consumed by WaitForNewItem.
private readonly object _syncLock = new object(); // Guards all access to _minHeap.
private readonly MinBinaryHeap _minHeap = new MinBinaryHeap();
}
}
Related
I draw the following topology where each node is an object of class SensorNode, the blue links indicate the links between each node with its neighbors where the circles around the nodes represent the transmission range for each node.
The sink also is an object of class Sink.
I need to instantiate messaging and communication between them but I have no idea what mechanisms should I used to perform message passing between theses objects (sensor nodes) where each node has its unique ID, the sink has a fixed ID which is 1 in my code because I use only a single sink.
The following are the classes where I am still stuck to how to implement send receive, and forward in terms of making this communication applicable between these different objects...
Class "SensorNode"
namespace CRN_Topology
{
// A sensor node in the topology: identity, depth, energy budget, and the
// history/priority queues used by the routing protocol.
class SensorNode
{
public int snID;
public string snName;
public int snDepth;
public DateTime schedulingTime;
public double holdingTime;
public double energy;
// Parallel lists: queue11[i] (packet id) was received at queue12[i].
public List<int> queue11 = new List<int>();
public List<DateTime> queue12 = new List<DateTime>();
// Parallel lists: queue21[i] (packet) is scheduled for queue22[i].
public List<Packet> queue21 = new List<Packet>();
public List<DateTime> queue22 = new List<DateTime>();
public SensorNode(int id, string name, int depth, double energy)
{
snID = id;
snName = name;
snDepth = depth;
this.energy = energy;
}
// Records that a packet id was seen at the given time.
public void insertHistoryQueue(int packetID, DateTime receivingTime)
{
queue11.Add(packetID);
queue12.Add(receivingTime);
}
// Queues a packet together with its scheduled transmission time.
public void insertPriorityQueue(Packet packet, DateTime schedulingTime)
{
queue21.Add(packet);
queue22.Add(schedulingTime);
}
// Combines the holding time (interpreted as an OLE Automation date) with the
// time-of-day portion of the system clock.
// NOTE(review): treating holdingTime as an OADate looks suspicious here —
// confirm the intended unit of holdingTime.
public DateTime schedulingTimeCalculations(double holdingTime, DateTime systemTime)
{
DateTime datePart = DateTime.FromOADate(holdingTime).Date;
schedulingTime = datePart + systemTime.TimeOfDay;
return schedulingTime;
}
// Linear holding-time model: alpha * depth + beta.
public double holdingTimeCalculations(double alpha, double depth, double beta)
{
holdingTime = (alpha * depth) + beta;
return holdingTime;
}
// Communication primitives — stubs, to be implemented.
public void receive(Packet packet)
{
}
public void forward(Packet packet, int neighborID)
{
}
public void remove()
{
}
public void sendDirect(int rxID, Packet packet)
{
}
}
}
Class "Sink"
namespace CRN_Topology
{
// The data sink of the topology: identity, (x, y) position, and parallel
// lists recording each received packet with its arrival time.
class Sink
{
public string name;
public int sinkID;
public int sinkX;
public int sinkY;
public List<Packet> queue1 = new List<Packet>();
public List<DateTime> queue2 = new List<DateTime>();
public Sink(string name, int Id, int xLocation, int yLocation)
{
this.name = name;
sinkID = Id;
sinkX = xLocation;
sinkY = yLocation;
}
// Records a packet and the time it arrived; queue1[i] pairs with queue2[i].
public void insert(Packet packet, DateTime receivingTime)
{
queue1.Add(packet);
queue2.Add(receivingTime);
}
}
}
Any idea, I need your suggestions and your help as I do not have an idea how to pass information between these objects (sensor nodes) and between the sensor nodes and the sink. What is the library which is responsible for this application in C#?
You could use actual events. Yet, for this case IObservable and IObserver seems to provide a better pattern. Although while trying to implement this, I quickly moved away from that pattern.
Below is the solution I developed. What I present is an abstract class Node intended to serve as base for SensorNode and Sink as both can receive connections.
Edit 1: Or you may make it into its own thing and use composition, you may implement the abstract method Recieve to raise a custom event.
Edit 2: Perhaps it is better to think of the Recieve method as Send? I mean, in my code the intended implementation is to have it use _connections to broadcast or try to get the Packet to its destination, and do logging and whatever else. I don't really know if that is what you intended for your Recieve method.
abstract class Node
{
/// <summary>
/// Registry of every Id currently taken by a live node.
/// </summary>
private static readonly Dictionary<int, object> _nodes;
/// <summary>
/// This node's identifier; assigned once at construction.
/// </summary>
/// <remarks>Can't change.</remarks>
private readonly int _id;
/// <summary>
/// Directly connected nodes, keyed by their Id.
/// </summary>
protected readonly Dictionary<int, Node> _connections;
static Node()
{
_nodes = new Dictionary<int, object>();
}
protected Node(int id)
{
// Claim the id up front; fail fast when another live node already owns it.
if (_nodes.ContainsKey(id))
{
throw new ArgumentException($"The id {id} is already in use", nameof(id));
}
_nodes.Add(id, null);
_id = id;
_connections = new Dictionary<int, Node>();
}
~Node()
{
// Best-effort release of the Id during finalization.
// AppDomain unload could be in progress and static state may already be
// gone, so guard defensively; never throw or start async work here.
// (Making Node IDisposable and releasing the Id in Dispose also works.)
var registry = _nodes;
if (registry != null)
{
registry.Remove(Id);
}
}
/// <summary>
/// The identifier of this node.
/// </summary>
public int Id { get => _id; }
/// <summary>
/// Creates a bidirectional link between two nodes;
/// Connect(x, y) and Connect(y, x) are interchangeable.
/// </summary>
/// <param name="x">One endpoint of the link</param>
/// <param name="y">The other endpoint of the link</param>
public static void Connect(Node x, Node y)
{
if (x is null)
{
throw new ArgumentNullException(nameof(x));
}
if (y is null)
{
throw new ArgumentNullException(nameof(y));
}
// Register each endpoint with the other.
y._connections[x.Id] = x;
x._connections[y.Id] = y;
}
/// <summary>
/// Removes the bidirectional link between two nodes;
/// Disconnect(x, y) and Disconnect(y, x) are interchangeable.
/// </summary>
/// <param name="x">One endpoint of the link</param>
/// <param name="y">The other endpoint of the link</param>
public static void Disconnect(Node x, Node y)
{
if (x is null)
{
throw new ArgumentNullException(nameof(x));
}
if (y is null)
{
throw new ArgumentNullException(nameof(y));
}
// Nothing to do unless the link exists in both directions.
if (!y._connections.ContainsKey(x.Id) || !x._connections.ContainsKey(y.Id))
{
return;
}
x._connections.Remove(y.Id);
y._connections.Remove(x.Id);
}
/// <summary>Delivery hook: subclasses decide what to do with an incoming packet.</summary>
protected abstract void Recieve(Packet value);
}
Note: I didn't add anything to prevent a connection from a Node to itself
I have left Recieve abstract for you to implement. Sink will probably just log the messages, while the SensorNode will have to check the destination and fordward the message.
To send a message from a node to another, use the field `_connections`. The key is the `Id` of the connected `Node`. Therefore, if you want to broadcast, you can iterate over `_connections`. If you want to run them in parallel, I have a thread-safe version below.
I have considered that you may need to attach information to the connections (such as weight / distance / cost / delay / latency). If that's the case, consider creating a Connection class and making _connections a dictionary of it. The pragmatic advantage it would have is that you may add the same Connection object to both Nodes and then updates to it will be visible to them both. Or just use Tuple or add more dictionaries, whatever, I don't care.
It took me a while to figure out a good thread-safe implementation. If it used Monitor it would block reads to the connections dictionary, which you need to do to send Packets, so that's not good. A Read-Write lock is a bit better, but it may result in starving the Connect and Disconnect methods.
What I've come up with is a good old state machine. I added another dictionary to keep the state. Made all the dictionaries ConcurrentDictionary to allow parallel operations and to be able to modify the state atomically.
The code is the following:
// Thread-safe variant of Node: connections are tracked in ConcurrentDictionary
// instances and a per-connection state machine (_connecting/_connected/
// _disconnecting) keeps Connect and Disconnect from racing each other.
abstract class Node
{
/// <summary>
/// Set of all the ids.
/// </summary>
private static readonly ConcurrentDictionary<int, object> _nodes;
/// <summary>
/// The Id of the node.
/// </summary>
/// <remarks>Can't change.</remarks>
private readonly int _id;
/// <summary>
/// The connections of the node.
/// </summary>
protected readonly ConcurrentDictionary<int, Node> _connections;
/// <summary>
/// Status of the connection for synchronization.
/// </summary>
private readonly ConcurrentDictionary<int, int> _connectionStatus;
// Connection life cycle: _connecting -> _connected -> _disconnecting -> removed.
private const int _connecting = 0;
private const int _connected = _connecting + 1;
private const int _disconnecting = _connected + 1;
static Node()
{
_nodes = new ConcurrentDictionary<int, object>();
}
protected Node(int id)
{
// Try register the Id provided
if (!_nodes.TryAdd(id, null))
{
// If we fail to add it, it means another Node has the same Id already.
throw new ArgumentException($"The id {id} is already in use", nameof(id));
}
// Store the Id for future reference
_id = id;
_connections = new ConcurrentDictionary<int, Node>();
// BUG FIX: was "new oncurrentDictionary<int, int>()" (missing 'C'), which does not compile.
_connectionStatus = new ConcurrentDictionary<int, int>();
}
~Node()
{
// Try to release the Id
// AppDomain unload could be happening
// Any reference could have been set to null
// Do not start async operations
// Do not throw exceptions
// You may, if you so desire, make Node IDisposable, and dispose including this code
var nodes = _nodes;
if (nodes != null)
{
nodes.TryRemove(Id, out object waste);
}
}
/// <summary>
/// The Id of the Node
/// </summary>
public int Id { get => _id; }
/// <summary>
/// Connects nodes, bidirectionally.
/// Connect(x, y) is equivalent to Connect(y, x).
/// </summary>
/// <param name="x">The first node to connect</param>
/// <param name="y">The second node to connect</param>
/// <returns>true if the connection was created; false if it already existed or is in transition.</returns>
public static bool Connect(Node x, Node y)
{
if (x == null)
{
throw new ArgumentNullException(nameof(x));
}
if (y == null)
{
throw new ArgumentNullException(nameof(y));
}
// Bidirectional
// Take nodes in order of Id, for synchronization: both Connect and
// Disconnect must touch the two nodes in the same order.
var a = x;
var b = y;
if (b.Id < a.Id)
{
a = y;
b = x;
}
if (a._connectionStatus.TryAdd(b.Id, _connecting)
&& b._connectionStatus.TryAdd(a.Id, _connecting))
{
a._connections[b.Id] = b;
b._connections[a.Id] = a;
a._connectionStatus[b.Id] = _connected;
b._connectionStatus[a.Id] = _connected;
return true;
}
return false;
}
/// <summary>
/// Disconnects nodes, bidirectionally.
/// Disconnect(x, y) is equivalent to Disconnect(y, x).
/// </summary>
/// <param name="x">The first node to disconnect</param>
/// <param name="y">The second node to disconnect</param>
/// <returns>true if the connection was removed; false if it did not exist or is in transition.</returns>
public static bool Disconnect(Node x, Node y)
{
if (x == null)
{
throw new ArgumentNullException(nameof(x));
}
if (y == null)
{
throw new ArgumentNullException(nameof(y));
}
// Short circuit: nothing to do if the connection does not exist at all.
if (!y._connections.ContainsKey(x.Id) && !x._connections.ContainsKey(y.Id))
{
return false;
}
// Take nodes in order of Id, for synchronization
var a = x;
var b = y;
if (b.Id < a.Id)
{
a = y;
b = x;
}
if (a._connectionStatus.TryUpdate(b.Id, _disconnecting, _connected)
&& b._connectionStatus.TryUpdate(a.Id, _disconnecting, _connected))
{
// Use discards rather than clobbering the caller-supplied x and y parameters.
a._connections.TryRemove(b.Id, out _);
b._connections.TryRemove(a.Id, out _);
a._connectionStatus.TryRemove(b.Id, out _);
b._connectionStatus.TryRemove(a.Id, out _);
return true;
}
return false;
}
/// <summary>Delivery hook: subclasses decide what to do with an incoming packet.</summary>
/// <remarks>Spelling ("Recieve") kept for compatibility with the rest of the discussion.</remarks>
protected abstract void Recieve(Packet value);
}
Threading chatter:
Connect and Disconnect must try to operate on the same order, which is why I have ordered by Id. If they didn't operate on the same order, concurrent execution of Connect and Disconnect might result in unidirectional connection.
If two threads are trying to add the same connection, only one will succeed (due to TryAdd). If two threads were trying to remove the same connection, only one will succeed (due to TryUpdate). If the connection exists, Connect will fail. If the connection doesn't exist, Disconnect will fail.
If Connect and Disconnect happen concurrently and the connection existed, Connect will not be able to add it and fail unless Disconnect manages to remove it first. If the connection didn't exist, Disconnect will fail unless Connect manages to add it first.
The status _connecting and _disconnecting are there to prevent Connect and Disconnect from seeing the situation where the connection exists in one direction but not in the other.
No thread will ever have to wait for another thread to finish. And there is no need for syncrhonization when only reading _connections.
While, in theory, a thread that is only reading _connections may be able to see a situation where a connection exists only in one direction, that thread will not be able to send a Packet in both directions at the same time. Therefore, from the point of view of that thread, the connection was added or removed between its attempts to send the Packet.
There is no method to verify if a connection exists, such method would not be reliable because as soon as a thread checks a connection exists, another thread may remove the connection before the first one is able to use it. Preventing a connection from being removed was not part of the requirements, yet it can be added as another connection status.
You can use an aggregation relation to achieve what you need. Assuming that any sink can connect two and only two nodes, every sink class has to contain two properties of type SensorNode. For example:
// A sink joins exactly two sensor nodes, one per property.
public class Sink
{
    /// <summary>First of the two connected nodes.</summary>
    public SensorNode Node1 { get; set; }
    /// <summary>Second of the two connected nodes (was a duplicate "Node1", which does not compile).</summary>
    public SensorNode Node2 { get; set; }
    //...
}
This allows you to control relation between nodes, so you have access to every node, which are connected through sinks. Calling methods on this objects allows you to initiate interaction between objects. BTW, SensorNode class can also contain reference to a list of all its sinks in order to interact with them from it's own methods:
// Aggregation from the other side: a node keeps a reference to every sink it
// belongs to, so it can interact with them from its own methods.
public class SensorNode
{
// NOTE(review): a public settable List exposes internal state — consider a
// read-only collection with a private backing list instead.
public List<Sink> ConnectedSinks { get; set; }
}
PS: It is not a very good idea to use public fields in OO Languages, so you'd better consider using public properties instead.
I was trying a practice problem and have implemented a generic Singly Linked List in C# whose head is a private variable -
/// <summary>
/// Root of the linked list
/// </summary>
private Node head;
// One element of the singly linked list: an untyped payload plus a link to
// the following node (null at the tail).
public class Node
{
    /// <summary>
    /// Data contained in the node
    /// </summary>
    public object Data { get; set; }

    /// <summary>
    /// Reference to the next node of the singly linked list
    /// </summary>
    public Node Next { get; set; }
}
Now I am trying to solve another problem where I need to implement a FIFO queue using this singly linked list for a cat and dog shelter. This queue should allow dequeueing the oldest enqueued cat/dog, but additionally also provide two methods -
DequeueCat - Dequeue oldest cat even if it isn't the last node of the queue
DequeueDog - Dequeue oldest dog even if it isn't the last node of the queue
I didn't want to implement these methods in the SinglyLinkedList class but create a separate class named CatDogShelterQueue -
But for implementing the DequeueCat/Dog methods, I'd need to traverse the linkedlist and reach the oldest Cat/Dog node.
I know of a few ways in which to do this -
Make 'head' protected instead of private and inherit SinglyLinkedList as the base class of CatDogShelterQueue
Make 'head' internal and use it by referencing a variable of SinglyLinkedList type -
private SinglyLinkedList queue;
For now, I chose the second option but wanted to know what is best practice in situations like these.
My final current solution is below -
SinglyLinkedList
/* Program: Singly Linked List implementation in C#
*
* Date: 12/24/2015
*/
using System;
namespace CrackingTheCodingInterview
{
/// <summary>
/// A node of the singly linked list
/// It contains data in generic object type and points to next node in the list
/// </summary>
public class Node
{
    /// <summary>
    /// Data contained in the node
    /// </summary>
    public object Data { get; set; }

    /// <summary>
    /// Reference to the next node of the singly linked list
    /// </summary>
    public Node Next { get; set; }
}
/// <summary>
/// Implementation of a singly linked list of object data.
/// </summary>
public partial class SinglyLinkedList
{
    /// <summary>
    /// Root of the linked list; null when the list is empty.
    /// Internal so sibling classes in this assembly can traverse the list.
    /// </summary>
    internal Node head;

    /// <summary>
    /// Creates a new node for inserting in singly linked list.
    /// </summary>
    /// <param name="d">Data to be inserted in node</param>
    /// <returns>The created node, with Next set to null</returns>
    private Node CreateNewNode(object d)
    {
        Node newNode = new Node();
        newNode.Data = d;
        newNode.Next = null;
        return newNode;
    }

    /// <summary>
    /// Default constructor of singly linked list; the list starts empty.
    /// </summary>
    public SinglyLinkedList()
    {
        head = null;
    }

    /// <summary>
    /// Insert a node at the start of the singly linked list.
    /// </summary>
    /// <param name="d">Data to be inserted in the singly linked list</param>
    public void Insert(object d)
    {
        // One code path covers both the empty and non-empty list: the new
        // node points at the old head (possibly null) and becomes the head.
        Node newNode = CreateNewNode(d);
        newNode.Next = head;
        head = newNode;
    }

    /// <summary>
    /// Traverse and print all the nodes of singly linked list to the console.
    /// </summary>
    public void Print()
    {
        Node temp = head;
        while (temp != null)
        {
            Console.Write("{0}->", temp.Data);
            temp = temp.Next;
        }
        Console.WriteLine();
    }

    /// <summary>
    /// Traverses through each node of a singly linked list and searches for an element.
    /// </summary>
    /// <param name="d">Data to be searched in all the nodes of a singly linked list</param>
    /// <returns>The first node holding the element if it exists, else null</returns>
    public Node Search(object d)
    {
        Node temp = head;
        while (temp != null)
        {
            if (temp.Data.Equals(d))
            {
                return temp;
            }
            temp = temp.Next;
        }
        return null;
    }

    /// <summary>
    /// Removes the head of the linked list.
    /// </summary>
    /// <returns>Data in head of linked list if it exists, else null</returns>
    public object DeleteHead()
    {
        if (head != null)
        {
            Node temp = head;
            head = head.Next;
            return temp.Data;
        }
        return null;
    }

    /// <summary>
    /// Peeks at the head of the linked list without removing it.
    /// </summary>
    /// <returns>Data in head of linked list if it exists, else null</returns>
    public object PeekHead()
    {
        return head != null ? head.Data : null;
    }

    /// <summary>
    /// Checks if the singly linked list is empty.
    /// </summary>
    /// <returns>true if linked list is empty, else false</returns>
    public bool IsEmpty()
    {
        return head == null;
    }

    /// <summary>
    /// Inserts the element at the end of the linked list.
    /// </summary>
    /// <param name="d">Element to be inserted at the end</param>
    public void InsertEnd(object d)
    {
        Node newNode = CreateNewNode(d);
        if (head == null)
        {
            head = newNode;
        }
        else
        {
            // Walk to the current tail and append.
            Node temp = head;
            while (temp.Next != null)
            {
                temp = temp.Next;
            }
            temp.Next = newNode;
        }
    }

    /// <summary>
    /// Deletes the last node of the linked list.
    /// </summary>
    /// <returns>Data of the last node, or null if the list is empty</returns>
    public object DeleteEnd()
    {
        if (head == null || head.Next == null)
        {
            // Zero or one element: removing the tail is removing the head.
            return DeleteHead();
        }
        // Walk to the node preceding the tail, then cut the tail off.
        Node temp = head;
        while (temp.Next.Next != null)
        {
            temp = temp.Next;
        }
        object del = temp.Next.Data;
        temp.Next = null;
        return del;
    }

    /// <summary>
    /// Deletes the second last element of the linked list.
    /// </summary>
    /// <returns>Data of the second last element, or null if fewer than two elements exist</returns>
    public object DeleteSecondLast()
    {
        if (head == null || head.Next == null)
        {
            // Fewer than two nodes: no second-last element exists.
            return null;
        }
        if (head.Next.Next == null)
        {
            // Exactly two nodes: the head is the second-last.
            object del = head.Data;
            head = head.Next;
            return del;
        }
        // Walk to the node preceding the second-last node.
        Node temp = head;
        while (temp.Next.Next.Next != null)
        {
            temp = temp.Next;
        }
        object removed = temp.Next.Data;
        // BUG FIX: the original did "temp = temp.Next.Next", which only moved
        // the local variable and left the list unchanged; relink to unlink.
        temp.Next = temp.Next.Next;
        return removed;
    }
}
}
CatDogShelterQueue
/* Program: To implement a queue for cat and dog shelter
*
* Date: 1/1/2016
*/
using System;
namespace CrackingTheCodingInterview
{
/// <summary>
/// Class for a cat and dog shelter queue: FIFO overall, with the ability to
/// dequeue the oldest animal of a specific type.
/// </summary>
public class CatDogShelterQueue
{
    /// <summary>
    /// Class for storing type and number of cat/dog
    /// </summary>
    public class CatDog
    {
        internal CatDogType type;
        internal int num;

        /// <summary>
        /// Constructor for initializing CatDog with its type and number
        /// </summary>
        /// <param name="n">Identifying number of the animal</param>
        /// <param name="t">Whether the animal is a cat or a dog</param>
        public CatDog(int n, CatDogType t)
        {
            num = n;
            type = t;
        }
    }

    // Animals are inserted at the head, so the tail holds the oldest arrival.
    private SinglyLinkedList queue;

    /// <summary>
    /// Constructor initializing a singly linked list acting as a FIFO queue
    /// </summary>
    public CatDogShelterQueue()
    {
        queue = new SinglyLinkedList();
    }

    /// <summary>
    /// Adds an animal to the shelter queue.
    /// </summary>
    /// <param name="d"> Cat/Dog Number </param>
    /// <param name="c"> Type - Cat or Dog </param>
    public void enqueue(object d, CatDogType c)
    {
        CatDog cd = new CatDog((int)d, c);
        queue.Insert(cd);
    }

    /// <summary>
    /// Dequeueing the oldest animal - cat or dog
    /// </summary>
    /// <returns>Oldest arrival, or null if the queue is empty</returns>
    public CatDog dequeueAny()
    {
        return (CatDog)queue.DeleteEnd();
    }

    /// <summary>
    /// Dequeueing the oldest cat
    /// </summary>
    /// <returns>Oldest entered cat, or null if the queue holds no cats</returns>
    public CatDog dequeueCat()
    {
        return DequeueOldestOfType(CatDogType.Cat);
    }

    /// <summary>
    /// Dequeueing the oldest dog
    /// </summary>
    /// <returns>Oldest entered dog, or null if the queue holds no dogs</returns>
    public CatDog dequeueDog()
    {
        return DequeueOldestOfType(CatDogType.Dog);
    }

    /// <summary>
    /// Removes and returns the oldest animal of the given type.
    /// The list stores newest-first, so the LAST matching node is the oldest.
    /// </summary>
    /// <param name="t">Animal type to dequeue</param>
    /// <returns>Oldest animal of the requested type, or null if none exists</returns>
    private CatDog DequeueOldestOfType(CatDogType t)
    {
        Node oldest = null;
        Node temp = queue.head;
        while (temp != null)
        {
            if (((CatDog)temp.Data).type == t)
            {
                oldest = temp;
            }
            temp = temp.Next;
        }

        // BUG FIX: the original dereferenced the match without checking, so an
        // empty queue or one with no animal of this type threw
        // NullReferenceException. Return null instead.
        if (oldest == null)
        {
            return null;
        }

        if (oldest.Next == null)
        {
            // The match is the tail node; reuse the list's tail removal.
            return (CatDog)queue.DeleteEnd();
        }

        // Delete-by-copy: overwrite the matched node with its successor's data
        // and splice the successor out (works without a "previous" pointer).
        CatDog del = (CatDog)oldest.Data;
        oldest.Data = oldest.Next.Data;
        oldest.Next = oldest.Next.Next;
        return del;
    }
}
/// <summary>
/// Small console driver that exercises the shelter queue.
/// </summary>
internal class CatDogShelterQueueTest
{
    static void Main()
    {
        var shelter = new CatDogShelterQueue();

        // Enqueue five animals, alternating cat/dog, oldest first.
        shelter.enqueue(1, CatDogType.Cat);
        shelter.enqueue(2, CatDogType.Dog);
        shelter.enqueue(3, CatDogType.Cat);
        shelter.enqueue(4, CatDogType.Dog);
        shelter.enqueue(5, CatDogType.Cat);

        Console.WriteLine("Dequeueing last animal: {0}", shelter.dequeueAny().num);
        Console.WriteLine("Dequeueing last cat: {0}", shelter.dequeueCat().num);
        Console.WriteLine("Dequeueing last dog: {0}", shelter.dequeueDog().num);
        Console.ReadLine();
    }
}
/// <summary>
/// Kind of animal held in the shelter queue: Cat or Dog.
/// </summary>
public enum CatDogType
{
/// <summary>
/// A cat.
/// </summary>
Cat,
/// <summary>
/// A dog.
/// </summary>
Dog
};
}
I am working on a project where I plan on using Redis as persistent data storage, however the task at hand, I am working on a generic Object cache. and as a huge fan of LINQ I have started designing a cache which does support this.
public ConcurrentBag<Object> Cache = new ConcurrentBag<object>();
/// <summary>
/// Returns all cached items of type T that satisfy the predicate.
/// Still O(n) over the entire mixed-type bag on every call.
/// </summary>
/// <typeparam name="T">Reference type to filter the cache by</typeparam>
/// <param name="predicate">Filter applied to each item of type T</param>
/// <returns>Matching items; empty list when none match</returns>
public List<T> FindBy<T>(Func<T, bool> predicate) where T : class
{
    // OfType<T>() performs exactly the safe-cast-and-filter the original
    // loop did by hand. The "as" operator never throws, so the original
    // empty try/catch was dead code that only hid real errors.
    return Cache.OfType<T>().Where(predicate).ToList();
}
I am afraid that when the object cache grows large it will become inefficient. (I estimate 500k-1m objects)
I was hoping that it would be possible to use something like this
public ConcurrentBag<Object> Cache = new ConcurrentBag<object>();
public List<T> FindBy<T>(Func<T, bool> predicate) where T : class
{
// NOTE(review): this is the hoped-for shape, but it does not compile as
// written — Cache is ConcurrentBag<object>, so Where<T> cannot accept a
// Func<T, bool> predicate; an OfType<T>() call would be needed first.
return Cache.Where<T>(predicate).ToList();
}
Hopefully I am not all off-track here? Any suggestions are welcome :)
Hash your genric type and save list of specific type..
Something like:
Dictionary<Type,List<T>>
Then get value by type key and query as you wanted
Since you estimate a lot of items in the cache and the operations on the cache will be type specific, you could use multiple bags wrapped into a dictionary. That would speed up finding the subset of the cache of type of interest and would be ideal if the cache contained many minimal subsets of different types.
readonly IDictionary<Type, ConcurrentBag<object>> _cache = new ConcurrentDictionary<Type, ConcurrentBag<object>>();
/// <summary>
/// Returns all cached items of type T matching the predicate, looking only
/// at the bag dedicated to that type.
/// </summary>
/// <typeparam name="T">Reference type to query</typeparam>
/// <param name="predicate">Filter applied to each cached item of type T</param>
/// <returns>Matching items, or an empty list when nothing of type T is cached</returns>
public List<T> FindBy<T>(Func<T, bool> predicate) where T : class
{
    // Look up the bag that holds items of type T; absence means nothing of
    // that type has been cached yet.
    ConcurrentBag<object> bag;
    if (!_cache.TryGetValue(typeof(T), out bag))
    {
        // Nothing cached for this type: return an empty result.
        return new List<T>();
    }
    // Cast, apply predicate and return.
    return bag.Cast<T>().Where(predicate).ToList();
}
Of course now you also need to handle adding items properly to the cache to ensure that different types will be put into their corresponding bags.
Big thanks to both Discosultan and user1190916, who both pointed me in the right direction in what I needed to get a CRUD cached-object repository with full LINQ support, using Redis for persistent storage (client: ServiceStack.Redis). This is what I have managed to conjure up thus far.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Linq.Expressions;
using System.Text;
using System.Collections.Concurrent;
using ServiceStack.Redis;
namespace RedisTestRepo
{
class Program
{
    //public static DataRepository Db;
    static void Main(string[] args)
    {
        Repo r = new Repo();

        // We do not touch sequence, by running example we can see that sequence will give Users new unique Id.
        // Empty data store.
        Console.WriteLine("Our User Data store should be empty.");
        Console.WriteLine("Users In \"Database\" : {0}\n", r.All<User>().Count);

        // Add imaginary users.
        Console.WriteLine("Adding 100 imaginairy users.");
        // BUG FIX: the loop previously ran 99 times (i < 99), contradicting
        // the "100 users" message above and the count printed below.
        for (int i = 0; i < 100; i++)
            r.Create<User>(new User { Id = r.Next<User>(), Name = "Joachim Nordvik" });

        // We should have 100 users in data store.
        Console.WriteLine("Users In \"Database\" : {0}\n", r.All<User>().Count);

        // Lets print 10 users from data store.
        Console.WriteLine("Order by Id, Take (10) and print users.");
        foreach (var u in r.All<User>().OrderBy(z => z.Id).Take(10))
        {
            Console.WriteLine("ID:{0}, Name: {1}", u.Id, u.Name);
            // Lets update an entity.
            u.Name = "My new Name";
            r.Update<User>(x => x.Id == u.Id, u);
        }

        // Lets print 20 users from data store, we already edited 10 users.
        Console.WriteLine("\nOrder by Id, Take (20) and print users, we previously edited the users that we printed lets see if it worked.");
        foreach (var u in r.All<User>().OrderBy(z => z.Id).Take(20))
        {
            Console.WriteLine("ID:{0}, Name: {1}", u.Id, u.Name);
        }

        // Clean up data store.
        Console.WriteLine("\nCleaning up Data Store.\n");
        foreach (var u in r.All<User>())
            r.Delete<User>(u);

        // Confirm that we no longer have any users.
        Console.WriteLine("Confirm that we no longer have User entities in Data Store.");
        Console.WriteLine("Users In \"Database\" : {0}\n\n", r.All<User>().Count);

        Console.WriteLine("Hit return to exit!");
        Console.Read();
    }
}
/// <summary>
/// Repository fronting a Redis data store with an in-memory, per-type object
/// cache that supports LINQ-style queries via Func predicates.
/// NOTE(review): the ConcurrentDictionary only protects the type-to-list map;
/// the List&lt;object&gt; values are NOT thread-safe, so concurrent
/// Create/Delete/Update/FindBy on the same type can race — confirm
/// single-threaded use or add locking.
/// </summary>
public class Repo
{
// Shared, pooled Redis connection manager for all Repo instances.
private static readonly PooledRedisClientManager m = new PooledRedisClientManager();
public Repo()
{
// Spool Redis Database into our object cache.
LoadIntoCache<User>();
}
// Maps entity Type -> cached entities of that type (boxed as object).
readonly IDictionary<Type, List<object>> _cache = new ConcurrentDictionary<Type, List<object>>();
/// <summary>
/// Load all {T} entities from the Data Store into the object cache,
/// replacing any cached list for that type.
/// </summary>
/// <typeparam name="T">class</typeparam>
private void LoadIntoCache<T>() where T : class
{
_cache[typeof(T)] = GetAll<T>().Cast<object>().ToList();
}
/// <summary>
/// Add single {T} into cache and Data Store.
/// </summary>
/// <typeparam name="T">class</typeparam>
/// <param name="entity">class object</param>
public void Create<T>(T entity) where T : class
{
List<object> list;
if (!_cache.TryGetValue(typeof(T), out list))
{
// First entity of this type: start a fresh list.
list = new List<object>();
}
list.Add(entity);
_cache[typeof(T)] = list;
Store<T>(entity);
}
/// <summary>
/// Delete single {T} from cache and Data Store.
/// Silently does nothing when the type has never been cached.
/// </summary>
/// <typeparam name="T">class</typeparam>
/// <param name="entity">class object</param>
public void Delete<T>(T entity) where T : class
{
List<object> list;
if (_cache.TryGetValue(typeof(T), out list))
{
// List.Remove uses Equals; with no override this is reference equality.
list.Remove(entity);
_cache[typeof(T)] = list;
RedisDelete<T>(entity);
}
}
/// <summary>
/// Tries to update or Add entity to object cache and Data Store.
/// </summary>
/// <typeparam name="T">class</typeparam>
/// <param name="predicate">linq expression locating the old entity</param>
/// <param name="entity">entity</param>
public void Update<T>(Func<T, bool> predicate, T entity) where T : class
{
List<object> list;
if (_cache.TryGetValue(typeof(T), out list))
{
// Look for old entity.
var e = list.Cast<T>().Where(predicate).FirstOrDefault();
if(e != null)
{
list.Remove(e);
}
// Regardless if object existed or not we add it to our Cache / Data Store.
list.Add(entity);
_cache[typeof(T)] = list;
Store<T>(entity);
}
}
/// <summary>
/// Find List&lt;T&gt;(predicate) in cache; the Data Store is not consulted.
/// </summary>
/// <typeparam name="T">class</typeparam>
/// <param name="predicate">linq statement</param>
/// <returns>Matching cached entities, or an empty list</returns>
public List<T> FindBy<T>(Func<T, bool> predicate) where T : class
{
List<object> list;
if (_cache.TryGetValue(typeof(T), out list))
{
return list.Cast<T>().Where(predicate).ToList();
}
return new List<T>();
}
/// <summary>
/// Find All {T}.
/// NOTE(review): this reads straight from the Data Store (GetAll), bypassing
/// the object cache entirely — confirm that is intended.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <returns>All {T} entities from the Data Store</returns>
public List<T> All<T>() where T : class
{
return GetAll<T>().ToList();
}
/// <summary>
/// Find Single {T} in object cache.
/// </summary>
/// <typeparam name="T">class</typeparam>
/// <param name="predicate">linq statement</param>
/// <returns>First matching entity, or null</returns>
public T Read<T>(Func<T, bool> predicate) where T : class
{
List<object> list;
if (_cache.TryGetValue(typeof(T), out list))
{
return list.Cast<T>().Where(predicate).FirstOrDefault();
}
return null;
}
/// <summary>
/// Next unique id for {T}, taken from a Redis sequence.
/// </summary>
public long Next<T>() where T : class
{
long id = 1;
using (var ctx = m.GetClient())
{
try
{
id = ctx.As<T>().GetNextSequence();
}
catch(Exception ex)
{
// NOTE(review): failures are swallowed and the default id 1 is returned,
// which can silently reuse ids — consider logging or rethrowing here.
}
}
return id;
}
// Remove the entity from the Redis store.
private void RedisDelete<T>(T entity) where T : class
{
using (var ctx = m.GetClient())
ctx.As<T>().Delete(entity);
}
// Fetch a single entity by id from the Redis store (not the cache).
private T Find<T>(long id) where T : class
{
using (var ctx = m.GetClient())
return ctx.As<T>().GetById(id);
}
// Fetch all entities of type {T} from the Redis store.
// Any failure is treated as "no data" and yields an empty list.
private IList<T> GetAll<T>() where T : class
{
using(var ctx = m.GetClient())
{
try
{
return ctx.As<T>().GetAll();
}
catch
{
return new List<T>();
}
}
}
// Persist (insert or overwrite) the entity in the Redis store.
private void Store<T>(T entity) where T : class
{
using (var ctx = m.GetClient())
ctx.Store<T>(entity);
}
}
/// <summary>
/// Demo entity persisted to Redis and held in the object cache.
/// </summary>
public class User
{
// Unique identifier; assigned from Repo.Next&lt;User&gt;() (a Redis sequence).
public long Id { get; set; }
// Display name of the user.
public string Name { get; set; }
}
}
Observable.Concat is an implementation that joins observables but the second IObservable<T> only makes the subscription when the first is completed.
http://www.introtorx.com/content/v1.0.10621.0/12_CombiningSequences.html#Concat
Is there any implementation of a "HotConcat"? Similar to Observable.Merge, but keeping the delivery order, first pushing the elements of initial subscription and then the subsequents. Something like:
I know that it is possible to use the ReplaySubject<T>, but it doesn't seem so good, because of the performance and memory usage impacts.
Here's the implementation I've been using for a while. This implementation introduces a BufferUntilSubscribed operator that turns an IObservable into a IConnectableObservable that will start buffering whenever you call Connect and will deliver the buffered results to the first subscriber. Once the first subscriber has "caught up", then the buffering will stop and the subscriber will just be given the live events as they arrive.
Once you have that, you can write HotConcat as something like:
/// <summary>
/// Concatenates the sources in order while subscribing (connecting) to all of
/// them immediately, buffering later sources until their turn comes.
/// </summary>
/// <param name="sources">Observables to concatenate, in delivery order.</param>
/// <returns>An observable yielding all elements of all sources, in order.</returns>
public static IObservable<T> HotConcat<T>(params IObservable<T>[] sources)
{
    // BUG FIX 1: the lambda parameter was named "s2", shadowing the outer
    // local "s2" (compile error CS0136).
    // BUG FIX 2: Select is lazy; enumerating it once for Connect and again
    // for Concat would create two distinct sets of BufferUntilSubscribed
    // wrappers, the second of which was never connected. Materialize once.
    var connectables = sources.Select(s => s.BufferUntilSubscribed()).ToArray();
    var subscriptions = new CompositeDisposable(connectables.Select(c => c.Connect()).ToArray());
    return Observable.Create<T>(observer =>
    {
        var subscription = new SingleAssignmentDisposable();
        var disposables = new CompositeDisposable(subscriptions);
        disposables.Add(subscription);
        // Concat drains each buffered source fully before moving to the next.
        subscription.Disposable = connectables.Concat().Subscribe(observer);
        return disposables;
    });
}
Here's the implementation of BufferUntilSubscribed:
/// <summary>
/// Connectable wrapper that buffers source events from Connect() until the
/// first observer subscribes, replays the buffer to that observer, then
/// switches to live delivery. Later observers receive live events only.
/// </summary>
private class BufferUntilSubscribedObservable<T> : IConnectableObservable<T>
{
private readonly IObservable<T> _source;
// Scheduler used to replay the buffered items to the first subscriber.
private readonly IScheduler _scheduler;
// Carries events to subscribers once buffering is over (or for late subscribers).
private readonly Subject<T> _liveEvents;
// Set once the first observer subscribes; later observers skip the buffer.
private bool _observationsStarted;
// Items recorded between Connect() and the first subscription.
// Null means "no longer buffering" (drained or removed).
private Queue<T> _buffer;
// Guards _buffer and _observationsStarted.
private readonly object _gate;
public BufferUntilSubscribedObservable(IObservable<T> source, IScheduler scheduler)
{
_source = source;
_scheduler = scheduler;
_liveEvents = new Subject<T>();
_buffer = new Queue<T>();
_gate = new object();
_observationsStarted = false;
}
public IDisposable Subscribe(IObserver<T> observer)
{
lock (_gate)
{
if (_observationsStarted)
{
// Not the first subscriber: live events only, no buffered history.
return _liveEvents.Subscribe(observer);
}
_observationsStarted = true;
// First subscriber: drain the buffer in chunks, then merge in the live
// stream once GetBuffers() runs dry.
var bufferedEvents = GetBuffers().Concat().Finally(RemoveBuffer); // Finally clause to remove the buffer if the first observer stops listening.
return Observable.Merge(_liveEvents, bufferedEvents).Subscribe(observer);
}
}
public IDisposable Connect()
{
// Start consuming the source; OnNext below decides per item whether to
// buffer it or forward it to live subscribers.
return _source.Subscribe(OnNext, _liveEvents.OnError, _liveEvents.OnCompleted);
}
private void RemoveBuffer()
{
lock (_gate)
{
// Discard the buffer; subsequent items go straight to _liveEvents.
_buffer = null;
}
}
/// <summary>
/// Acquires a lock and checks the buffer. If it is empty, then replaces it with null and returns null. Else replaces it with an empty buffer and returns the old buffer.
/// </summary>
/// <returns>The drained buffer, or null once the subscriber has caught up.</returns>
private Queue<T> GetAndReplaceBuffer()
{
lock (_gate)
{
if (_buffer == null)
{
// Buffering already ended.
return null;
}
if (_buffer.Count == 0)
{
// Caught up: stop buffering from now on.
_buffer = null;
return null;
}
// Hand the accumulated items out and start a fresh buffer for
// anything that arrives while these are being replayed.
var result = _buffer;
_buffer = new Queue<T>();
return result;
}
}
/// <summary>
/// An enumerable of buffers that will complete when a call to GetAndReplaceBuffer() returns a null, e.g. when the observer has caught up with the incoming source data.
/// </summary>
/// <returns>Successive snapshots of the buffer as observables.</returns>
private IEnumerable<IObservable<T>> GetBuffers()
{
Queue<T> buffer;
while ((buffer = GetAndReplaceBuffer()) != null)
{
yield return buffer.ToObservable(_scheduler);
}
}
private void OnNext(T item)
{
lock (_gate)
{
if (_buffer != null)
{
// Still buffering: record the item for the first subscriber.
_buffer.Enqueue(item);
return;
}
}
// Buffer is gone: the subscriber has caught up, deliver live.
_liveEvents.OnNext(item);
}
}
/// <summary>
/// Wraps the source in a connectable observable that, once connected, buffers
/// data until the first observer subscribes; that observer receives all
/// buffered data followed by the live stream. Later observers only see live
/// events, not the buffered history.
/// </summary>
/// <param name="source">The observable whose events should be buffered.</param>
/// <param name="scheduler">Scheduler to use to dump the buffered data to the observer.</param>
/// <returns>The connectable buffering wrapper around the source.</returns>
public static IConnectableObservable<T> BufferUntilSubscribed<T>(this IObservable<T> source, IScheduler scheduler)
{
    var connectable = new BufferUntilSubscribedObservable<T>(source, scheduler);
    return connectable;
}
/// <summary>
/// Convenience overload of BufferUntilSubscribed that replays the buffered
/// data on Scheduler.Immediate. Once connected, the wrapper buffers data
/// until the first observer subscribes, sends that observer the buffered
/// data and then the live stream; later observers only see live events.
/// </summary>
/// <param name="source">The observable whose events should be buffered.</param>
/// <returns>The connectable buffering wrapper around the source.</returns>
public static IConnectableObservable<T> BufferUntilSubscribed<T>(this IObservable<T> source)
{
    var connectable = new BufferUntilSubscribedObservable<T>(source, Scheduler.Immediate);
    return connectable;
}
I don't know of such a composition function, but you can write one that matches your needs.
Here's my attempt at writing one. It will keep elements in memory only until they are replayed once. But I think there should be a way to make a cleaner implementation though.
/// <summary>
/// Concatenates two observables while subscribing to the second immediately,
/// buffering its notifications until the first completes, then replaying the
/// buffer and finally switching to the second source directly.
/// </summary>
/// <param name="first">Source whose elements are delivered first.</param>
/// <param name="second">Source subscribed to eagerly and buffered.</param>
/// <returns>Elements of first, then buffered and live elements of second.</returns>
public static IObservable<T> HotConcat<T>(this IObservable<T> first, IObservable<T> second)
{
return Observable.Create<T>(observer =>
{
// Buffer of notifications from `second` recorded while `first` is still
// running; set to null below to mark "replay finished, stop buffering".
var queue = new Queue<Notification<T>>();
var secondSubscription = second.Materialize().Subscribe(item =>
{
// NOTE(review): `queue` can be nulled by secondReplay between this
// check and the lock below, which would make lock(queue) throw a
// NullReferenceException — confirm and guard if `second` is hot.
if (queue == null)
return;
lock (queue)
{
queue.Enqueue(item);
}
});
var secondReplay = Observable.Create<T>(secondObserver =>
{
// Synchronously drains everything buffered so far on the subscribing
// thread; loops until the queue is observed empty.
while (true)
{
Notification<T> item = null;
lock (queue)
{
if (queue.Count > 0)
{
item = queue.Dequeue();
}
else
{
// Caught up: complete the replay and stop buffering.
secondObserver.OnCompleted();
secondSubscription.Dispose();
queue = null;
break;
}
}
if (item != null)
item.Accept(secondObserver);
}
return secondSubscription;
});
// first, then the buffered portion of second, then second itself.
// NOTE(review): the final Concat subscribes to `second` a second time;
// for a cold observable this restarts it from the beginning — confirm
// `second` is hot when using this operator.
return first.Concat(secondReplay).Concat(second).Subscribe(observer);
});
}
I have been reading on how to compare a list with one annother. I have tried to implement the IEquatable interface. Here is what i have done so far:
/// <summary>
/// A object holder that contains a service and its current failcount
/// </summary>
public class ServiceHolder : IEquatable<ServiceHolder>
{
    /// <summary>
    /// Constructor
    /// </summary>
    /// <param name="service">The service to wrap; the fail count starts at zero</param>
    public ServiceHolder(Service service)
    {
        Service = service;
        CurrentFailCount = 0;
    }

    public Service Service { get; set; }
    public UInt16 CurrentFailCount { get; set; }

    /// <summary>
    /// Public equal method
    /// </summary>
    /// <param name="obj">Object to compare against</param>
    /// <returns>true if obj is a ServiceHolder equal to this one</returns>
    public override bool Equals(object obj)
    {
        // Delegate to the strongly typed overload; a null or a foreign type
        // becomes null via the "as" cast and yields false there.
        return Equals(obj as ServiceHolder);
    }

    /// <summary>
    /// Checks the internal components compared to one another
    /// </summary>
    /// <param name="serviceHolder">Holder to compare against</param>
    /// <returns>true if they are the same else false</returns>
    public bool Equals(ServiceHolder serviceHolder)
    {
        if (serviceHolder == null)
        {
            return false;
        }
        // Flattened from the original nested-if pyramid into one conjunction.
        // BUG FIX: the original compared this.Service.Name with itself
        // (always true); it must compare against the other holder's name.
        return this.Service.Id == serviceHolder.Service.Id
            && this.Service.IpAddress == serviceHolder.Service.IpAddress
            && this.Service.Port == serviceHolder.Service.Port
            && this.Service.PollInterval == serviceHolder.Service.PollInterval
            && this.Service.ServiceType == serviceHolder.Service.ServiceType
            && this.Service.Location == serviceHolder.Service.Location
            && this.Service.Name == serviceHolder.Service.Name;
    }

    /// <summary>
    /// Hash code consistent with Equals. Types that override Equals must also
    /// override GetHashCode so hash-based collections behave correctly.
    /// </summary>
    /// <returns>Hash derived from the service identity</returns>
    public override int GetHashCode()
    {
        // Id participates in Equals, so hashing it alone is consistent:
        // equal holders share an Id and therefore share a hash code.
        return Service != null ? Service.Id.GetHashCode() : 0;
    }
}
and this is where I use it:
/// <summary>
/// Reloads the service holders from the repository and compares them with the
/// current Services snapshot, recording the result in StateChanged.
/// </summary>
private void CheckIfServicesHaveChangedEvent()
{
    IList<ServiceHolder> tmp;
    using (var db = new EFServiceRepository())
    {
        tmp = GetServiceHolders(db.GetAll());
    }
    // BUG FIX: IList inherits Object.Equals (reference equality), so the
    // original comparison was always false for two distinct list instances
    // and Equals(ServiceHolder) was never invoked. SequenceEqual compares
    // element-wise using ServiceHolder's IEquatable implementation.
    // NOTE(review): polarity kept from the original branch structure —
    // StateChanged becomes true when the snapshots are EQUAL — which reads
    // inverted relative to the method name; confirm the intended meaning.
    // If element order should not matter, use set semantics instead:
    // !tmp.Except(Services).Any()
    StateChanged = tmp.SequenceEqual(Services);
}
Now when I debug and I put a break point in the equals function it never gets hit.
This leads me to think I have implemented it incorrectly, or that I am not calling it correctly?
If you want to compare the contents of two lists then the best method is SequenceEqual.
if (tmp.SequenceEqual(Services))
This will compare the contents of both lists using equality semantics on the values in the list. In this case the element type is ServiceHolder and as you've already defined equality semantics for this type it should work just fine
EDIT
OP commented that order of the collections shouldn't matter. For that scenario you can do the following
if (!tmp.Except(Services).Any())
You can compare lists without the order most easily with linq.
List<ServiceHolder> result = tmp.Except(Services).ToList();