I've been playing with collections and threading and came across the nifty extension methods people have created to ease the use of ReaderWriterLockSlim by allowing the IDisposable pattern.
However, I believe I have come to realize that something in the implementation is a performance killer. I realize that extension methods are not supposed to really impact performance, so I am left assuming that something in the implementation is the cause... the amount of Disposable structs created/collected?
Here's some test code:
using System;
using System.Collections.Generic;
using System.Threading;
using System.Diagnostics;
namespace LockPlay {
static class RWLSExtension {
struct Disposable : IDisposable {
readonly Action _action;
public Disposable(Action action) {
_action = action;
}
public void Dispose() {
_action();
}
} // end struct
public static IDisposable ReadLock(this ReaderWriterLockSlim rwls) {
rwls.EnterReadLock();
return new Disposable(rwls.ExitReadLock);
}
public static IDisposable UpgradableReadLock(this ReaderWriterLockSlim rwls) {
rwls.EnterUpgradeableReadLock();
return new Disposable(rwls.ExitUpgradeableReadLock);
}
public static IDisposable WriteLock(this ReaderWriterLockSlim rwls) {
rwls.EnterWriteLock();
return new Disposable(rwls.ExitWriteLock);
}
} // end class
class Program {
class MonitorList<T> : List<T>, IList<T> {
object _syncLock = new object();
public MonitorList(IEnumerable<T> collection) : base(collection) { }
T IList<T>.this[int index] {
get {
lock(_syncLock)
return base[index];
}
set {
lock(_syncLock)
base[index] = value;
}
}
} // end class
class RWLSList<T> : List<T>, IList<T> {
ReaderWriterLockSlim _rwls = new ReaderWriterLockSlim();
public RWLSList(IEnumerable<T> collection) : base(collection) { }
T IList<T>.this[int index] {
get {
try {
_rwls.EnterReadLock();
return base[index];
} finally {
_rwls.ExitReadLock();
}
}
set {
try {
_rwls.EnterWriteLock();
base[index] = value;
} finally {
_rwls.ExitWriteLock();
}
}
}
} // end class
class RWLSExtList<T> : List<T>, IList<T> {
ReaderWriterLockSlim _rwls = new ReaderWriterLockSlim();
public RWLSExtList(IEnumerable<T> collection) : base(collection) { }
T IList<T>.this[int index] {
get {
using(_rwls.ReadLock())
return base[index];
}
set {
using(_rwls.WriteLock())
base[index] = value;
}
}
} // end class
static void Main(string[] args) {
const int ITERATIONS = 100;
const int WORK = 10000;
const int WRITE_THREADS = 4;
const int READ_THREADS = WRITE_THREADS * 3;
// create data - first List is for comparison only... not thread safe
int[] copy = new int[WORK];
IList<int>[] l = { new List<int>(copy), new MonitorList<int>(copy), new RWLSList<int>(copy), new RWLSExtList<int>(copy) };
// test each list
Thread[] writeThreads = new Thread[WRITE_THREADS];
Thread[] readThreads = new Thread[READ_THREADS];
foreach(var list in l) {
Stopwatch sw = Stopwatch.StartNew();
for(int k=0; k < ITERATIONS; k++) {
for(int i = 0; i < writeThreads.Length; i++) {
writeThreads[i] = new Thread(p => {
IList<int> il = p as IList<int>;
int c = il.Count;
for(int j = 0; j < c; j++) {
il[j] = j;
}
});
writeThreads[i].Start(list);
}
for(int i = 0; i < readThreads.Length; i++) {
readThreads[i] = new Thread(p => {
IList<int> il = p as IList<int>;
int c = il.Count;
for(int j = 0; j < c; j++) {
int temp = il[j];
}
});
readThreads[i].Start(list);
}
for(int i = 0; i < readThreads.Length; i++)
readThreads[i].Join();
for(int i = 0; i < writeThreads.Length; i++)
writeThreads[i].Join();
};
sw.Stop();
Console.WriteLine("time: {0} class: {1}", sw.Elapsed, list.GetType());
}
Console.WriteLine("DONE");
Console.ReadLine();
}
} // end class
} // end namespace
Here's a typical result:
time: 00:00:03.0965242 class: System.Collections.Generic.List`1[System.Int32]
time: 00:00:11.9194573 class: LockPlay.Program+MonitorList`1[System.Int32]
time: 00:00:08.9510258 class: LockPlay.Program+RWLSList`1[System.Int32]
time: 00:00:16.9888435 class: LockPlay.Program+RWLSExtList`1[System.Int32]
DONE
As you can see, using the extensions actually makes the performance WORSE than just using lock (monitor).
Looks like its the price of instantiating millions of structs and the extra bit of invocations.
I would go as far as to say that the ReaderWriterLockSlim is being misused in this sample, a lock is good enough in this case and the performance edge you get with the ReaderWriterLockSlim is negligible compared to the price of explaining these concepts to junior devs.
You get a huge advantage with reader writer style locks when it takes a non-negligable amount of time to perform reads and writes. The boost will be biggest when you have a predominantly read based system.
Try inserting a Thread.Sleep(1) while the locks are acquired to see how huge a difference it makes.
See this benchmark:
Time for Test.SynchronizedList`1[System.Int32] Time Elapsed 12310 ms
Time for Test.ReaderWriterLockedList`1[System.Int32] Time Elapsed 547 ms
Time for Test.ManualReaderWriterLockedList`1[System.Int32] Time Elapsed 566 ms
In my benchmarking I do not really notice much of a difference between the two styles, I would feel comfortable using it provided it had some finalizer protection in case people forget to dispose ....
using System.Threading;
using System.Diagnostics;
using System.Collections.Generic;
using System;
using System.Linq;
namespace Test {
static class RWLSExtension {
struct Disposable : IDisposable {
readonly Action _action;
public Disposable(Action action) {
_action = action;
}
public void Dispose() {
_action();
}
}
public static IDisposable ReadLock(this ReaderWriterLockSlim rwls) {
rwls.EnterReadLock();
return new Disposable(rwls.ExitReadLock);
}
public static IDisposable UpgradableReadLock(this ReaderWriterLockSlim rwls) {
rwls.EnterUpgradeableReadLock();
return new Disposable(rwls.ExitUpgradeableReadLock);
}
public static IDisposable WriteLock(this ReaderWriterLockSlim rwls) {
rwls.EnterWriteLock();
return new Disposable(rwls.ExitWriteLock);
}
}
class SlowList<T> {
List<T> baseList = new List<T>();
public void AddRange(IEnumerable<T> items) {
baseList.AddRange(items);
}
public virtual T this[int index] {
get {
Thread.Sleep(1);
return baseList[index];
}
set {
baseList[index] = value;
Thread.Sleep(1);
}
}
}
class SynchronizedList<T> : SlowList<T> {
object sync = new object();
public override T this[int index] {
get {
lock (sync) {
return base[index];
}
}
set {
lock (sync) {
base[index] = value;
}
}
}
}
class ManualReaderWriterLockedList<T> : SlowList<T> {
ReaderWriterLockSlim slimLock = new ReaderWriterLockSlim();
public override T this[int index] {
get {
T item;
try {
slimLock.EnterReadLock();
item = base[index];
} finally {
slimLock.ExitReadLock();
}
return item;
}
set {
try {
slimLock.EnterWriteLock();
base[index] = value;
} finally {
slimLock.ExitWriteLock();
}
}
}
}
class ReaderWriterLockedList<T> : SlowList<T> {
ReaderWriterLockSlim slimLock = new ReaderWriterLockSlim();
public override T this[int index] {
get {
using (slimLock.ReadLock()) {
return base[index];
}
}
set {
using (slimLock.WriteLock()) {
base[index] = value;
}
}
}
}
class Program {
private static void Repeat(int times, int asyncThreads, Action action) {
if (asyncThreads > 0) {
var threads = new List<Thread>();
for (int i = 0; i < asyncThreads; i++) {
int iterations = times / asyncThreads;
if (i == 0) {
iterations += times % asyncThreads;
}
Thread thread = new Thread(new ThreadStart(() => Repeat(iterations, 0, action)));
thread.Start();
threads.Add(thread);
}
foreach (var thread in threads) {
thread.Join();
}
} else {
for (int i = 0; i < times; i++) {
action();
}
}
}
static void TimeAction(string description, Action func) {
var watch = new Stopwatch();
watch.Start();
func();
watch.Stop();
Console.Write(description);
Console.WriteLine(" Time Elapsed {0} ms", watch.ElapsedMilliseconds);
}
static void Main(string[] args) {
int threadCount = 40;
int iterations = 200;
int readToWriteRatio = 60;
var baseList = Enumerable.Range(0, 10000).ToList();
List<SlowList<int>> lists = new List<SlowList<int>>() {
new SynchronizedList<int>() ,
new ReaderWriterLockedList<int>(),
new ManualReaderWriterLockedList<int>()
};
foreach (var list in lists) {
list.AddRange(baseList);
}
foreach (var list in lists) {
TimeAction("Time for " + list.GetType().ToString(), () =>
{
Repeat(iterations, threadCount, () =>
{
list[100] = 99;
for (int i = 0; i < readToWriteRatio; i++) {
int ignore = list[i];
}
});
});
}
Console.WriteLine("DONE");
Console.ReadLine();
}
}
}
The code appears to use a struct to avoid object creation overhead, but doesn't take the other necessary steps to keep this lightweight. I believe it boxes the return value from ReadLock, and if so negates the entire advantage of the struct. This should fix all the issues and perform just as well as not going through the IDisposable interface.
Edit: Benchmarks demanded. These results are normalized so the manual method (call Enter/ExitReadLock and Enter/ExitWriteLock inline with the protected code) have a time value of 1.00. The original method is slow because it allocates objects on the heap that the manual method does not. I fixed this problem, and in release mode even the extension method call overhead goes away leaving it identically as fast as the manual method.
Debug Build:
Manual: 1.00
Original Extensions: 1.62
My Extensions: 1.24
Release Build:
Manual: 1.00
Original Extensions: 1.51
My Extensions: 1.00
My code:
internal static class RWLSExtension
{
public static ReadLockHelper ReadLock(this ReaderWriterLockSlim readerWriterLock)
{
return new ReadLockHelper(readerWriterLock);
}
public static UpgradeableReadLockHelper UpgradableReadLock(this ReaderWriterLockSlim readerWriterLock)
{
return new UpgradeableReadLockHelper(readerWriterLock);
}
public static WriteLockHelper WriteLock(this ReaderWriterLockSlim readerWriterLock)
{
return new WriteLockHelper(readerWriterLock);
}
public struct ReadLockHelper : IDisposable
{
private readonly ReaderWriterLockSlim readerWriterLock;
public ReadLockHelper(ReaderWriterLockSlim readerWriterLock)
{
readerWriterLock.EnterReadLock();
this.readerWriterLock = readerWriterLock;
}
public void Dispose()
{
this.readerWriterLock.ExitReadLock();
}
}
public struct UpgradeableReadLockHelper : IDisposable
{
private readonly ReaderWriterLockSlim readerWriterLock;
public UpgradeableReadLockHelper(ReaderWriterLockSlim readerWriterLock)
{
readerWriterLock.EnterUpgradeableReadLock();
this.readerWriterLock = readerWriterLock;
}
public void Dispose()
{
this.readerWriterLock.ExitUpgradeableReadLock();
}
}
public struct WriteLockHelper : IDisposable
{
private readonly ReaderWriterLockSlim readerWriterLock;
public WriteLockHelper(ReaderWriterLockSlim readerWriterLock)
{
readerWriterLock.EnterWriteLock();
this.readerWriterLock = readerWriterLock;
}
public void Dispose()
{
this.readerWriterLock.ExitWriteLock();
}
}
}
My guess (you would need to profile to verify) is that the performance drop isn't from creating the Disposable instances (they should be fairly cheap, being structs). Instead I expect it's from creating the Action delegates. You could try changing the implementation of your Disposable struct to store the instance of ReaderWriterLockSlim instead of creating an Action delegate.
Edit: #280Z28's post confirms that it's the heap allocation of Action delegates that's causing the slowdown.
Related
What differance below two locking ways is going to make. I am doubtful on significance of this way of locking as using exention method and want to understand the internal working.
public static int DoJob(this MapperClass Mapper)
{
object MyLock = new object();
lock (MyLock)
{
if (Mapper.MapId != null)
{
//Do some Work
}
else
{
//Do something else
}
}
}
Calling Method:
private void InvokeDoJob(List<MapperClass> Mapper)
{
Mapper.ForEach(item =>
{
item.DoJob();
});
}
and this InvokeDojob is calling on multithreading model.
Suggest me how locking would be done and is this the right approach.
Also what difference below code is making. I see locking done on class level only.
class Department
{
Object thisLock = new Object();
int salary;
Random r = new Random();
public Department(int initial)
{
salary = initial;
}
int Withdraw(int amount)
{
lock (thisLock)
{
if (salary >= amount)
{
//Console.WriteLine("salary before Withdrawal : " + salary);
return amount = 10;
}
else
{
return amount = 20;
}
}
}
}
I am using C# and I have an enumerator and I am reading the data inside the enumerator sequentially.
This is a third party library object and does not support Parallel.Foreach
while(enumerator.Next())
{
var item = enumerator.Read();
ProcessItem(item);
}
ProcessItem(Item item)
{
// Is lock required here
if(item.prop == "somevalue")
this._list.Add(item);
}
I want to achieve multithreading here while reading the content.
while(enumerator.Next())
{
// This code should run in a multi-threaded way
var item = enumerator.Read();
// ProcessItem method puts these items on a class level list property
// Is there any Lock required?
ProcessItem(item);
}
I am new to multithreading. Please share any code samples which satisfies the above requirement.
Yes, some locking required. you can achieve it using lock or using a concurrent collection type.
using lock:
ProcessItem(Item item)
{
if(item.prop == "somevalue")
{
lock(_list)
{
_list.Add(item);
}
}
}
Edit: based on detail you provided, you can wrap the enumerator from external lib using your own enumerator like below so you can use Parallel.ForEach on it:
We assume the enumerator you got is something like MockEnumerator, we wrap it in a normal IEnumerator, and IEnumerable so we are able to use Parallel.ForEach to read in parallel.
class Program
{
class Item
{
public int SomeProperty { get; }
public Item(int prop)
{
SomeProperty = prop;
}
}
class MockEnumerator
{
private Item[] _items = new Item[] { new Item(1), new Item(2) };
private int _position = 0;
public bool Next()
{
return _position++ < _items.Length;
}
public Item Read()
{
return _items[_position];
}
}
class EnumeratorWrapper : IEnumerator<Item>, IEnumerable<Item>
{
private readonly MockEnumerator _enumerator;
public EnumeratorWrapper(MockEnumerator enumerator)
{
this._enumerator = enumerator;
}
public Item Current => _enumerator.Read();
object IEnumerator.Current => Current;
public void Dispose()
{
}
public IEnumerator<Item> GetEnumerator()
{
throw new NotImplementedException();
}
public bool MoveNext()
{
return _enumerator.Next();
}
public void Reset()
{
}
IEnumerator IEnumerable.GetEnumerator()
{
return this;
}
}
private static List<Item> _list = new List<Item>();
static void Main(string[] args)
{
var enumerator = new EnumeratorWrapper(new MockEnumerator());
Parallel.ForEach(enumerator, item =>
{
if (item.SomeProperty == 1)//someval
{
lock (_list)
{
_list.Add(item);
}
}
});
}
}
This is a good example for task-based parallelization. Each processing of an item corresponds to a task. Hence, you can change the loop to the following:
var tasks = new List<Task<int>>();
while(enumerator.MoveNext())
{
var item = enumerator.Current;
Task<int> task = new Task<int>(() => ProcessItem(item));
task.Start();
tasks.Add(task);
}
foreach(Task<int> task in tasks)
{
int i = task.Result;
classList.Add(i);
}
Note that the synchronization on the classList is implicitly given by first spawning all tasks in the while loop and then merging the results in the foreach loop. The synchronization is specifically given by the access to Result which waits until the corresponding task is finished.
I want to be able to get the return values from all the methods in my delegate. This is the code I have written in c#.
using UnityEngine;
using System.Collections;
public static class DelagetsAndEvents {
public delegate int UnitEventHandler(string _unit);
public static event UnitEventHandler unitSpawn;
public static int UnitSpawn(string _unit)
{
if(unitSpawn != null)
{
unitSpawn(_unit);
}
// here I want to return 1 + 2 from Planet/UnitSpawn and SolarSystem/UnitSpawn
// is it possible to run a foreach on every method in the delegate and add their returns?
return (method 1's return value) + (method 2's return value) (Or both seperately, that would be even better)
}
}
public class Planet {
public Planet()
{
DelagetsAndEvents.unitSpawn += UnitSpawn;
}
int UnitSpawn(string _unit)
{
Debug.Log("yo");
return 1;
}
}
public class SolarSystem{
public SolarSystem()
{
DelagetsAndEvents.unitSpawn += UnitSpawn;
}
int UnitSpawn(string _unit)
{
Debug.Log("bla");
return 2;
}
}
As you can see, the delegate has a return type of int. Then the methods I put into my delegate also have the return type of int. One of them return 1 and the other one return 2. Is there a way to get those results to the location where I execute my delegate? That will be here:
using UnityEngine;
using System.Collections;
public class TestDelagets : MonoBehaviour {
void Start () {
SolarSystem s = new SolarSystem();
Planet p = new Planet();
string g = "";
int i = DelagetsAndEvents.UnitSpawn(g);
Debug.Log(i);
}
}
Well, in the "regular" .NET framework, you could use Delegate.GetInvocationList. For example, to combine that with LINQ:
// Note: do all of this after checking that unitSpawn is non-null...
var results = unitSpawn.GetInvocationList()
.Cast<UnitEventHandler>()
.Select(d => d(_unit))
.ToList();
I don't know offhand whether that will work with Unity, but I'd hope it would...
If the LINQ part doesn't work, you could use:
var invocations = unitSpawn.GetInvocationList();
var results = new int[invocations.Length];
for (int i = 0; i < invocations.Length; i++)
{
results[i] = ((UnitEventHandler)invocations[i]).Invoke(_unit);
}
As you mention that you would need to get the added value or the two separate values, I would choose a different approach.
You could use Linq but Unity recommends to avoid it. Most likely due to the process of serialization between C++ and C# and GC.
You could store your methods in an array of actions. Then you can either get the full amount, or one by one with a basic foreach loop.
public class DelegateContainer : IDisposable{
private IList<Func<string, int>> container = null;
public DelegateContainer(){
this.container = new List<Func<string,int>>();
}
public void Dispose(){
this.container.Clear();
this.container = null;
}
public bool AddMethod(Func<string, int> func){
if(func != null && this.container.Contains(func) == false){
this.container.Add(func);
return true;
}
return false;
}
public bool RemoveMethod(Func<string, int>func){
if(func != null && this.container.Contains(func) == true){
this.container.Remove(func);
return true;
}
return false;
}
public int GetFullValue(){
int total = 0;
foreach(var meth in this.container){
if(meth != null) { total += meth(""); }
}
return total;
}
public IEnumerable<int> GetAllValues(){
IList <int> list = new List<int>();
foreach(var meth in this.container){
if(meth != null) { list.Add(meth("");); }
}
return list as IEnumerable<int>;
}
}
Thanks guys! It helped alot. I solved it with the folowing code:
using UnityEngine;
using System.Collections;
public static class DelagetsAndEvents {
public delegate int UnitEventHandler(string _unit);
public static event UnitEventHandler unitSpawn;
public static int[] UnitSpawn(string _unit)
{
if(unitSpawn != null)
{
unitSpawn(_unit);
}
System.Delegate[] funcs = unitSpawn.GetInvocationList();
int[] TIntArray = new int[funcs.Length];
for (int i = 0; i < funcs.Length; ++i)
{
TIntArray[i] = (int) funcs[i].DynamicInvoke(_unit);
}
return TIntArray;
}
}
public class Planet {
public Planet()
{
DelagetsAndEvents.unitSpawn += UnitSpawn;
}
int UnitSpawn(string _unit)
{
Debug.Log("yo");
return 1;
}
}
public class SolarSystem{
public SolarSystem()
{
DelagetsAndEvents.unitSpawn += UnitSpawn;
}
int UnitSpawn(string _unit)
{
Debug.Log("bla");
return 2;
}
}
and:
using UnityEngine;
using System.Collections;
using System.Collections;
public class TestDelagets : MonoBehaviour {
void Start () {
SolarSystem s = new SolarSystem();
Planet p = new Planet();
string g = "";
int[] i = DelagetsAndEvents.UnitSpawn(g);
foreach(int f in i)
{
Debug.Log(f);
}
}
}
I want to implement following data-type
public class MyType
{
void Set(int i);
void AddHandler(int i, Action action);
}
Semantics are as follows.
Both methods must be concurrency safe.
Maximum value of 'i' is known and is relatively low (~100).
Trying to set i more than once should fail.
Calling set with value i should call all handlers registered for that i.
AddHandler registers new handler for given i. If i is already set, action is immediately called.
For example, consider the following sequence
Set(1)
Set(2)
AddHandler(3, f1)
AddHandler(3, f2)
Set(1) // Fails, 1 is already set
AddHandler(2, g) // g is called as 2 is already set
Set(3) // f1, f2 are called
AddHandler(3, h) // h is called as 3 is now set
Goal is to minimize allocations needed to be done for each method call. Here is code for my attempt to implement it.
public class MyType
{
const int N = 10;
static readonly Action[] s_emptyHandler = new Action[0];
readonly bool[] m_vars = new bool[N];
readonly List<Action>[] m_handlers = new List<Action>[N];
public void Set(int i)
{
Action[] handlers;
lock (this)
{
if (m_vars[i]) throw new InvalidOperationException();
m_vars[i] = true;
handlers = m_handlers[i] != null ? m_handlers[i].ToArray() : s_emptyHandler;
}
foreach (var action in handlers)
action();
}
public void AddHandler(int i, Action action)
{
var done = false;
lock (this)
{
if (m_vars[i])
done = true;
else
{
if(m_handlers[i] == null)
m_handlers[i] = new List<Action>();
m_handlers[i].Add(action);
}
}
if (done)
action();
}
}
Taking array snapshot at every Set method is ineffective. From the other side, since you need additional synchronization, using BlockingCollection doesn't make sense. For your case, some immutable collection would fit better.
There are even simple method taking advantage of the fact that you are only adding handlers. We can use an explicit array with count field pair instead of a list class, so all we need to do inside the Set method is to take array reference and count value inside the protected block. Then we can safely iterate the array up to count and invoke the handlers. Here is a code using the approach described:
public class MyType
{
struct Entry
{
public bool IsSet;
public int HandlerCount;
public Action[] HandlerList;
public void Add(Action handler)
{
if (HandlerList == null) HandlerList = new Action[4];
else if (HandlerList.Length == HandlerCount) Array.Resize(ref HandlerList, 2 * HandlerCount);
HandlerList[HandlerCount++] = handler;
}
}
const int N = 10;
readonly Entry[] entries = new Entry[N];
readonly object syncLock = new object();
public void Set(int index)
{
int handlerCount;
Action[] handlerList;
lock (syncLock)
{
if (entries[index].IsSet) throw new InvalidOperationException();
entries[index].IsSet = true;
handlerCount = entries[index].HandlerCount;
handlerList = entries[index].HandlerList;
}
for (int i = 0; i < handlerCount; i++)
handlerList[i]();
}
public void AddHandler(int index, Action handler)
{
if (handler == null) throw new ArgumentException("handler");
lock (syncLock)
{
entries[index].Add(handler);
if (!entries[index].IsSet) return;
}
handler();
}
}
public class MyType
{
private HashSet<int> set = new HashSet<int>();
private Dictionary<int, BlockingCollection<Action>> actions = new Dictionary<int, BlockingCollection<Action>>();
private void ExecuteActions(BlockingCollection<Action> toExecute)
{
Task.Run(() =>
{
while (!toExecute.IsCompleted)
{
try
{
Action action = toExecute.Take();
action();
}
catch { }
}
});
}
public void Set(int i)
{
lock (this)
{
if (!set.Contains(i))
{
set.Add(i);
BlockingCollection<Action> toExecute;
if (!actions.TryGetValue(i, out toExecute))
{
actions[i] = toExecute = new BlockingCollection<Action>();
}
ExecuteActions(toExecute);
}
}
}
public void AddHandler(int i, Action action)
{
lock (this)
{
BlockingCollection<Action> toExecute;
if (!actions.TryGetValue(i, out toExecute))
{
actions[i] = toExecute = new BlockingCollection<Action>();
}
toExecute.Add(action);
}
}
}
The thing is I've been using the lock statement to protect a critical part of my code, but now, I realize I could allow concurrent execution of that critical code is some conditions are met.
Is there a way to condition the lock?
bool locked = false;
if (condition) {
Monitor.Enter(lockObject);
locked = true;
}
try {
// possibly critical section
}
finally {
if (locked) Monitor.Exit(lockObject);
}
EDIT: yes, there is a race condition unless you can assure that the condition is constant while threads are entering.
I'm no threading expert, but it sounds like you might be looking for something like this (double-checked locking). The idea is to check the condition both before and after acquiring the lock.
private static object lockHolder = new object();
if (ActionIsValid()) {
lock(lockHolder) {
if (ActionIsValid()) {
DoSomething();
}
}
}
Action doThatThing = someMethod;
if (condition)
{
lock(thatThing)
{
doThatThing();
}
}
else
{
doThatThing();
}
Actually, to avoid a race condition, I'd be tempted to use a ReaderWriterLockSlim here - treat concurrent access as a read lock, and exclusive access as a write lock. That way, if the conditions change you won't end up with some inappropriate code still executing blindly in the region (under the false assumption that it is safe); a bit verbose, but
(formatted for space):
if (someCondition) {
lockObj.EnterReadLock();
try { Foo(); }
finally { lockObj.ExitReadLock(); }
} else {
lockObj.EnterWriteLock();
try { Foo(); }
finally { lockObj.ExitWriteLock(); }
}
If you have many methods/properties that require conditional locking, you don't want to repeat the same pattern over and over again. I propose the following trick:
Non-repetitive conditional-lock pattern
With a private helper struct implementing IDisposable we can encapsulate the condition/lock without measurable overhead.
public void DoStuff()
{
using (ConditionalLock())
{
// Thread-safe code
}
}
It's quite easy to implement. Here's a sample class demonstrating this pattern:
public class Counter
{
private static readonly int MAX_COUNT = 100;
private readonly bool synchronized;
private int count;
private readonly object lockObject = new object();
private int lockCount;
public Counter(bool synchronized)
{
this.synchronized = synchronized;
}
public int Count
{
get
{
using (ConditionalLock())
{
return count;
}
}
}
public int LockCount
{
get
{
using (ConditionalLock())
{
return lockCount;
}
}
}
public void Increase()
{
using (ConditionalLock())
{
if (count < MAX_COUNT)
{
Thread.Sleep(10);
++count;
}
}
}
private LockHelper ConditionalLock() => new LockHelper(this);
// This is where the magic happens!
private readonly struct LockHelper : IDisposable
{
private readonly Counter counter;
private readonly bool lockTaken;
public LockHelper(Counter counter)
{
this.counter = counter;
lockTaken = false;
if (counter.synchronized)
{
Monitor.Enter(counter.lockObject, ref lockTaken);
counter.lockCount++;
}
}
private void Exit()
{
if (lockTaken)
{
Monitor.Exit(counter.lockObject);
}
}
void IDisposable.Dispose() => Exit();
}
}
Now, let's create a small sample program demonstrating its correctness.
class Program
{
static void Main(string[] args)
{
var onlyOnThisThread = new Counter(synchronized: false);
IncreaseToMax(c1);
var onManyThreads = new Counter(synchronized: true);
var t1 = Task.Factory.StartNew(() => IncreaseToMax(c2));
var t2 = Task.Factory.StartNew(() => IncreaseToMax(c2));
var t3 = Task.Factory.StartNew(() => IncreaseToMax(c2));
Task.WaitAll(t1, t2, t3);
Console.WriteLine($"Counter(false) => Count = {c1.Count}, LockCount = {c1.LockCount}");
Console.WriteLine($"Counter(true) => Count = {c2.Count}, LockCount = {c2.LockCount}");
}
private static void IncreaseToMax(Counter counter)
{
for (int i = 0; i < 1000; i++)
{
counter.Increase();
}
}
}
Output:
Counter(false) => Count = 100, LockCount = 0
Counter(true) => Count = 100, LockCount = 3002
Now you can let the caller decide whether locking (costly) is needed.
I'm guessing you've got some code that looks a little like this:
private Monkey GetScaryMonkey(int numberOfHeads){
Monkey ape = null;
lock(this) {
ape = new Monkey();
ape.AddHeads(numberOfHeads);
}
return ape;
}
To make this conditional couldn't you just do this:
private Monkey GetScaryMonkey(int numberOfHeads){
if ( numberOfHeads > 1 ) {
lock(this) {
return CreateNewMonkey( numberOfHeads );
}
}
return CreateNewMonkey( numberOfHeads );
}
Should work, no?
Use Double-checked locking pattern, as suggested above. that's the trick IMO :)
make sure you have your lock object as a static, as listed in not.that.dave.foley.myopenid.com's example.