I have an embedded Debian board running Mono with a .NET 4.0 application that uses a fixed number of threads (no actions, no tasks). Because of memory issues I used CLR Profiler on Windows to analyse the memory heap.
The following diagram shows that IThreadPoolWorkItems are not being collected (at least not in generation 0):
Now, I really don't have any idea where these objects could be used and why they aren't collected.
Where could this behaviour come from, and where would the IThreadPoolWorkItem be used?
What can I do to find out where they are being used? (I couldn't find them by searching the code or looking in CLR Profiler yet.)
Edit
...
private Dictionary<byte, Telegram> _incoming = new Dictionary<byte, Telegram>();
private Queue<byte> _serialDataQueue;
private byte[] _receiveBuffer = new byte[2048];
private Dictionary<Telegram, Telegram> _resultQueue = new Dictionary<Telegram, Telegram>();
private static Telegram _currentTelegram;
ManualResetEvent _manualReset = new ManualResetEvent(false);
...
// Called from other thread (class) to send new telegrams
public bool Send(Dictionary<byte, Telegram> telegrams, out IDictionary<Telegram, Telegram> received)
{
try
{
_manualReset.Reset();
_incoming.Clear(); // clear all prev sending telegrams
_resultQueue.Clear(); // clear the receive queue
using (token = new CancellationTokenSource())
{
foreach (KeyValuePair<byte, Telegram> pair in telegrams)
{
_incoming.Add(pair.Key, pair.Value);
}
int result = WaitHandle.WaitAny(new[] { token.Token.WaitHandle, _manualReset });
received = _resultQueue.Clone<Telegram, Telegram>();
_resultQueue.Clear();
return result == 1;
}
}
catch (Exception err)
{
...
received = null; // the out parameter must be assigned on every return path
return false;
}
}
// Communication-Thread
public void Run()
{
while(true)
{
...
GetNextTelegram(); // _currentTelegram is set there and _incoming Queue is dequeued
byte[] telegramArray = GenerateTelegram(_currentTelegram, ... );
bool telegramReceived = SendReceiveTelegram(3000, telegramArray);
...
}
}
// Helper method to send and receive telegrams
private bool SendReceiveTelegram(int timeOut, byte[] telegram)
{
// send telegram
try
{
// check if serial port is open
if (_serialPort != null && !_serialPort.IsOpen)
{
_serialPort.Open();
}
Thread.Sleep(10);
_serialPort.Write(telegram, 0, telegram.Length);
}
catch (Exception err)
{
log.ErrorFormat(err.Message, err);
return false;
}
// receive telegram
int offset = 0, bytesRead;
_serialPort.ReadTimeout = timeOut;
int bytesExpected = GetExpectedBytes(_currentTelegram);
if (bytesExpected == -1)
return false;
try
{
while (bytesExpected > 0 &&
(bytesRead = _serialPort.Read(_receiveBuffer, offset, bytesExpected)) > 0)
{
offset += bytesRead;
bytesExpected -= bytesRead;
}
for (int index = 0; index < offset; index++)
_serialDataQueue.Enqueue(_receiveBuffer[index]);
List<byte> resultList;
// looks if telegram is valid and removes bytes from _serialDataQueue
bool isValid = IsValid(_serialDataQueue, out resultList, _currentTelegram);
if (isValid && resultList != null)
{
// only add to the result queue if it's really needed!
byte[] receiveArray = resultList.ToArray();
_resultQueue.Add((Telegram)_currentTelegram.Clone(), respTelegram);
}
if (!isValid)
{
Clear();
}
return isValid;
}
catch (TimeoutException err)
{
log.ErrorFormat(err.Message, err);
Clear();
return false;
} catch (Exception err)
{
log.ErrorFormat(err.Message, err);
Clear();
return false;
}
}
Thanks for your help!
I found out, as spender already mentioned, that the "issue" is the communication over SerialPort. I found an interesting topic here:
SerialPort has a background thread that's waiting for events (via WaitCommEvent). Whenever an event arrives, it queues a threadpool work
item that may result in a call to your event handler. Let's focus on
one of these threadpool threads. It tries to take a lock (quick
reason: this exists to synchronize event raising with closing; for
more details see the end) and once it gets the lock it checks whether
the number of bytes available to read is above the threshold. If so,
it calls your handler.
So this lock is the reason your handler won't be called in separate
threadpool threads at the same time.
That's most certainly the reason why they aren't collected immediately. I also tried not using the blocking Read in my SendReceiveTelegram method, but using a SerialDataReceivedEventHandler instead led to the same result.
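For reference, the thread-pool hop described in the quote is easy to observe with a small sketch like the following (illustrative only; Mono's SerialPort implementation may behave differently from Microsoft's):
using System;
using System.IO.Ports;
using System.Threading;

class SerialPortThreadDemo
{
    static void Main()
    {
        // The port name is just an example; use the device you actually have.
        using (var port = new SerialPort("/dev/ttyS0", 9600))
        {
            port.DataReceived += (sender, e) =>
            {
                // On the Microsoft implementation this prints True:
                // the event is raised from a queued thread-pool work item.
                Console.WriteLine("Handler on thread pool: {0}",
                    Thread.CurrentThread.IsThreadPoolThread);
            };
            port.Open();
            Console.ReadKey();
        }
    }
}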
So for me, I will leave things as they are for now, unless someone offers a better solution where these ThreadPoolWorkItems aren't kept in the queue for that long.
Thanks for your help and also for your negative assessment :-D
The user specifies a filename and a block size. The original file is split into blocks of the given block size (except the last block). For each block a SHA256 hash is calculated and written to the console.
This is a program with 2 threads: the first thread reads the original file and puts each block's byte array into a queue; the second thread removes the byte arrays from the queue and calculates the hashes.
After the first iteration the memory is not released until the program completes.
On the following iterations memory is allocated and released normally.
So, during the next reading of a part array I get an OutOfMemoryException.
How can I manage memory correctly to avoid the memory leak?
class Encryption
{
static FileInfo originalFile;
static long partSize = 0;
static long lastPartSize = 0;
static long numParts = 0;
static int lastPartNumber = 0;
static string[] hash;
static Queue<byte[]> partQueue = new Queue<byte[]>();
public Encryption(string _filename, long _partSize)
{
try
{
originalFile = new FileInfo(_filename);
partSize = _partSize;
numParts = originalFile.Length / partSize;
lastPartSize = originalFile.Length % partSize;
if (lastPartSize != 0)
{
numParts++;
}
else if (lastPartSize == 0)
{
lastPartSize = partSize;
}
lastPartNumber = (int)numParts - 1;
hash = new string[numParts];
}
catch (FileNotFoundException fe)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
return;
}
catch (Exception e)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
}
}
private void readFromFile()
{
try
{
using (FileStream fs = new FileStream(originalFile.FullName, FileMode.Open, FileAccess.Read))
{
for (int i = 0; i < numParts; i++)
{
long len = 0;
if (i == lastPartNumber)
{
len = lastPartSize;
}
else
{
len = partSize;
}
byte[] part = new byte[len];
fs.Read(part, 0, (int)len);
partQueue.Enqueue(part);
part = null;
}
}
}
catch(Exception e)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
}
}
private static void hashToArray()
{
try
{
SHA256Managed sha256HashString = new SHA256Managed();
int numPart = 0;
while (numPart < numParts)
{
long len = 0;
if (numPart == lastPartNumber)
{
len = lastPartSize;
}
else
{
len = partSize;
}
hash[numPart] = sha256HashString.ComputeHash(partQueue.Dequeue()).ToString();
numPart++;
}
}
catch (Exception e)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
}
}
private void hashWrite()
{
try
{
Console.WriteLine("\nResult:\n");
for (int i = 0; i < numParts; i++)
{
Console.WriteLine("{0} : {1}", i, hash[i]);
}
}
catch(Exception e)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
}
}
public void threadsControl()
{
try
{
Thread readingThread = new Thread(readFromFile);
Thread calculateThread = new Thread(hashToArray);
readingThread.Start();
calculateThread.Start();
readingThread.Join();
calculateThread.Join();
hashWrite();
}
catch (Exception e)
{
Console.WriteLine("Error: {0}\nStackTrace: {1}", fe.Message, fe.StackTrace);
}
}
}
You should read some books about .NET internals before writing code like this. Your understanding of the .NET memory model is wrong, and that is why you are getting this error. An OutOfMemoryException occurs very rarely if you take care of your resources, especially when you are dealing with arrays.
You should know that the .NET runtime has two heaps for reference objects, the regular one and the Large Object Heap (LOH), and the most important difference between them is that the LOH is not compacted, even after a garbage collection.
You should know that large arrays (85,000 bytes and bigger) go to the LOH, so memory is consumed very quickly. Also you should know that this line:
part = null;
does not free memory immediately. Even worse, this line doesn't do anything at all, because you still hold a reference to that part of the file in the queue. This is why your memory runs out. You could try to fix this by forcing a garbage collection after each hash computation, but that is a highly discouraged solution.
You should rewrite your algorithm (which is a very simple case of the producer/consumer pattern) so that it does not store the whole file contents in memory at once. This is quite easy: move the part variable out to a static field and read the next file part into it. Introduce an EventWaitHandle (or one of its subclasses) in your code instead of the queue, and simply compute the next hash right after you've read the next part of the file.
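A minimal sketch of that idea (this is only an illustration, the field and method names are made up): two events ping-pong a single shared buffer between the reader and the hasher, so only one part is ever alive at a time.
static byte[] _part;                                         // the only buffer kept in memory
static readonly AutoResetEvent _partReady = new AutoResetEvent(false);
static readonly AutoResetEvent _partConsumed = new AutoResetEvent(true);

static void ReadFromFile(string fileName, long numParts, long partSize, long lastPartSize)
{
    using (var fs = new FileStream(fileName, FileMode.Open, FileAccess.Read))
    {
        for (long i = 0; i < numParts; i++)
        {
            _partConsumed.WaitOne();                         // wait until the hasher is done with the previous part
            long len = (i == numParts - 1) ? lastPartSize : partSize;
            _part = new byte[len];
            fs.Read(_part, 0, (int)len);
            _partReady.Set();                                // hand the buffer over to the hasher
        }
    }
}

static void HashParts(string[] hash, long numParts)
{
    using (var sha256 = new SHA256Managed())
    {
        for (long i = 0; i < numParts; i++)
        {
            _partReady.WaitOne();                            // wait for the reader
            hash[i] = BitConverter.ToString(sha256.ComputeHash(_part));
            _partConsumed.Set();                             // let the reader overwrite the buffer
        }
    }
}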
I recommend that you start with the basics of threading in C# by reading Joe Albahari's great series, and only after that try to implement solutions like this. Good luck with your projects.
I'm using a function to add some values to a dynamic array (I know I could use a list, but using an array is a requirement).
Right now everything is working, but I need to know when a thread fails to add a value (because the array is locked, so I can save that time) and when it actually adds it (I think I already have the latter, as you can see in the Add function).
Insert Data:
private void button6_Click(object sender, EventArgs e)
{
showMessage(numericUpDown5.Value.ToString());
showMessage(numericUpDown6.Value.ToString());
for (int i = 0; i < int.Parse(numericUpDown6.Value.ToString()); i++)
{
ThreadStart start = new ThreadStart(insertDataSecure);
new Thread(start).Start();
}
}
private void insertDataSecure()
{
for (int i = 0; i < int.Parse(numericUpDown5.Value.ToString()); i++)
sArray.addSecure(i);
MessageBox.Show(String.Format("Finished data inserted, you can check the result in: {0}", Path.Combine(
Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location),
"times.txt")), "Result", MessageBoxButtons.OK, MessageBoxIcon.Information);
}
Function to Add:
private object padLock = new object();
public void addSecure(int value)
{
Stopwatch sw = Stopwatch.StartNew();
string values = "";
lock (padLock)
{
try
{
if (array == null)
{
this.size = 1;
Resize(this.size);
array[0] = value;
count++;
}
else
{
count++;
if (size == count)
{
size *= 2;
Resize(size);
}
array[count - 1] = value;
}
}
catch
{
throw new System.ArgumentException("It was impossible to insert, try again later.", "insert");
}
values=String.Format("Element {0}, Time taken: {1}ms", value.ToString(), sw.Elapsed.TotalMilliseconds);
sw.Stop();
saveFile(values);
}
}
Sorry for asking this question, but I have read different articles and this is the last one I tried to use: http://msdn.microsoft.com/en-us/library/4tssbxcw.aspx but when I tried to implement it in my code it crashed with a strange error.
I'm afraid I might not completely understand the question. It sounds like you want to know how long it takes between the time the thread starts and the time it actually acquires the lock. But in that case, the thread does not actually fail to add a value; it is simply delayed for some period of time.
On the other hand, you do have an exception handler, so presumably there is some scenario you expect where the Resize() method can throw an exception (but you should catch only those exceptions you expect and know you can handle; a bare catch clause is not a good idea, though the harm is mitigated somewhat by the fact that you do throw an exception from the exception handler). So I can't help but wonder if that is the failure you're talking about.
That said, assuming the former interpretation is correct – that you want to time how long it takes to acquire the lock – then the following change to your code should do that:
public void addSecure(int value)
{
Stopwatch sw = Stopwatch.StartNew();
string values = "";
lock (padLock)
{
// Save the current timer value here
TimeSpan elapsedToAcquireLock = sw.Elapsed;
try
{
if (array == null)
{
this.size = 1;
Resize(this.size);
array[0] = value;
count++;
}
else
{
count++;
if (size == count)
{
size *= 2;
Resize(size);
}
array[count - 1] = value;
}
}
catch
{
throw new System.ArgumentException("It was impossible to insert, try again later.", "insert");
}
sw.Stop();
values = string.Format(
"Element {0}, Time taken: for lock acquire: {1}ms, for append operation: {2}ms",
value.ToString(),
elapsedToAcquireLock.TotalMilliseconds,
sw.Elapsed.TotalMilliseconds - elapsedToAcquireLock.TotalMilliseconds);
saveFile(values);
}
}
That will display the individual times for the sections of code: acquiring the lock, and then actually adding the value to the array (i.e. the latter not including the time taken to acquire the lock).
If that's not actually what you are trying to do, please edit your question so that it is more clear.
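If instead "fails" means that the lock was busy and the thread should give up rather than wait, then a variation along these lines could report that (this is an assumption about the intent, not part of the code above); Monitor.TryEnter with no timeout returns immediately:
public bool TryAddSecure(int value)
{
    // Try to take the lock without blocking; if another thread holds it,
    // report failure instead of waiting for it to be released.
    if (!Monitor.TryEnter(padLock))
    {
        saveFile(string.Format("Element {0}: lock was busy, value not added", value));
        return false;
    }
    try
    {
        // ... same resize/append logic as in addSecure ...
        return true;
    }
    finally
    {
        Monitor.Exit(padLock);
    }
}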
I recently started a new job and there is a Windows service here that consumes messages from a private Windows queue. This service consumes messages only from 9am to 6pm, so from 7pm to 8:59am it accumulates a lot of messages in the queue. When it starts processing at 9am, the CPU usage of the service goes very high (98, 99 percent), hurting the server's performance.
This service uses threads to process the messages from the queue, but as I had never worked with threads before I am a little lost.
Here's the part of the code where I am sure this is happening:
private Thread[] th;
//in the constructor of the class, the variable th is initialized like this:
this.th = new Thread[4];
//This method is called every second; CPU usage only goes high when there are a lot of messages in the queue
public void Exec()
{
try
{
AutoResetEvent autoEvent = new AutoResetEvent(false);
int vQtd = queue.GetAllMessages().Length;
while (vQtd > 0)
{
for (int y = 0; y < th.Length; y++)
{
if (this.th[y] == null || !this.th[y].IsAlive)
{
this.th[y] = new Thread(new ParameterizedThreadStart(ProcessMessage));
this.th[y].Name = string.Format("Thread_{0}", y);
this.th[y].Start(new Controller(queue.Receive(), autoEvent));
vQtd--;
}
}
}
}
catch (Exception ex)
{
ExceptionPolicy.HandleException(ex, "RECOVERABLE");
}
}
EDIT: I am trying the second approach posted by Brian Gideon, but I'll be honest: I'm deeply confused by the code and I don't have a clue what it's doing.
I haven't changed the way the 4 threads are created or the other code I showed; I just changed my Exec method (Exec is the method called every second between 9am and 6pm) to this:
public void Exec()
{
try
{
AutoResetEvent autoEvent = new AutoResetEvent(false);
int vQtd = queue.GetAllMessages().Length;
while (vQtd > 0)
{
for (int i = 0; i < 4; i++)
{
var thread = new Thread(
(ProcessMessage) =>
{
while (true)
{
Message message = queue.Receive();
Controller controller = new Controller(message, autoEvent);
//what am I supposed to do with the controller?
}
});
thread.IsBackground = true;
thread.Start();
}
vQtd--;
}
}
catch (Exception ex)
{
ExceptionPolicy.HandleException(ex, "RECOVERABLE");
}
}
Ouch. I have to be honest: that is not a very good design. It could very well be spinning around that while loop waiting for previous threads to finish processing. Here is a much better way of doing it. Notice that the 4 threads are only created once and hang around forever. The code below uses the BlockingCollection from the .NET 4.0 BCL. If you are using an earlier version you can replace it with Stephen Toub's BlockingQueue.
Note: further refactoring may be warranted in your case. This code tries to preserve some common elements from the original.
public class Example
{
private BlockingCollection<Controller> m_Queue = new BlockingCollection<Controller>();
public Example()
{
for (int i = 0; i < 4; i++)
{
var thread = new Thread(
() =>
{
while (true)
{
Controller controller = m_Queue.Take();
// Do whatever you need to with Controller here.
}
});
thread.IsBackground = true;
thread.Start();
}
}
public void Exec()
{
try
{
AutoResetEvent autoEvent = new AutoResetEvent(false);
int vQtd = queue.GetAllMessages().Length;
while (vQtd > 0)
{
m_Queue.Add(new Controller(queue.Receive(), autoEvent));
vQtd--;
}
}
catch (Exception ex)
{
ExceptionPolicy.HandleException(ex, "RECOVERABLE");
}
}
}
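If you are stuck on .NET 3.5, a minimal blocking queue along these lines can stand in for BlockingCollection (this is only a sketch of the idea, not Stephen Toub's implementation; it needs System.Collections.Generic and System.Threading):
public class SimpleBlockingQueue<T>
{
    private readonly Queue<T> _queue = new Queue<T>();
    private readonly object _lock = new object();

    public void Add(T item)
    {
        lock (_lock)
        {
            _queue.Enqueue(item);
            Monitor.Pulse(_lock);          // wake one waiting consumer
        }
    }

    public T Take()
    {
        lock (_lock)
        {
            while (_queue.Count == 0)
                Monitor.Wait(_lock);       // block until a producer adds an item
            return _queue.Dequeue();
        }
    }
}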
Edit:
Or better yet since MessageQueue is thread-safe:
public class Example
{
public Example()
{
for (int i = 0; i < 4; i++)
{
var thread = new Thread(
() =>
{
while (true)
{
if (/* between 9am and 6pm */)
{
Message message = queue.Receive();
Controller controller = new Controller(message, /* AutoResetEvent? */);
// Do whatever you need to with Controller here.
// Is the AutoResetEvent really needed?
}
}
});
thread.IsBackground = true;
thread.Start();
}
}
}
The method you show runs in a tight loop when all threads are busy. Try something like this:
while (vQtd > 0)
{
bool full = true;
for (int y = 0; y < th.Length; y++)
{
if (this.th[y] == null || !this.th[y].IsAlive)
{
this.th[y] = new Thread(new ParameterizedThreadStart(ProcessMessage));
this.th[y].Name = string.Format("Thread_{0}", y);
this.th[y].Start(new Controller(queue.Receive(), autoEvent));
vQtd--;
full = false;
}
}
if (full)
{
Thread.Sleep(500); // Or whatever it may take for a thread to become free.
}
}
You have two options: either insert delays after each message with Thread.Sleep(), or lower the thread priority of the polling threads. If you lower the thread priority the CPU usage will still be high, but it should not affect performance that much.
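For the priority option, a one-line sketch against the thread array from the question (set it before calling Start; this is illustrative, not part of the original answer):
// Hypothetical: let the worker threads yield the CPU to other processes.
this.th[y].Priority = ThreadPriority.BelowNormal;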
Edit: or you can lower the number of threads from 4 to 3 to leave one core for other processing (assuming you have a quad core). This of course reduces your dequeuing throughput.
Edit2: or you could rewrite the whole thing with the Task Parallel Library if you are running .NET 4. Look at Parallel.ForEach(). That should save you some of the footwork if you are not familiar with threads.
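A rough sketch of what that could look like against the members from the question (queue, Controller, ProcessMessage and autoEvent are assumed from the posted code); it drains the queue first and then fans the work out over at most 3 cores:
// Drain whatever is currently in the queue.
var pending = new List<Message>();
try
{
    while (true)
        pending.Add(queue.Receive(TimeSpan.Zero));   // throws when the queue is empty
}
catch (MessageQueueException)
{
    // IOTimeout: nothing left to receive right now.
}

// Process the drained messages with bounded parallelism.
Parallel.ForEach(
    pending,
    new ParallelOptions { MaxDegreeOfParallelism = 3 },
    msg => ProcessMessage(new Controller(msg, autoEvent)));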
Question: I want to search the subnet for all computers in it.
So I send a ping to all IP addresses in the subnet.
The problem is that it works fine if I only scan 192.168.0.*.
But if I scan 192.168.*.*, then I get an "Out of memory" exception.
Why? Do I have to limit the threads, or is the problem the memory consumed by each new Ping that doesn't get released once it has finished, or do I need to call GC.Collect()?
static void Main(string[] args)
{
string strFromIP = "192.168.0.1";
string strToIP = "192.168.255.255";
Oyster.Math.IntX omiFromIP = 0;
Oyster.Math.IntX omiToIP = 0;
IsValidIP(strFromIP, ref omiFromIP);
IsValidIP(strToIP, ref omiToIP);
for (Oyster.Math.IntX omiThisIP = omiFromIP; omiThisIP <= omiToIP; ++omiThisIP)
{
Console.WriteLine(IPn2IPv4(omiThisIP));
System.Net.IPAddress sniIPaddress = System.Net.IPAddress.Parse(IPn2IPv4(omiThisIP));
SendPingAsync(sniIPaddress);
}
Console.WriteLine(" --- Press any key to continue --- ");
Console.ReadKey();
} // Main
// http://pberblog.com/post/2009/07/21/Multithreaded-ping-sweeping-in-VBnet.aspx
// http://www.cyberciti.biz/faq/how-can-ipv6-address-used-with-webbrowser/#comments
// http://www.kloth.net/services/iplocate.php
// http://bytes.com/topic/php/answers/829679-convert-ipv4-ipv6
// http://stackoverflow.com/questions/1434342/ping-class-sendasync-help
public static void SendPingAsync(System.Net.IPAddress sniIPaddress)
{
int iTimeout = 5000;
System.Net.NetworkInformation.Ping myPing = new System.Net.NetworkInformation.Ping();
System.Net.NetworkInformation.PingOptions parmPing = new System.Net.NetworkInformation.PingOptions();
System.Threading.AutoResetEvent waiter = new System.Threading.AutoResetEvent(false);
myPing.PingCompleted += new System.Net.NetworkInformation.PingCompletedEventHandler(AsyncPingCompleted);
string data = "ABC";
byte[] dataBuffer = Encoding.ASCII.GetBytes(data);
parmPing.DontFragment = true;
parmPing.Ttl = 32;
myPing.SendAsync(sniIPaddress, iTimeout, dataBuffer, parmPing, waiter);
//waiter.WaitOne();
}
private static void AsyncPingCompleted(Object sender, System.Net.NetworkInformation.PingCompletedEventArgs e)
{
System.Net.NetworkInformation.PingReply reply = e.Reply;
((System.Threading.AutoResetEvent)e.UserState).Set();
if (reply.Status == System.Net.NetworkInformation.IPStatus.Success)
{
Console.WriteLine("Address: {0}", reply.Address.ToString());
Console.WriteLine("Roundtrip time: {0}", reply.RoundtripTime);
}
}
According to this thread, System.Net.NetworkInformation.Ping seems to allocate one thread per async request, and "ping-sweeping a class-B network creates 100's of threads and eventually results in an out-of-memory error."
The workaround that person used was to write their own implementation using raw sockets. You don't have to do that in F#, of course, but there are a number of advantages in doing so.
First: Only start like 1000 pings the first time (in the loop in Main)
Second: Move the following parameters to Program class (member variables)
Oyster.Math.IntX omiFromIP = 0;
Oyster.Math.IntX omiToIP = 0;
Oyster.Math.IntX omiCurrentIp = 0;
object syncLock = new object();
Third: In AsyncPingCompleted do something like this at the bottom:
public void AsyncPingCompleted (bla bla bla)
{
//[..other code..]
lock (syncLock)
{
if (omiCurrentIp < omiToIP)
{
++omiCurrentIp;
System.Net.IPAddress sniIPaddress = System.Net.IPAddress.Parse(IPn2IPv4(omiCurrentIp));
SendPingAsync(sniIPaddress);
}
}
}
Update with complete code example
public class Example
{
// Number of pings that can be pending at the same time
private const int InitialRequests = 10000;
// variables from your Main method
private static Oyster.Math.IntX _omiFromIP = 0;
private static Oyster.Math.IntX _omiToIP = 0;
private static Oyster.Math.IntX _omiCurrentIp = 0;
// synchronize so that two threads
// cannot ping the same IP.
private static object _syncLock = new object();
static void Main(string[] args)
{
string strFromIP = "192.168.0.1";
string strToIP = "192.168.255.255";
IsValidIP(strFromIP, ref _omiFromIP);
IsValidIP(strToIP, ref _omiToIP);
for (_omiCurrentIp = _omiFromIP; _omiCurrentIp <= _omiFromIP + InitialRequests; ++_omiCurrentIp)
{
Console.WriteLine(IPn2IPv4(_omiCurrentIp));
System.Net.IPAddress sniIPaddress = System.Net.IPAddress.Parse(IPn2IPv4(_omiCurrentIp));
SendPingAsync(sniIPaddress);
}
Console.WriteLine(" --- Press any key to continue --- ");
Console.ReadKey();
} // Main
// http://pberblog.com/post/2009/07/21/Multithreaded-ping-sweeping-in-VBnet.aspx
// http://www.cyberciti.biz/faq/how-can-ipv6-address-used-with-webbrowser/#comments
// http://www.kloth.net/services/iplocate.php
// http://bytes.com/topic/php/answers/829679-convert-ipv4-ipv6
// http://stackoverflow.com/questions/1434342/ping-class-sendasync-help
public static void SendPingAsync(System.Net.IPAddress sniIPaddress)
{
int iTimeout = 5000;
System.Net.NetworkInformation.Ping myPing = new System.Net.NetworkInformation.Ping();
System.Net.NetworkInformation.PingOptions parmPing = new System.Net.NetworkInformation.PingOptions();
System.Threading.AutoResetEvent waiter = new System.Threading.AutoResetEvent(false);
myPing.PingCompleted += new System.Net.NetworkInformation.PingCompletedEventHandler(AsyncPingCompleted);
string data = "ABC";
byte[] dataBuffer = Encoding.ASCII.GetBytes(data);
parmPing.DontFragment = true;
parmPing.Ttl = 32;
myPing.SendAsync(sniIPaddress, iTimeout, dataBuffer, parmPing, waiter);
//waiter.WaitOne();
}
private static void AsyncPingCompleted(Object sender, System.Net.NetworkInformation.PingCompletedEventArgs e)
{
System.Net.NetworkInformation.PingReply reply = e.Reply;
((System.Threading.AutoResetEvent)e.UserState).Set();
if (reply.Status == System.Net.NetworkInformation.IPStatus.Success)
{
Console.WriteLine("Address: {0}", reply.Address.ToString());
Console.WriteLine("Roundtrip time: {0}", reply.RoundtripTime);
}
// Keep starting those async pings until all ips have been invoked.
lock (_syncLock)
{
if (_omiCurrentIp < _omiToIP)
{
++_omiCurrentIp;
System.Net.IPAddress sniIPaddress = System.Net.IPAddress.Parse(IPn2IPv4(_omiCurrentIp));
SendPingAsync(sniIPaddress);
}
}
}
}
I guess the problem is that you are spawning roughly 63K ping requests near-simultaneously. Without further memory profiling it is hard to say which parts consume the memory. You are working with network resources, which are probably limited. Throttling the number of active pings will ease the use of local resources and also the network traffic.
Again, I would look into the Task Parallel Library; the Parallel.For construct combined with Task<T> should make it easy for you.
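One possible shape of that idea (this sketch uses Parallel.ForEach rather than Parallel.For with Task<T>; IPn2IPv4, omiFromIP and omiToIP are the helpers and values from the question):
// Enumerate the range up front, then let the TPL cap the concurrency.
var addresses = new List<System.Net.IPAddress>();
for (Oyster.Math.IntX ip = omiFromIP; ip <= omiToIP; ++ip)
    addresses.Add(System.Net.IPAddress.Parse(IPn2IPv4(ip)));

Parallel.ForEach(
    addresses,
    new ParallelOptions { MaxDegreeOfParallelism = 64 },      // cap concurrent pings
    address =>
    {
        using (var ping = new System.Net.NetworkInformation.Ping())
        {
            var reply = ping.Send(address, 5000);              // synchronous inside the worker
            if (reply.Status == System.Net.NetworkInformation.IPStatus.Success)
                Console.WriteLine("Address: {0}  RTT: {1}", reply.Address, reply.RoundtripTime);
        }
    });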
Note: for .Net 3.5 users, there is hope.
I did something similar to this. The way I solved the problem in my project was to cast the ping instance to IDisposable:
(myPing as IDisposable).Dispose()
So get a list of say 254 ping instances running asynchronously (X.X.X.1/254) and keep track of when all of them have reported in. When they have, iterate through your list of ping instances, run the above code on each instance, and then dump the list.
Works like a charm.
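In code, that clean-up step might look like this (pingBatch is just an illustrative List<Ping> holding the instances):
// Once every ping in the batch has raised PingCompleted, release them all.
foreach (Ping p in pingBatch)
{
    (p as IDisposable).Dispose();
}
pingBatch.Clear();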
pseudo-code
do
if pings_running > 100 then
sleep 100ms.
else
start ping
endif
loop while morepings
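A rough C# rendering of that pseudo-code (illustrative only; pingsRunning would be an int field you decrement in your PingCompleted handler, and StartNextPing is a hypothetical helper that calls SendPingAsync for the next address):
// Throttle: never let more than ~100 pings be in flight at once.
while (morePings)
{
    if (Thread.VolatileRead(ref pingsRunning) > 100)
    {
        Thread.Sleep(100);                 // back off while the window is full
    }
    else
    {
        Interlocked.Increment(ref pingsRunning);
        StartNextPing();
    }
}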
Finally... no ping required at all:
http://www.codeproject.com/KB/cs/c__ip_scanner.aspx
All I needed to do was make it thread-safe for debugging.
Changing Add to:
void Add( string m )
{
Invoke(new MethodInvoker(
delegate
{
add.Items.Add(m);
}));
//add.Items.Add( m );
}
The VB.NET equivalent:
Private Sub Add(m As String)
Invoke(New MethodInvoker(Sub()
add.Items.Add(m)
End Sub))
'add.Items.Add(m)
End Sub
I'm facing a dilemma (!).
In a first scenario, I implemented a solution that replicates data from one database to another using SqlBulkCopy synchronously, and I had no problem at all.
Now, using the ThreadPool, I implemented the same thing asynchronously, one thread per table, and everything works fine, but after some time (usually 1 hour, because the copy operations take about that long) the operations sent to the ThreadPool stop being executed. There is a different SqlBulkCopy using a different SqlConnection per thread.
I already checked the number of free threads, and they are all free at the beginning of the invocation. I have one AutoResetEvent to wait for the threads to finish their job before launching again, and a FIFO semaphore that holds the counter of active threads.
Is there some issue that I have forgotten or that I should look into when using SqlBulkCopy? I'd appreciate some help, because I'm out of ideas ;)
->Usage
SemaphoreFIFO waitingThreads = new SemaphoreFIFO();
AutoResetEvent autoResetEvent = new AutoResetEvent(false);
(...)
List<TableMappingHolder> list = BulkCopy.Mapping(tables);
waitingThreads.Put(list.Count, 300000);
for (int i = 0; i < list.Count; i++){
ThreadPool.QueueUserWorkItem(call =>
//Replication
(...)
waitingThreads.Get();
if (waitingThreads.Counter == 0)
autoResetEvent.Set();
);
}
bool finalized = autoResetEvent.WaitOne(300000);
(...)
//Bulk Copy
public bool SetData(SqlDataReader reader, string _destinationTableName, List<SqlBulkCopyColumnMapping> _sqlBulkCopyColumnMappings)
{
using (SqlConnection destinationConnection =
new SqlConnection(ConfigurationManager.ConnectionStrings["dconn"].ToString()))
{
destinationConnection.Open();
// Set up the bulk copy object.
// Note that the column positions in the source
// data reader match the column positions in
// the destination table so there is no need to
// map columns.
using (SqlBulkCopy bulkCopy =
new SqlBulkCopy(destinationConnection)) {
bulkCopy.BulkCopyTimeout = 300000;
bulkCopy.DestinationTableName = _destinationTableName;
// Set up the column mappings by name.
foreach (SqlBulkCopyColumnMapping columnMapping in _sqlBulkCopyColumnMappings)
bulkCopy.ColumnMappings.Add(columnMapping);
try{
// Write from the source to the destination.
bulkCopy.WriteToServer(reader);
}
catch (Exception ex){return false;}
finally
{
try{reader.Close();}
catch (Exception e){//log}
try{bulkCopy.Close();}
catch (Exception e){//log}
try{destinationConnection.Close(); }
catch (Exception e){ //log }
}
}
}
return true;
}
//Semaphore
public sealed class SemaphoreFIFO
{
private int _counter;
private readonly LinkedList<int> waitQueue = new LinkedList<int>();
public int Counter
{
get { return _counter; }
}
private void internalNotify()
{
if (waitQueue.Count > 0 && _counter == 0)
{
Monitor.PulseAll(waitQueue);
}
}
public void Get()
{
lock (waitQueue)
{
_counter --;
internalNotify();
}
}
public bool Put(int n, int timeout)
{
if (timeout < 0 && timeout != Timeout.Infinite)
throw new ArgumentOutOfRangeException("timeout");
if (n < 0)
throw new ArgumentOutOfRangeException("n");
lock (waitQueue)
{
if (waitQueue.Count == 0 && _counter ==0)
{
_counter +=n;
internalNotify();
return true;
}
int endTime = Environment.TickCount + timeout;
LinkedListNode<int> me = waitQueue.AddLast(n);
try
{
while (true)
{
Monitor.Wait(waitQueue, timeout);
if (waitQueue.First == me && _counter ==0)
{
_counter += n;
waitQueue.RemoveFirst();
internalNotify();
return true;
}
if (timeout != Timeout.Infinite)
{
int remainingTime = endTime - Environment.TickCount;
if (remainingTime <= 0)
{
// TIMEOUT
if (waitQueue.First == me)
{
waitQueue.RemoveFirst();
internalNotify();
}
else
waitQueue.Remove(me);
return false;
}
timeout = remainingTime;
}
}
}
catch (ThreadInterruptedException e)
{
// INTERRUPT
if (waitQueue.First == me)
{
waitQueue.RemoveFirst();
internalNotify();
}
else
waitQueue.Remove(me);
throw; // rethrow, preserving the original stack trace
}
}
}
}
I would just go back to using SQLBulkCopy synchronously. I'm not sure what you gain by doing a bunch of bulk copies all at the same time (instead of one after the other). It may complete everything a bit faster, but I'm not even sure of that.
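If it helps, a minimal sketch of that synchronous fallback, reusing the SetData method from the question (the Reader, DestinationTableName and ColumnMappings properties on TableMappingHolder are assumptions, since that type isn't shown):
// Copy the tables one at a time instead of queueing thread-pool work items.
foreach (TableMappingHolder holder in BulkCopy.Mapping(tables))
{
    bool ok = SetData(holder.Reader, holder.DestinationTableName, holder.ColumnMappings);
    if (!ok)
    {
        // log and decide whether to continue with the next table
    }
}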