I constantly get the following message in my output/debug window. The app doesn't crash, but I was wondering what the deal with it is:
A first chance exception of type 'System.InvalidOperationException' occurred in System.dll
My code (Sol.cs):
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace ConsoleApplication1
{
class Sol
{
public LinkedList<int> tower1 = new LinkedList<int>();
public LinkedList<int> tower2 = new LinkedList<int>();
public LinkedList<int> tower3 = new LinkedList<int>();
public static LinkedList<string> BFS = new LinkedList<string>();
public static LinkedList<string> DFS = new LinkedList<string>();
public static LinkedList<string> IDS = new LinkedList<string>();
public int depth;
public LinkedList<Sol> neighbors;
public Sol()
{
}
public Sol(LinkedList<int> tower1, LinkedList<int> tower2, LinkedList<int> tower3)
{
this.tower1 = tower1;
this.tower2 = tower2;
this.tower3 = tower3;
neighbors = new LinkedList<Sol>();
}
public virtual void getneighbors()
{
Sol temp = this.copy();
Sol neighbor1 = this.copy();
Sol neighbor2 = this.copy();
Sol neighbor3 = this.copy();
Sol neighbor4 = this.copy();
Sol neighbor5 = this.copy();
Sol neighbor6 = this.copy();
if (temp.tower1.Count != 0)
{
if (neighbor1.tower2.Count != 0)
{
if (neighbor1.tower1.First.Value < neighbor1.tower2.First.Value)
{
neighbor1.tower2.AddFirst(neighbor1.tower1.First);
neighbor1.tower1.RemoveFirst();
neighbors.AddLast(neighbor1);
}
}
else
{
neighbor1.tower2.AddFirst(neighbor1.tower1.First);
neighbor1.tower1.RemoveFirst();
neighbors.AddLast(neighbor1);
}
if (neighbor2.tower3.Count != 0)
{
if (neighbor2.tower1.First.Value < neighbor2.tower3.First.Value)
{
neighbor2.tower3.AddFirst(neighbor2.tower1.First);
neighbor2.tower1.RemoveFirst();
neighbors.AddLast(neighbor2);
}
}
else
{
neighbor2.tower3.AddFirst(neighbor2.tower1.First);
neighbor2.tower1.RemoveFirst();
neighbors.AddLast(neighbor2);
}
}
//-------------
if (temp.tower2.Count != 0)
{
if (neighbor3.tower1.Count != 0)
{
if (neighbor3.tower2.First.Value < neighbor3.tower1.First.Value)
{
neighbor3.tower1.AddFirst(neighbor3.tower2.First);
neighbor3.tower2.RemoveFirst();
neighbors.AddLast(neighbor3);
}
}
else
{
neighbor3.tower1.AddFirst(neighbor3.tower2.First);
neighbor3.tower2.RemoveFirst();
neighbors.AddLast(neighbor3);
}
if (neighbor4.tower3.Count != 0)
{
if (neighbor4.tower2.First.Value < neighbor4.tower3.First.Value)
{
neighbor4.tower3.AddFirst(neighbor4.tower2.First);
neighbor4.tower2.RemoveFirst();
neighbors.AddLast(neighbor4);
}
}
else
{
neighbor4.tower3.AddFirst(neighbor4.tower2.First);
neighbor4.tower2.RemoveFirst();
neighbors.AddLast(neighbor4);
}
}
//------------------------
if (temp.tower3.Count() != 0)
{
if (neighbor5.tower1.Count() != 0)
{
if (neighbor5.tower3.ElementAtOrDefault(0) < neighbor5.tower1.ElementAtOrDefault(0))
{
neighbor5.tower1.AddFirst(neighbor5.tower3.First);
neighbor5.tower3.RemoveFirst();
neighbors.AddLast(neighbor5);
}
}
else
{
neighbor5.tower1.AddFirst(neighbor5.tower3.First);
neighbor5.tower3.RemoveFirst();
neighbors.AddLast(neighbor5);
}
if (neighbor6.tower2.Count() != 0)
{
if (neighbor6.tower3.ElementAtOrDefault(0) < neighbor6.tower2.ElementAtOrDefault(0))
{
neighbor6.tower2.AddFirst(neighbor6.tower3.First);
neighbor6.tower3.RemoveFirst();
neighbors.AddLast(neighbor6);
}
}
else
{
neighbor6.tower2.AddFirst(neighbor6.tower3.First);
neighbor6.tower3.RemoveFirst();
neighbors.AddLast(neighbor6);
}
}
}
public override string ToString()
{
string str;
str = "tower1" + tower1.ToString() + " tower2" + tower2.ToString() + " tower3" + tower3.ToString();
return str;
}
public Sol copy()
{
Sol So;
LinkedList<int> l1 = new LinkedList<int>();
LinkedList<int> l2 = new LinkedList<int>();
LinkedList<int> l3 = new LinkedList<int>();
for (int i = 0; i <= this.tower1.Count() - 1; i++)
{
l1.AddLast(tower1.ElementAt(i));
}
for (int i = 0; i <= this.tower2.Count - 1; i++)
{
l2.AddLast(tower2.ElementAt(i));
}
for (int i = 0; i <= this.tower3.Count - 1; i++)
{
l3.AddLast(tower3.ElementAt(i));
}
So = new Sol(l1, l2, l3);
return So;
}
public bool Equals(Sol sol)
{
if (this.tower1.Equals(sol.tower1) & this.tower2.Equals(sol.tower2) & this.tower3.Equals(sol.tower3))
return true;
return false;
}
public virtual bool containedin(Stack<Sol> vec)
{
bool found = false;
for (int i = 0; i <= vec.Count - 1; i++)
{
if (vec.ElementAt(i).tower1.Equals(this.tower1) && vec.ElementAt(i).tower2.Equals(this.tower2) && vec.ElementAt(i).tower3.Equals(this.tower3))
{
found = true;
break;
}
}
return found;
}
public virtual bool breadthFirst(Sol start, Sol goal)
{
Stack<Sol> nextStack = new Stack<Sol>();
Stack<Sol> traversed = new Stack<Sol>();
bool found = false;
start.depth = 0;
nextStack.Push(start);
while (nextStack.Count != 0)
{
Sol sol = nextStack.Pop();
BFS.AddFirst("poped State:" + sol.ToString() + "level " + sol.depth);
traversed.Push(sol);
if (sol.Equals(goal))
{
found = true;
BFS.AddFirst("Goal:" + sol.ToString());
break;
}
else
{
sol.getneighbors();
foreach (Sol neighbor in sol.neighbors)
{
if (!neighbor.containedin(traversed) && !neighbor.containedin(nextStack))
{
neighbor.depth = (sol.depth + 1);
nextStack.Push(neighbor);
}
}
}
}
return found;
}
public virtual bool depthFirst(Sol start, Sol goal)
{
Stack<Sol> nextStack = new Stack<Sol>();
Stack<Sol> traversed = new Stack<Sol>();
bool found = false;
start.depth = 0;
nextStack.Push(start);
while (nextStack.Count != 0)
{
//Dequeue next State for comparison
//And add it to the list of traversed States
Sol sol = nextStack.Pop();
DFS.AddFirst("poped State:" + sol.ToString() + "level " + sol.depth);
traversed.Push(sol);
if (sol.Equals(goal))
{
found = true;
DFS.AddFirst("Goal:" + sol.ToString());
break;
}
else
{
sol.getneighbors();
foreach (Sol neighbor in sol.neighbors)
{
if (!neighbor.containedin(traversed) && !neighbor.containedin(nextStack))
{
neighbor.depth = sol.depth + 1;
nextStack.Push(neighbor);
}
}
}
}
return found;
}
public virtual bool iterativedeepening(Sol start, Sol goal)
{
bool found = false;
for (int level = 0; ; level++)
{
Stack<Sol> nextStack = new Stack<Sol>();
Stack<Sol> traversed = new Stack<Sol>();
start.depth = 0;
nextStack.Push(start);
while (nextStack.Count != 0)
{
Sol sol = nextStack.Pop();
IDS.AddFirst("poped State:" + sol.ToString() + "Level" + sol.depth);
traversed.Push(sol);
if (sol.Equals(goal))
{
found = true;
IDS.AddFirst("Goal:" + sol.ToString());
break;
}
else if (sol.depth < level)
{
sol.getneighbors();
foreach (Sol neighbor in sol.neighbors)
{
if (!neighbor.containedin(traversed) && !neighbor.containedin(nextStack))
{
neighbor.depth = sol.depth + 1;
nextStack.Push(neighbor);
} //end if
} //end for each
} //end else if
} // end while
if (found == true)
break;
} // end for
return found;
}
}
}
Just wondering if I may be doing something wrong somewhere.
First-chance Exceptions
A first-chance exception indicates that something threw an exception - the debugger is reporting it because it gets to see all exceptions first. It then passes control back to the code that threw it - if that code doesn't handle the exception, the debugger will see it again as a second-chance exception and report it to you.
For more information on first- and second-chance exception handling, see this Microsoft support article, KB105675.
Where is it coming from?
Discovering which exact bit of code is doing this will require a little effort. You can delete code bit by bit until the first-chance exception goes away - that identifies the code that is producing the exception. Alternatively, tell the debugger to break when the exception is thrown (Debug > Exceptions in older versions of Visual Studio, or Debug > Windows > Exception Settings in newer ones, and tick "Thrown" for Common Language Runtime Exceptions); it will then stop on the exact statement that raises it.
As to whether you are doing something wrong, it is unlikely. The fact that your application didn't crash suggests that the code throwing the exception is ultimately handling it as it is some sort of expected situation (for example, a rudimentary way of detecting if some key exists in a dictionary would be to access that key and see if it threw an exception). However, you'll only know for sure once you've identified the source of the exception and investigated as to whether you're using the relevant calls correctly.
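To make that dictionary example concrete, here is a small sketch (not taken from your code) that throws and immediately handles an exception in a loop; while debugging, every iteration prints a first-chance message in the Output window even though the program never crashes, and the assembly named in the message is simply the one the throwing type lives in:
using System;
using System.Collections.Generic;
class FirstChanceDemo
{
    static void Main()
    {
        var lookup = new Dictionary<string, int> { ["a"] = 1 };
        for (int i = 0; i < 3; i++)
        {
            try
            {
                // Accessing a missing key throws KeyNotFoundException...
                int value = lookup["missing"];
                Console.WriteLine(value);
            }
            catch (KeyNotFoundException)
            {
                // ...but it is handled here, so the app keeps running.
                // The debugger still reports it as a first-chance exception.
                Console.WriteLine("Key not found, using a default.");
            }
        }
    }
}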
Related
I am trying to modify the IL of the System.String.Concat method through the ICorProfilerCallback interface and add my own tracking code before the method is executed.
I have modified the IL of other methods and added the tracking code without any problems; after the rewrite they run fine.
....
if (!isStatic) {
reWriterWrapper.LoadNull();// Ldnull
reWriterWrapper.StLocal(indexMethodTrace); //stloc
reWriterWrapper.LoadNull(); //Ldnull
reWriterWrapper.StLocal(indexEx);
reWriterWrapper.LoadNull();
reWriterWrapper.StLocal(indexRet);
ILInstr* pTryStartInstr = reWriterWrapper.CallMember0(getInstanceMemberRef, false);
reWriterWrapper.Cast(traceAgentTypeRef); //castclass
reWriterWrapper.LoadToken(functionInfo.type.id);//ldtoken
reWriterWrapper.CallMember(moduleMetaInfo->getTypeFromHandleToken, false);
reWriterWrapper.LoadArgument(0); //ldarg 0
auto argNum = functionInfo.signature.NumberOfArguments();
reWriterWrapper.CreateArray(objectTypeRef, argNum);
auto arguments = functionInfo.signature.GetMethodArguments();
for (unsigned i = 0; i < argNum; i++) {
reWriterWrapper.BeginLoadValueIntoArray(i);
reWriterWrapper.LoadArgument(i + 1);//ldarg
auto argTypeFlags = arguments[i].GetTypeFlags(elementType);
if (argTypeFlags & TypeFlagByRef) {
reWriterWrapper.LoadIND(elementType); //ldind
}
if (argTypeFlags & TypeFlagBoxedType) {
auto tok = arguments[i].GetTypeTok(pEmit, corLibAssemblyRef);
if (tok == mdTokenNil) {
return S_OK;
}
reWriterWrapper.Box(tok);
}
reWriterWrapper.EndLoadValueIntoArray(); //stelem_ref
}
reWriterWrapper.LoadInt32((INT32)function_token); //ldc_i4
reWriterWrapper.CallMember(beforeMemberRef, true); // call
reWriterWrapper.Cast(methodTraceTypeRef); //Castclass
reWriterWrapper.StLocal(rewriter.cNewLocals - 1); //STLOC
ILInstr* pRetInstr = pReWriter->NewILInstr();
pRetInstr->m_opcode = CEE_RET;
pReWriter->InsertAfter(pReWriter->GetILList()->m_pPrev, pRetInstr);
bool isVoidMethod = (retTypeFlags & TypeFlagVoid) > 0;
auto ret = functionInfo.signature.GetRet();
bool retIsBoxedType = false;
mdToken retTypeTok;
if (!isVoidMethod) {
retTypeTok = ret.GetTypeTok(pEmit, corLibAssemblyRef);
if (ret.GetTypeFlags(elementType) & TypeFlagBoxedType) {
retIsBoxedType = true;
}
}
reWriterWrapper.SetILPosition(pRetInstr);
reWriterWrapper.StLocal(indexEx); //stloc
ILInstr* pRethrowInstr = reWriterWrapper.Rethrow(); //Rethrow
reWriterWrapper.LoadLocal(indexMethodTrace); //ldloc
ILInstr* pNewInstr = pReWriter->NewILInstr(); //Brfalse
pNewInstr->m_opcode = CEE_BRFALSE_S;
pReWriter->InsertBefore(pRetInstr, pNewInstr);
reWriterWrapper.LoadLocal(indexMethodTrace); //ldloc
reWriterWrapper.LoadLocal(indexRet);//ldloc
reWriterWrapper.LoadLocal(indexEx); //ldloc
reWriterWrapper.CallMember(endMemberRef, true); // call
ILInstr* pEndFinallyInstr = reWriterWrapper.EndFinally();
pNewInstr->m_pTarget = pEndFinallyInstr;
if (!isVoidMethod) {
reWriterWrapper.LoadLocal(indexRet);//ldloc
if (retIsBoxedType) {
reWriterWrapper.UnboxAny(retTypeTok);
}
else {
reWriterWrapper.Cast(retTypeTok);
}
}
for (ILInstr* pInstr = pReWriter->GetILList()->m_pNext; pInstr != pReWriter->GetILList(); pInstr = pInstr->m_pNext) {
switch (pInstr->m_opcode)
{
case CEE_RET:
{
if (pInstr != pRetInstr) {
if (!isVoidMethod) {
reWriterWrapper.SetILPosition(pInstr);
if (retIsBoxedType) {
reWriterWrapper.Box(retTypeTok);
}
reWriterWrapper.StLocal(indexRet); //ldloc
}
pInstr->m_opcode = CEE_LEAVE_S; //Leave_S
pInstr->m_pTarget = pEndFinallyInstr->m_pNext;
}
break;
}
default:
break;
}
}
EHClause exClause{};
exClause.m_Flags = COR_ILEXCEPTION_CLAUSE_NONE;
exClause.m_pTryBegin = pTryStartInstr;
exClause.m_pTryEnd = pRethrowInstr->m_pPrev;
exClause.m_pHandlerBegin = pRethrowInstr->m_pPrev;
exClause.m_pHandlerEnd = pRethrowInstr;
exClause.m_ClassToken = exTypeRef;
EHClause finallyClause{};
finallyClause.m_Flags = COR_ILEXCEPTION_CLAUSE_FINALLY;
finallyClause.m_pTryBegin = pTryStartInstr;
finallyClause.m_pTryEnd = pRethrowInstr->m_pNext;
finallyClause.m_pHandlerBegin = pRethrowInstr->m_pNext;
finallyClause.m_pHandlerEnd = pEndFinallyInstr;
auto m_pEHNew = new EHClause[rewriter.m_nEH + 2];
for (unsigned i = 0; i < rewriter.m_nEH; i++) {
m_pEHNew[i] = rewriter.m_pEH[i];
}
rewriter.m_nEH += 2;
m_pEHNew[rewriter.m_nEH - 2] = exClause;
m_pEHNew[rewriter.m_nEH - 1] = finallyClause;
rewriter.m_pEH = m_pEHNew;
}
else
{
//static method
reWriterWrapper.LoadNull();// Ldnull
reWriterWrapper.StLocal(indexMethodTrace); //stloc
reWriterWrapper.LoadNull(); //Ldnull
reWriterWrapper.StLocal(indexEx); //stloc
reWriterWrapper.LoadNull();// Ldnull
reWriterWrapper.StLocal(indexRet);
ILInstr* pTryStartInstr = reWriterWrapper.CallMember0(getInstanceMemberRef, false);
reWriterWrapper.Cast(traceAgentTypeRef); //castclass
reWriterWrapper.LoadNull(); //ldstr
reWriterWrapper.LoadNull();
auto argNum = functionInfo.signature.NumberOfArguments();
reWriterWrapper.CreateArray(objectTypeRef, argNum); //newarr
auto arguments = functionInfo.signature.GetMethodArguments();
for (unsigned i = 0; i < argNum; i++) {
reWriterWrapper.BeginLoadValueIntoArray(i);
reWriterWrapper.LoadArgument(i);//ldarg Static index 0
auto argTypeFlags = arguments[i].GetTypeFlags(elementType);
if (argTypeFlags & TypeFlagByRef) {
reWriterWrapper.LoadIND(elementType); //ldind
}
if (argTypeFlags & TypeFlagBoxedType) {
auto tok = arguments[i].GetTypeTok(pEmit, corLibAssemblyRef);
if (tok == mdTokenNil) {
return S_OK;
}
reWriterWrapper.Box(tok);
}
reWriterWrapper.EndLoadValueIntoArray(); //stelem_ref
}
reWriterWrapper.LoadInt32((INT32)function_token); //ldc_i4
reWriterWrapper.CallMember(beforeMemberRef, true); // call
reWriterWrapper.Cast(methodTraceTypeRef); //Castclass
reWriterWrapper.StLocal(rewriter.cNewLocals - 1); //STLOC
ILInstr* pRetInstr = pReWriter->NewILInstr();
pRetInstr->m_opcode = CEE_RET;
pReWriter->InsertAfter(pReWriter->GetILList()->m_pPrev, pRetInstr);
bool isVoidMethod = (retTypeFlags & TypeFlagVoid) > 0;
auto ret = functionInfo.signature.GetRet();
bool retIsBoxedType = false;
mdToken retTypeTok;
if (!isVoidMethod) {
retTypeTok = ret.GetTypeTok(pEmit, corLibAssemblyRef);
if (ret.GetTypeFlags(elementType) & TypeFlagBoxedType) {
retIsBoxedType = true;
}
}
reWriterWrapper.SetILPosition(pRetInstr);
reWriterWrapper.StLocal(indexEx); //stloc
ILInstr* pRethrowInstr = reWriterWrapper.Rethrow(); //Rethrow
reWriterWrapper.LoadLocal(indexMethodTrace); //ldloc
ILInstr* pNewInstr = pReWriter->NewILInstr(); //Brfalse
pNewInstr->m_opcode = CEE_BRFALSE_S;
pReWriter->InsertBefore(pRetInstr, pNewInstr);
reWriterWrapper.LoadLocal(indexMethodTrace); //ldloc
reWriterWrapper.LoadLocal(indexRet);//ldloc
reWriterWrapper.LoadLocal(indexEx); //ldloc
reWriterWrapper.CallMember(endMemberRef, true); // call
ILInstr* pEndFinallyInstr = reWriterWrapper.EndFinally(); //Endfinally
pNewInstr->m_pTarget = pEndFinallyInstr;
if (!isVoidMethod) {
reWriterWrapper.LoadLocal(indexRet);//ldloc
if (retIsBoxedType) {
reWriterWrapper.UnboxAny(retTypeTok); // Unbox_Any
}
else {
reWriterWrapper.Cast(retTypeTok);
}
}
for (ILInstr* pInstr = pReWriter->GetILList()->m_pNext; pInstr != pReWriter->GetILList(); pInstr = pInstr->m_pNext) {
switch (pInstr->m_opcode)
{
case CEE_RET:
{
if (pInstr != pRetInstr) {
if (!isVoidMethod) {
reWriterWrapper.SetILPosition(pInstr);
if (retIsBoxedType) {
reWriterWrapper.Box(retTypeTok);
}
reWriterWrapper.StLocal(indexRet); //ldloc
}
pInstr->m_opcode = CEE_LEAVE_S; //Leave_S
pInstr->m_pTarget = pEndFinallyInstr->m_pNext;
}
break;
}
default:
break;
}
}
EHClause exClause{};
exClause.m_Flags = COR_ILEXCEPTION_CLAUSE_NONE;
exClause.m_pTryBegin = pTryStartInstr;
exClause.m_pTryEnd = pRethrowInstr->m_pPrev;
exClause.m_pHandlerBegin = pRethrowInstr->m_pPrev;
exClause.m_pHandlerEnd = pRethrowInstr;
exClause.m_ClassToken = exTypeRef;
EHClause finallyClause{};
finallyClause.m_Flags = COR_ILEXCEPTION_CLAUSE_FINALLY;
finallyClause.m_pTryBegin = pTryStartInstr;
finallyClause.m_pTryEnd = pRethrowInstr->m_pNext;
finallyClause.m_pHandlerBegin = pRethrowInstr->m_pNext;
finallyClause.m_pHandlerEnd = pEndFinallyInstr;
auto m_pEHNew = new EHClause[rewriter.m_nEH + 2];
for (unsigned i = 0; i < rewriter.m_nEH; i++) {
m_pEHNew[i] = rewriter.m_pEH[i];
}
rewriter.m_nEH += 2;
m_pEHNew[rewriter.m_nEH - 2] = exClause;
m_pEHNew[rewriter.m_nEH - 1] = finallyClause;
rewriter.m_pEH = m_pEHNew;
}
hr = rewriter.Export();
....
The local variable signature modification code (ModifyLocalSig):
HRESULT ModifyLocalSig(CComPtr<IMetaDataImport2>& pImport, CComPtr<IMetaDataEmit2>& pEmit, ILRewriter& reWriter, mdTypeRef exTypeRef,mdTypeRef methodTraceTypeRef)
{
HRESULT hr;
PCCOR_SIGNATURE rgbOrigSig = NULL;
ULONG cbOrigSig = 0;
UNALIGNED INT32 temp = 0;
if (reWriter.m_tkLocalVarSig != mdTokenNil)
{
IfFailRet(pImport->GetSigFromToken(reWriter.m_tkLocalVarSig, &rgbOrigSig, &cbOrigSig));
const auto len = CorSigCompressToken(methodTraceTypeRef, &temp);
if(cbOrigSig - len > 0){
if(rgbOrigSig[cbOrigSig - len -1]== ELEMENT_TYPE_CLASS){
if (memcmp(&rgbOrigSig[cbOrigSig - len], &temp, len) == 0) {
return E_FAIL;
}
}
}
}
auto exTypeRefSize = CorSigCompressToken(exTypeRef, &temp);
auto methodTraceTypeRefSize = CorSigCompressToken(methodTraceTypeRef, &temp);
ULONG cbNewSize = cbOrigSig + 1 + 1 + methodTraceTypeRefSize + 1 + exTypeRefSize;
ULONG cOrigLocals;
ULONG cNewLocalsLen;
ULONG cbOrigLocals = 0;
if (cbOrigSig == 0) {
cbNewSize += 2;
reWriter.cNewLocals = 3;
cNewLocalsLen = CorSigCompressData(reWriter.cNewLocals, &temp);
}
else {
cbOrigLocals = CorSigUncompressData(rgbOrigSig + 1, &cOrigLocals);
reWriter.cNewLocals = cOrigLocals + 3;
cNewLocalsLen = CorSigCompressData(reWriter.cNewLocals, &temp);
cbNewSize += cNewLocalsLen - cbOrigLocals;
}
auto rgbNewSig = new COR_SIGNATURE[cbNewSize];
*rgbNewSig = IMAGE_CEE_CS_CALLCONV_LOCAL_SIG;
ULONG rgbNewSigOffset = 1;
memcpy(rgbNewSig + rgbNewSigOffset, &temp, cNewLocalsLen);
rgbNewSigOffset += cNewLocalsLen;
if (cbOrigSig > 0) {
const auto cbOrigCopyLen = cbOrigSig - 1 - cbOrigLocals;
memcpy(rgbNewSig + rgbNewSigOffset, rgbOrigSig + 1 + cbOrigLocals, cbOrigCopyLen);
rgbNewSigOffset += cbOrigCopyLen;
}
rgbNewSig[rgbNewSigOffset++] = ELEMENT_TYPE_OBJECT;
rgbNewSig[rgbNewSigOffset++] = ELEMENT_TYPE_CLASS;
exTypeRefSize = CorSigCompressToken(exTypeRef, &temp);
memcpy(rgbNewSig + rgbNewSigOffset, &temp, exTypeRefSize);
rgbNewSigOffset += exTypeRefSize;
rgbNewSig[rgbNewSigOffset++] = ELEMENT_TYPE_CLASS;
methodTraceTypeRefSize = CorSigCompressToken(methodTraceTypeRef, &temp);
memcpy(rgbNewSig + rgbNewSigOffset, &temp, methodTraceTypeRefSize);
rgbNewSigOffset += methodTraceTypeRefSize;
IfFailRet(pEmit->GetTokenFromSig(&rgbNewSig[0], cbNewSize, &reWriter.m_tkLocalVarSig));
return S_OK;
}
Modification method template:
//Before modification
private Task DataRead(string a, int b)
{
return Task.Delay(10);
}
//After modification
private Task DataReadWrapper(string a, int b)
{
object ret = null;
Exception ex = null;
MethodTrace methodTrace = null;
try
{
methodTrace = (MethodTrace)((TraceAgent)TraceAgent.GetInstance())
.BeforeMethod(this.GetType(), this, new object[]{ a, b }, functiontoken);
ret = Task.Delay(10);
goto T;
}
catch (Exception e)
{
ex = e;
throw;
}
finally
{
if (methodTrace != null)
{
methodTrace.EndMethod(ret, ex);
}
}
T:
return (Task)ret;
}
Error:
Unhandled exception.
Cannot print exception string because Exception.ToString() failed.
Application '/LM/W3SVC/1/ROOT' with physical root 'C:\inetpub\wwwroot\DotNetRangeCore\' failed to load coreclr. Exception message: CLR worker thread exited prematurely
There is no problem if I modify the IL of other methods with this code, such as set_CommandText, System.Random.Next, System.Web.HttpContext.FinishPipelineRequest, and so on.
I don't know why it goes wrong when I modify Concat.
I solved it by replacing the call instead: if an error is reported after a method's IL is modified like this, I redirect it to a wrapping ("packaging") method rather than rewriting the method itself.
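To illustrate what I mean by a packaging method, here is only a rough sketch of the idea; TraceAgent, MethodTrace and the token come from the template above, and the redirection of call sites to this wrapper still has to be done by the profiler:
// Sketch of a "packaging" wrapper: instead of rewriting the body of
// string.Concat, the profiler rewrites call sites to target this method.
public static class ConcatWrapper
{
    public static string Concat(string a, string b, int functionToken)
    {
        object ret = null;
        Exception ex = null;
        MethodTrace methodTrace = null;
        try
        {
            methodTrace = (MethodTrace)((TraceAgent)TraceAgent.GetInstance())
                .BeforeMethod(typeof(string), null, new object[] { a, b }, functionToken);
            ret = string.Concat(a, b); // delegate to the real Concat
            return (string)ret;
        }
        catch (Exception e)
        {
            ex = e;
            throw;
        }
        finally
        {
            if (methodTrace != null)
                methodTrace.EndMethod(ret, ex); // same pattern as the template
        }
    }
}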
This question already has answers here:
C# compiler error: "not all code paths return a value"
(9 answers)
Closed 3 years ago.
The error "not all code paths return a value" is reported when compiling this code. Please help us to resolve it.
There seems to be some code path that is not returning any value.
Can someone please help fix it?
There are many loops in the code, and I am not able to figure out which one is causing this issue.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
//namespace ConsoleApp7
//{
class Solution
{
static bool CheckElementSymbol(string elementName, string symbol)
{
symbol = symbol.ToLower();
int symbol_length = symbol.Length;
int numberofchars = 0;
int firstletter = 0;
bool firstcharfound = false;
bool secondcharfound = false;
//bool symbolfound = false;
//int symbolpresent = 0;
int secondcharmatch = 0;
if (symbol_length == 2)
{
foreach (char sym in symbol)
{
numberofchars = numberofchars + 1;
var firstcharmatch = new List<int>();
//int index = 0;
int sourcelength = elementName.Length;
if (numberofchars == 1)
{
for (int index = 0; index < sourcelength; index++)
{
int matchfound1stchar = elementName.IndexOf(sym, index, 1);
if (matchfound1stchar != -1)
{
firstletter = 1;
firstcharmatch.Add(matchfound1stchar + 1);
}
}
if (firstletter == 1)
{
firstcharfound = true;
}
else
{
firstcharfound = false;
}
}
//int matchingchar = elementName
if (numberofchars == 2)
{
secondcharmatch = elementName.LastIndexOf(elementName, sym);
//yield return index;
if (secondcharmatch != -1)
{
secondcharfound = true;
secondcharmatch = secondcharmatch + 1;
}
else
{ secondcharfound = false; }
}
//int matchingchar = elementName
if (secondcharfound == true && firstcharfound == true)
{
foreach (int value in firstcharmatch)
{
if (secondcharmatch > value)
{
//symbolfound = true;
//return symbolfound;
return true;
}
return false;
}
}
else
{
return false;
}
}
}
else
{
return false;
}
}
static void Main(string[] args)
{
TextWriter textWriter = new StreamWriter(@System.Environment.GetEnvironmentVariable("OUTPUT_PATH"), true);
string elementName = Console.ReadLine();
string symbol = Console.ReadLine();
bool res = CheckElementSymbol(elementName, symbol);
textWriter.WriteLine((res ? 1 : 0));
textWriter.Flush();
textWriter.Close();
}
}
//}
Found the problem. If the string has no characters, then it should return false:
if (symbol_length == 2)
{
foreach (char sym in symbol) { ... } // this code is unchanged and not relevant here
return false; // here is the solution: if there are no characters in the string (so the loop never returns), return false
}
else
{
return false;
}
Next time, make your code easier to read and only show the relevant parts.
First of all, please post only relevant and minimal code in your question in order to get a quick response.
For your query, you need to understand that the compiler error you are getting is
error CS0161: 'Solution.CheckElementSymbol(string, string)': not all
code paths return a value
This error occurs because all your return statements are inside if/else blocks or loops, so the compiler cannot prove that one of them is always reached.
The error would be fixed if you add a return statement at the end of the CheckElementSymbol method:
return false;
Hope this helps.
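In other words, the method needs one unconditional return after all the branches; a minimal sketch of the fixed shape (the matching logic from the question is unchanged and omitted here):
static bool CheckElementSymbol(string elementName, string symbol)
{
    if (symbol.Length == 2)
    {
        foreach (char sym in symbol)
        {
            // ... matching logic from the question ...
            // some iterations return true or false, others fall through
        }
    }
    // Unconditional return: reachable on every remaining path,
    // which is exactly what CS0161 was complaining about.
    return false;
}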
I have to make a module in an Insurance application that deals with clearing and settlement (I think this is the correct financial terminology) between insurance companies enrolled in the system. Practically, the system must pair all the amounts that companies have to pay to one another, and only the unpaired (remaining) sums are to be paid through the bank. For now there are about 30 companies in the system.
All the readings I did about clearing and settlement pointed me towards graphs and graphs theory (which I have studied in the highschool quite a long time ago).
For a system with 4 companies the graph would look like this:
where each company represents a node (N1 ... N4) and each weighted edge represents the amount that one company has to pay to the other. In my code, the nodes are ints, representing the ids of the companies.
What I did so far... I created the graph (for testing I used a Random generator for the amounts) and made a recursive function to calculate all possible cycles in the graph. Then I made another recursive function that takes all the non-zero cycles, starting with the longest path with the maximum common sum to pair.
The algorithm seems valid in terms of final results, but for graphs bigger than 7-8 nodes it takes too long to complete. The problem is in the recursive function that creates the possible cycles in the graph. Here is my code:
static void Main(string[] args)
{
int nodes = 4;
try
{
nodes = Convert.ToInt32(args[0]);
}
catch { }
DateTime start = DateTime.Now;
Graph g = new Graph(nodes);
int step = 0;
double CompensatedAmount = 0;
double TotalCompensatedAmount = 0;
DateTime endGeneration = DateTime.Now;
Console.WriteLine("Graph generated in: " + (endGeneration - start).TotalSeconds + " seconds.");
Compensare.RunCompensation(false, g, step, CompensatedAmount, TotalCompensatedAmount, out CompensatedAmount, out TotalCompensatedAmount);
DateTime endCompensation = DateTime.Now;
Console.WriteLine("Graph compensated in: " + (endCompensation - endGeneration).TotalSeconds + " seconds.");
}
... and the main class:
public static class Compensare
{
public static void RunCompensation(bool exit, Graph g, int step, double prevCompensatedAmount, double prevTotalCompensatedAmount, out double CompensatedAmount, out double TotalCompensatedAmount)
{
step++;
CompensatedAmount = prevCompensatedAmount;
TotalCompensatedAmount = prevTotalCompensatedAmount;
if (!exit)
{
List<Cycle> orderedList = g.Cycles.OrderByDescending(x => x.CycleCompensatedAmount).ToList();
g.ListCycles(orderedList, "OrderedCycles" + step.ToString() + ".txt");
using (Graph clona = g.Clone())
{
int maxCycleIndex = clona.GetMaxCycleByCompensatedAmount();
double tmpCompensatedAmount = clona.Cycles[maxCycleIndex].CycleMin;
exit = tmpCompensatedAmount <= 0 ? true : false;
CompensatedAmount += tmpCompensatedAmount;
TotalCompensatedAmount += (tmpCompensatedAmount * clona.Cycles[maxCycleIndex].EdgesCount);
clona.CompensateCycle(maxCycleIndex);
clona.UpdateCycles();
Console.WriteLine(String.Format("{0} - edges: {4} - min: {3} - {1} - {2}\r\n", step, CompensatedAmount, TotalCompensatedAmount, tmpCompensatedAmount, clona.Cycles[maxCycleIndex].EdgesCount));
RunCompensation(exit, clona, step, CompensatedAmount, TotalCompensatedAmount, out CompensatedAmount, out TotalCompensatedAmount);
}
}
}
}
public class Edge
{
public int Start { get; set; }
public int End { get; set; }
public double Weight { get; set; }
public double InitialWeight {get;set;}
public Edge() { }
public Edge(int _start, int _end, double _weight)
{
this.Start = _start;
this.End = _end;
this.Weight = _weight;
this.InitialWeight = _weight;
}
}
public class Cycle
{
public List<Edge> Edges = new List<Edge>();
public double CycleWeight = 0;
public double CycleMin = 0;
public double CycleMax = 0;
public double CycleAverage = 0;
public double CycleCompensatedAmount = 0;
public int EdgesCount = 0;
public Cycle() { }
public Cycle(List<Edge> _edges)
{
this.Edges = new List<Edge>(_edges);
UpdateCycle();
}
public void UpdateCycle()
{
UpdateCycle(this);
}
public void UpdateCycle(Cycle c)
{
double sum = 0;
double min = c.Edges[0].Weight;
double max = c.Edges[0].Weight;
for(int i=0;i<c.Edges.Count;i++)
{
sum += c.Edges[i].Weight;
min = c.Edges[i].Weight < min ? c.Edges[i].Weight : min;
max = c.Edges[i].Weight > max ? c.Edges[i].Weight : max;
}
c.EdgesCount = c.Edges.Count;
c.CycleWeight = sum;
c.CycleMin = min;
c.CycleMax = max;
c.CycleAverage = sum / c.EdgesCount;
c.CycleCompensatedAmount = min * c.EdgesCount;
}
}
public class Graph : IDisposable
{
public List<int> Nodes = new List<int>();
public List<Edge> Edges = new List<Edge>();
public List<Cycle> Cycles = new List<Cycle>();
public int NodesCount { get; set; }
public Graph() { }
public Graph(int _nodes)
{
this.NodesCount = _nodes;
GenerateNodes();
GenerateEdges();
GenerateCycles();
}
private int FindNode(string _node)
{
for(int i = 0; i < this.Nodes.Count; i++)
{
if (this.Nodes[i].ToString() == _node)
return i;
}
return 0;
}
private int FindEdge(string[] _edge)
{
for(int i = 0; i < this.Edges.Count; i++)
{
if (this.Edges[i].Start.ToString() == _edge[0] && this.Edges[i].End.ToString() == _edge[1] && Convert.ToDouble(this.Edges[i].Weight) == Convert.ToDouble(_edge[2]))
return i;
}
return 0;
}
public Graph Clone()
{
Graph clona = new Graph();
clona.Nodes = new List<int>(this.Nodes);
clona.Edges = new List<Edge>(this.Edges);
clona.Cycles = new List<Cycle>(this.Cycles);
clona.NodesCount = this.NodesCount;
return clona;
}
public void CompensateCycle(int cycleIndex)
{
for(int i = 0; i < this.Cycles[cycleIndex].Edges.Count; i++)
{
this.Cycles[cycleIndex].Edges[i].Weight -= this.Cycles[cycleIndex].CycleMin;
}
}
public int GetMaxCycleByCompensatedAmount()
{
int toReturn = 0;
for (int i = 0; i < this.Cycles.Count; i++)
{
if (this.Cycles[i].CycleCompensatedAmount > this.Cycles[toReturn].CycleCompensatedAmount)
{
toReturn = i;
}
}
return toReturn;
}
public void GenerateNodes()
{
for (int i = 0; i < this.NodesCount; i++)
{
this.Nodes.Add(i + 1);
}
}
public void GenerateEdges()
{
Random r = new Random();
for(int i = 0; i < this.Nodes.Count; i++)
{
for(int j = 0; j < this.Nodes.Count; j++)
{
if(this.Nodes[i] != this.Nodes[j])
{
int _weight = r.Next(0, 500);
Edge e = new Edge(this.Nodes[i], this.Nodes[j], _weight);
this.Edges.Add(e);
}
}
}
}
public void GenerateCycles()
{
for(int i = 0; i < this.Edges.Count; i++)
{
FindCycles(new Cycle(new List<Edge>() { this.Edges[i] }));
}
this.UpdateCycles();
}
public void UpdateCycles()
{
for (int i = 0; i < this.Cycles.Count; i++)
{
this.Cycles[i].UpdateCycle();
}
}
private void FindCycles(Cycle path)
{
List<Edge> nextPossibleEdges = GetNextEdges(path.Edges[path.Edges.Count - 1].End);
for (int i = 0; i < nextPossibleEdges.Count; i++)
{
if (path.Edges.IndexOf(nextPossibleEdges[i]) < 0) // the edge shouldn't be already in the path
{
Cycle temporaryPath = new Cycle(path.Edges);
temporaryPath.Edges.Add(nextPossibleEdges[i]);
if (nextPossibleEdges[i].End == temporaryPath.Edges[0].Start) // end of path - valid cycle
{
if (!CycleExists(temporaryPath))
{
this.Cycles.Add(temporaryPath);
break;
}
}
else
{
FindCycles(temporaryPath);
}
}
}
}
private bool CycleExists(Cycle cycle)
{
bool toReturn = false;
if (this.Cycles.IndexOf(cycle) > -1) { toReturn = true; }
else
{
for (int i = 0; i < this.Cycles.Count; i++)
{
if (this.Cycles[i].Edges.Count == cycle.Edges.Count && !CompareEdges(this.Cycles[i].Edges[0], cycle.Edges[0]))
{
bool cycleExists = true;
for (int j = 0; j < cycle.Edges.Count; j++)
{
bool edgeExists = false; // if there is an edge not in the path, then the searched cycle is different from the current cycle and we can pass to the next iteration
for (int k = 0; k < this.Cycles[i].Edges.Count; k++)
{
if (CompareEdges(cycle.Edges[j], this.Cycles[i].Edges[k]))
{
edgeExists = true;
break;
}
}
if (!edgeExists)
{
cycleExists = false;
break;
}
}
if (cycleExists) // if we found a cycle with all edges equal to the searched cycle, then the cycle is not valid
{
toReturn = true;
break;
}
}
}
}
return toReturn;
}
private bool CompareEdges(Edge e1, Edge e2)
{
return (e1.Start == e2.Start && e1.End == e2.End && e1.Weight == e2.Weight);
}
private List<Edge> GetNextEdges(int endNode)
{
List<Edge> tmp = new List<Edge>();
for(int i = 0; i < this.Edges.Count; i++)
{
if(endNode == this.Edges[i].Start)
{
tmp.Add(this.Edges[i]);
}
}
return tmp;
}
#region IDisposable Support
private bool disposedValue = false; // To detect redundant calls
protected virtual void Dispose(bool disposing)
{
if (!disposedValue)
{
if (disposing)
{
// TODO: dispose managed state (managed objects).
this.Nodes = null;
this.Edges = null;
this.Cycles = null;
this.NodesCount = 0;
}
// TODO: free unmanaged resources (unmanaged objects) and override a finalizer below.
// TODO: set large fields to null.
disposedValue = true;
}
}
// TODO: override a finalizer only if Dispose(bool disposing) above has code to free unmanaged resources.
// ~Graph() {
// // Do not change this code. Put cleanup code in Dispose(bool disposing) above.
// Dispose(false);
// }
// This code added to correctly implement the disposable pattern.
public void Dispose()
{
// Do not change this code. Put cleanup code in Dispose(bool disposing) above.
Dispose(true);
// TODO: uncomment the following line if the finalizer is overridden above.
// GC.SuppressFinalize(this);
}
#endregion
}
I've found several articles/answers about graphs, both in Java and C# (including quickgraph), but they mainly focus on directed graphs (without cycles).
I have also read about tail call optimization, for recursion, but I don't know if/how to implement in my case.
I know there is a lot to grasp about this subject, but maybe someone has had to deal with something similar and can either help me optimize the code (which, as I said, seems to do the job in the end) or point me in another direction to rethink the whole process.
I think you can massively simplify this.
All money is the same, so (using your example) N1 doesn't care whether it gets 350 from N2 and pays 150 to N2 and so on - N1 merely cares that overall it ends up 145 down (if I've done the arithmetic correctly). Similarly, each other N only cares about its overall position. So, summing the inflows and outflows at each node, we get:
Company Net position
N1 -145
N2 -65
N3 +195
N4 +15
So with someone to act as a central clearing house - the bank - simply arrange for N1 and N2 to pay the clearing house 145 and 65 respectively, and for N3 and N4 to receive 195 and 15 respectively from the clearing house. And everyone's happy.
I may have missed some aspect, of course, in which case I'm sure someone will point it out...
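If it helps, here is a minimal sketch of that netting step in code, assuming the Edge shape from your question (Start pays End the amount Weight); a negative net means the company pays the clearing house, a positive net means it receives from it:
using System.Collections.Generic;
// Assumes the Edge class from the question (Start, End, Weight).
static Dictionary<int, double> ComputeNetPositions(IEnumerable<Edge> edges)
{
    var net = new Dictionary<int, double>();
    foreach (var e in edges)
    {
        // The payer's position goes down, the payee's goes up.
        double payerBalance;
        net.TryGetValue(e.Start, out payerBalance);
        net[e.Start] = payerBalance - e.Weight;
        double payeeBalance;
        net.TryGetValue(e.End, out payeeBalance);
        net[e.End] = payeeBalance + e.Weight;
    }
    return net;
}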
This is a bit of a doozy and it's been a while since I worked with C#, so bear with me:
I'm running a JRuby script to iterate through 900 files (5 MB - 1500 MB in size) to figure out how many dupes STILL exist within these (already uniq'd) files. I had little luck with awk.
My latest idea was to insert them into a local MongoDB instance like so:
db.collection('hashes').update({ :_id => hash }, { $inc: { count: 1 } }, { upsert: true })
... so that later I could just query it like db.collection.where({ count: { $gt: 1 } }) to get all the dupes.
This is working great except it's been over 24 hours and at the time of writing I'm at 72,532,927 Mongo entries and growing.
I think Ruby's .each_line is bottlenecking the IO hardcore.
So what I'm thinking now is compiling a C# program which fires up a thread PER EACH FILE and inserts the line (md5 hash) into a Redis list.
From there, I could have another compiled C# program simply pop the values off and ignore the save if the count is 1.
So the questions are:
Will using a compiled file reader and multithreading the file reads significantly improve performance?
Is using Redis even necessary? With a tremendous amount of AWS memory, could I not just use the threads to fill some sort of a list atomically and proceed from there?
Thanks in advance.
Updated
New solution (replacing my old one). The main idea is to calculate a dummy hash (just the sum of all chars in the string) for each line and store it in Dictionary<ulong, List<LinePosition>> _hash2LinePositions. Multiple lines in the same stream can produce the same hash, which is handled by the List in the dictionary value. When the hashes are the same, we read the strings back from the streams and compare them. LinePosition is used for storing info about a line: its position in the stream and its length. I don't have files as huge as yours, but my tests show that it works. Here is the full code:
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
public class Solution
{
struct LinePosition
{
public long Start;
public long Length;
public LinePosition(long start, long count)
{
Start = start;
Length = count;
}
public override string ToString()
{
return string.Format("Start: {0}, Length: {1}", Start, Length);
}
}
class TextFileHasher : IDisposable
{
readonly Dictionary<ulong, List<LinePosition>> _hash2LinePositions;
readonly Stream _stream;
bool _isDisposed;
public HashSet<ulong> Hashes { get; private set; }
public string Name { get; private set; }
public TextFileHasher(string name, Stream stream)
{
Name = name;
_stream = stream;
_hash2LinePositions = new Dictionary<ulong, List<LinePosition>>();
Hashes = new HashSet<ulong>();
}
public override string ToString()
{
return Name;
}
public void CalculateFileHash()
{
int readByte = -1;
ulong dummyLineHash = 0;
// Line start position in file
long startPosition = 0;
while ((readByte = _stream.ReadByte()) != -1) {
// Read until new line
if (readByte == '\r' || readByte == '\n') {
// If there was data
if (dummyLineHash != 0) {
// Add line hash and line position to the dict
AddToDictAndHash(dummyLineHash, startPosition, _stream.Position - 1 - startPosition);
// Reset line hash
dummyLineHash = 0;
}
}
else {
// Was it new line ?
if (dummyLineHash == 0)
startPosition = _stream.Position - 1;
// Calculate dummy hash
dummyLineHash += (uint)readByte;
}
}
if (dummyLineHash != 0) {
// Add line hash and line position to the dict
AddToDictAndHash(dummyLineHash, startPosition, _stream.Position - startPosition);
// Reset line hash
dummyLineHash = 0;
}
}
public List<LinePosition> GetLinePositions(ulong hash)
{
return _hash2LinePositions[hash];
}
public List<string> GetDuplicates()
{
List<string> duplicates = new List<string>();
foreach (var key in _hash2LinePositions.Keys) {
List<LinePosition> linesPos = _hash2LinePositions[key];
if (linesPos.Count > 1) {
duplicates.AddRange(FindExactDuplicates(linesPos));
}
}
return duplicates;
}
public void Dispose()
{
if (_isDisposed)
return;
_stream.Dispose();
_isDisposed = true;
}
private void AddToDictAndHash(ulong hash, long start, long count)
{
List<LinePosition> linesPosition;
if (!_hash2LinePositions.TryGetValue(hash, out linesPosition)) {
linesPosition = new List<LinePosition>() { new LinePosition(start, count) };
_hash2LinePositions.Add(hash, linesPosition);
}
else {
linesPosition.Add(new LinePosition(start, count));
}
Hashes.Add(hash);
}
public byte[] GetLineAsByteArray(LinePosition prevPos)
{
long len = prevPos.Length;
byte[] lineBytes = new byte[len];
_stream.Seek(prevPos.Start, SeekOrigin.Begin);
_stream.Read(lineBytes, 0, (int)len);
return lineBytes;
}
private List<string> FindExactDuplicates(List<LinePosition> linesPos)
{
List<string> duplicates = new List<string>();
linesPos.Sort((x, y) => x.Length.CompareTo(y.Length));
LinePosition prevPos = linesPos[0];
for (int i = 1; i < linesPos.Count; i++) {
if (prevPos.Length == linesPos[i].Length) {
var prevLineArray = GetLineAsByteArray(prevPos);
var thisLineArray = GetLineAsByteArray(linesPos[i]);
if (prevLineArray.SequenceEqual(thisLineArray)) {
var line = System.Text.Encoding.Default.GetString(prevLineArray);
duplicates.Add(line);
}
#if false
string prevLine = System.Text.Encoding.Default.GetString(prevLineArray);
string thisLine = System.Text.Encoding.Default.GetString(thisLineArray);
Console.WriteLine("PrevLine: {0}\r\nThisLine: {1}", prevLine, thisLine);
StringBuilder sb = new StringBuilder();
sb.Append(prevPos);
sb.Append(" is '");
sb.Append(prevLine);
sb.Append("'. ");
sb.AppendLine();
sb.Append(linesPos[i]);
sb.Append(" is '");
sb.Append(thisLine);
sb.AppendLine("'. ");
sb.Append("Equals => ");
sb.Append(prevLine.CompareTo(thisLine) == 0);
Console.WriteLine(sb.ToString());
#endif
}
else {
prevPos = linesPos[i];
}
}
return duplicates;
}
}
public static void Main(String[] args)
{
List<TextFileHasher> textFileHashers = new List<TextFileHasher>();
string text1 = "abc\r\ncba\r\nabc";
TextFileHasher tfh1 = new TextFileHasher("Text1", new MemoryStream(System.Text.Encoding.Default.GetBytes(text1)));
tfh1.CalculateFileHash();
textFileHashers.Add(tfh1);
string text2 = "def\r\ncba\r\nwet";
TextFileHasher tfh2 = new TextFileHasher("Text2", new MemoryStream(System.Text.Encoding.Default.GetBytes(text2)));
tfh2.CalculateFileHash();
textFileHashers.Add(tfh2);
string text3 = "def\r\nbla\r\nwat";
TextFileHasher tfh3 = new TextFileHasher("Text3", new MemoryStream(System.Text.Encoding.Default.GetBytes(text3)));
tfh3.CalculateFileHash();
textFileHashers.Add(tfh3);
List<string> totalDuplicates = new List<string>();
Dictionary<ulong, Dictionary<TextFileHasher, List<LinePosition>>> totalHashes = new Dictionary<ulong, Dictionary<TextFileHasher, List<LinePosition>>>();
textFileHashers.ForEach(tfh => {
foreach(var dummyHash in tfh.Hashes) {
Dictionary<TextFileHasher, List<LinePosition>> tfh2LinePositions = null;
if (!totalHashes.TryGetValue(dummyHash, out tfh2LinePositions))
totalHashes[dummyHash] = new Dictionary<TextFileHasher, List<LinePosition>>() { { tfh, tfh.GetLinePositions(dummyHash) } };
else {
List<LinePosition> linePositions = null;
if (!tfh2LinePositions.TryGetValue(tfh, out linePositions))
tfh2LinePositions[tfh] = tfh.GetLinePositions(dummyHash);
else
linePositions.AddRange(tfh.GetLinePositions(dummyHash));
}
}
});
HashSet<TextFileHasher> alreadyGotDuplicates = new HashSet<TextFileHasher>();
foreach(var hash in totalHashes.Keys) {
var tfh2LinePositions = totalHashes[hash];
var tfh = tfh2LinePositions.Keys.FirstOrDefault();
// Get duplicates in the TextFileHasher itself
if (tfh != null && !alreadyGotDuplicates.Contains(tfh)) {
totalDuplicates.AddRange(tfh.GetDuplicates());
alreadyGotDuplicates.Add(tfh);
}
if (tfh2LinePositions.Count <= 1) {
continue;
}
// Algo to get duplicates in more than 1 TextFileHashers
var tfhs = tfh2LinePositions.Keys.ToArray();
for (int i = 0; i < tfhs.Length; i++) {
var tfh1Positions = tfhs[i].GetLinePositions(hash);
for (int j = i + 1; j < tfhs.Length; j++) {
var tfh2Positions = tfhs[j].GetLinePositions(hash);
for (int k = 0; k < tfh1Positions.Count; k++) {
var tfh1Pos = tfh1Positions[k];
var tfh1ByteArray = tfhs[i].GetLineAsByteArray(tfh1Pos);
for (int m = 0; m < tfh2Positions.Count; m++) {
var tfh2Pos = tfh2Positions[m];
if (tfh1Pos.Length != tfh2Pos.Length)
continue;
var tfh2ByteArray = tfhs[j].GetLineAsByteArray(tfh2Pos);
if (tfh1ByteArray.SequenceEqual(tfh2ByteArray)) {
var line = System.Text.Encoding.Default.GetString(tfh1ByteArray);
totalDuplicates.Add(line);
}
}
}
}
}
}
Console.WriteLine();
if (totalDuplicates.Count > 0) {
Console.WriteLine("Total number of duplicates: {0}", totalDuplicates.Count);
Console.WriteLine("#######################");
totalDuplicates.ForEach(x => Console.WriteLine("{0}", x));
Console.WriteLine("#######################");
}
// Free resources
foreach (var tfh in textFileHashers)
tfh.Dispose();
}
}
If you have tons of RAM... you guys are overthinking it...
var fileLines = File.ReadAllLines(@"c:\file.csv").Distinct();
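And if the whole set really does fit in memory, the same idea scales to counting duplicates across all 900 files with a plain dictionary, no Redis or Mongo needed (just a sketch; the directory path and file pattern are made up):
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;

class DupeCounter
{
    static void Main()
    {
        // Hypothetical input location - point this at the real hash files.
        var files = Directory.EnumerateFiles(@"C:\hashes", "*.txt");
        var counts = new Dictionary<string, int>();
        foreach (var file in files)
        {
            // File.ReadLines streams the file instead of loading it whole.
            foreach (var line in File.ReadLines(file))
            {
                int n;
                counts.TryGetValue(line, out n);
                counts[line] = n + 1;
            }
        }
        // Anything seen more than once is a duplicate across the corpus.
        foreach (var dupe in counts.Where(kv => kv.Value > 1))
            Console.WriteLine("{0} x{1}", dupe.Key, dupe.Value);
    }
}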
How can I count the number of functions in a C program file using a C# program? I have created a simple C# class to count the LOC in a C file.
private bool IsInMultipleComment = false;
private int getNumberOFFuncions(FileInfo fs)
{
StreamReader rdr;
int count = 0;
string tempStr;
// initialize
rdr = fs.OpenText();
tempStr = rdr.ReadLine();
while (true)
{
if (tempStr == null)
break;
if (IsFunction(tempStr))
count++;
tempStr = rdr.ReadLine();
}
return count;
}
Supportive method:
private bool IsFunction(string line)
{
if (line.Contains("//"))
return false;
if (line.Contains("/*"))
IsInMultipleComment = true;
if (line.Contains("*/"))
IsInMultipleComment = false;
if (!IsInMultipleComment)
{
if (line.Contains("void") || line.Contains("int") || line.Contains("short") || line.Contains("long") || line.Contains("float") || line.Contains("char") || line.Contains("double"))
{
if (!line.Contains(";"))
{
return true;
}
}
}
return false;
}
This is how I count variables:
private int getNumberOfVariables(FileInfo fs)
{
StreamReader rdr;
int count = 0;
string tempStr;
// initialize
rdr = fs.OpenText();
tempStr = rdr.ReadLine();
while (true)
{
if (tempStr == null)
break;
count += getVariblesOfLine(tempStr);
tempStr = rdr.ReadLine();
}
return count;
}
Supportive method:
private int getVariblesOfLine(string line)
{
line = line.Trim(); // trim the lines
if (line.Contains("#")) // remove preprocessive declarations
return 0;
if (line.Contains("//"))
return 0;
if (line.Contains("/*"))
IsInMultipleComment = true;
if (line.Contains("*/"))
IsInMultipleComment = false;
if (!IsInMultipleComment)
{
if (line.Contains("unsigned") || line.Contains("signed") || line.Contains("int") || line.Contains("short") || line.Contains("long") || line.Contains("float") || line.Contains("char") || line.Contains("double"))
{
if (!line.Contains("(")) // remove if this is function
{
Console.WriteLine(line);
if (line.Contains(",")) // count at multiple declarations
{
int y = line.Count(f => f == ',');
return y + 1;
}
return 1;
}
}
}
return 0;
}
Learn about regexes. There is a string pattern to what a function declaration looks like. You won't catch every possible contortion of a function, but you can get most of them if you declare functions in the generally accepted .NET way. Expresso is a good learning tool for helping you get the hang of regex patterns.
Here is a pattern to identify a function. It looks crazy, but it's not - Expresso will decode it for you. It's not fully developed, in that it won't catch private functions where you don't put the word private in front of them, and it doesn't handle protected internal. There are probably many more cases it won't catch.
Regex regex = new Regex(@"\s*(private|public|internal|protected)\s*\w+\s+([a-zA-Z_0-9.]+)\s*\(.*\)", RegexOptions.Compiled);
if (regex.IsMatch(lineOfCode))
{
//then it's a function
}
On another note, don't keep opening and re-reading the file. Open it once, make a pass, that's it.
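Applied to the original question (a C file rather than C#), a single-pass version might look roughly like this; the regex is only a heuristic and will still miss some declaration styles, but it reads the file once and skips prototypes that end in a semicolon:
using System;
using System.IO;
using System.Text.RegularExpressions;

class FunctionCounter
{
    // Very rough heuristic for a C function definition: optional storage/sign
    // keywords, a basic return type, a name, a parameter list, and no trailing
    // semicolon (which would indicate a prototype rather than a definition).
    static readonly Regex CFunction = new Regex(
        @"^\s*(?:unsigned\s+|signed\s+|static\s+)*(?:void|int|short|long|float|double|char)\b[\w\s\*]*\b(\w+)\s*\([^;]*\)\s*\{?\s*$",
        RegexOptions.Compiled);

    static int CountFunctions(string path)
    {
        int count = 0;
        // Open the file once and make a single pass over it.
        foreach (var line in File.ReadLines(path))
        {
            if (CFunction.IsMatch(line))
                count++;
        }
        return count;
    }

    static void Main(string[] args)
    {
        Console.WriteLine(CountFunctions(args[0]));
    }
}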
I've got some code (in JavaScript) to do line counts and such on C# files; you might be able to pull out some of the regex patterns. Note how the regexes are kept in an object (a dictionary in .NET). In JavaScript, /pattern/ is the same as .NET "pattern".
module.exports = ( function() {
var classifiers = [] ;
classifiers.push(
{
ctype: "using",
regex: /^(\s*using\s*)([a-zA-Z_0-9.]+)/,
extractMethod: function(lineInfo) {
lineInfo.extractValue = lineInfo.line.split(this.regex)[2] ;
}
},
{
ctype: "namespace",
regex: /^(\s*namespace\s*)([a-zA-Z_0-9.]+)/,
extractMethod: function(lineInfo) {
lineInfo.extractValue = lineInfo.line.split(this.regex)[2] ;
}
},
{
ctype: "comment",
regex: /^\s*\/\/[/ A-Za-z,*]*/,
extractMethod: function(lineInfo) {
lineInfo.extractValue = null ;
}
},
{
ctype: "n/a",
regex: /^\s*$|^\s*[;{}]+?\s*$/,
extractMethod: function(lineInfo) {
lineInfo.extractValue = null ;
}
}
);
function classifyLine(line, lineNo) {
var lineInfo = {} ;
lineInfo.line = line ;
lineInfo.lineNo = lineNo;
for (var index = 0; index < classifiers.length; index++) {
var classifier = classifiers[index];
if (classifier.regex.test(line)) {
lineInfo.ctype = classifier.ctype;
lineInfo.line = line ;
classifier.extractMethod(lineInfo) ;
break ;
}
}
if (lineInfo.ctype == undefined){
lineInfo.ctype = "code" ;
}
return lineInfo ;
}
return {
classifyLine : classifyLine
};
} )();