I have the query below and it is working fine; I can see the results in the console. But I need to write all the results into a txt file so I can load them into a table. The code as is only writes one line to the txt file. How can I make it write all lines from the output into the txt file? I'd really appreciate any help on this.
using System;
using Microsoft.AnalysisServices.AdomdClient;
using System.IO;
using System.Diagnostics;
using System.Text;
namespace PowerQry
{
class Program
{
#pragma warning disable IDE0060 // Remove unused parameter
static void Main(string[] args)
#pragma warning restore IDE0060 // Remove unused parameter
{
AdomdConnection adomdConnection = new AdomdConnection("Data Source=localhost:49971");
String query = #"
EVALUATE
SUMMARIZECOLUMNS(
Customer[City],
Customer[Country-Region],
Customer[Customer],
Customer[Customer ID],
Customer[CustomerKey]
)
";
AdomdCommand adomdCommand = new AdomdCommand(query, adomdConnection);
/*******************************************************
connection
*******************************************************/
adomdConnection.Open();
AdomdDataReader reader = adomdCommand.ExecuteReader();
// Create a loop for every row in the resultset
while (reader.Read())
{
String rowResults = "";
// Create a loop for every column in the current row --need to add header
for (
int columnNumber = 0;
columnNumber < reader.FieldCount;
columnNumber++
)
{
rowResults += $"\t{reader.GetValue(columnNumber)}";
}
//Console.WriteLine(rowResults);
//--write all lines to txt
{
string UserName = System.Environment.UserName;
string fileName = #"C:\Temp\nm.txt";
FileStream ostrm;
StreamWriter writer;
TextWriter oldOut = Console.Out;
try
{
ostrm = new FileStream(fileName, FileMode.OpenOrCreate, FileAccess.Write);
writer = new StreamWriter(ostrm);
for (int i = 0; i < 10; i++) ;
}
catch (Exception e)
{
Console.WriteLine("Cannot open Redirect.txt for writing");
Console.WriteLine(e.Message);
return;
}
Console.SetOut(writer);
Console.WriteLine(rowResults);
Console.SetOut(oldOut);
writer.Close();
ostrm.Close();
Console.WriteLine("Done");
}
//==
}
adomdConnection.Close();
}
}
}
You can use File.AppendAllLines.
Instead of appending to a single string instance, you can use a List<string>:
//String rowResults = "";
List<string> rows = new List<string>();
// Create a loop for every column in the current row --need to add header
for (int columnNumber = 0; columnNumber < reader.FieldCount; columnNumber++)
{
// tabulator should be removed
rows.Add($"\t{reader.GetValue(columnNumber)}");
// rowResults += $"\t{reader.GetValue(columnNumber)}";
}
File.AppendAllLines(@"C:\output.txt", rows); // substitute your own path
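Note that as written this adds one list entry per column, so every column value lands on its own line. If you want one tab-separated line per row (ready to bulk-load into a table), build the row string inside the column loop and add one entry per row. A minimal sketch of the reworked read loop, reusing the reader and the C:\Temp\nm.txt path from the question:

List<string> rows = new List<string>();
while (reader.Read())
{
    // Collect every column of the current row, then join the values with tabs
    string[] values = new string[reader.FieldCount];
    for (int columnNumber = 0; columnNumber < reader.FieldCount; columnNumber++)
    {
        values[columnNumber] = Convert.ToString(reader.GetValue(columnNumber));
    }
    rows.Add(string.Join("\t", values));
}
// A single write after the loop replaces the file once instead of reopening it per row
File.WriteAllLines(@"C:\Temp\nm.txt", rows);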
Related
I am trying to insert data into a csv file. I tried using the XLWorkbook reference to access and insert the data, but I know that XLWorkbook only supports the extensions xlsx, xlsm, xltx and xltm.
I am trying to find something similar through which I can insert data into a specified column in a csv file. I have used XLWorkbook for other purposes, but I am not aware of what I can use when I have to work with csv.
//Accessing the csv file where I am trying to insert data.
string rootPath = Path.GetDirectoryName(Assembly.GetExecutingAssembly().CodeBase);
string filelocation = #"\csv\TestData.csv";
string location = rootPath + filelocation;
XLWorkbook workbook = new XLWorkbook(location);
IXLWorksheet worksheet = workbook.Worksheet("Sheet1");
//Insert data after first row as first row contains column header
int lastrow = worksheet.LastRowUsed().RowNumber() + 1;
//through previous function I am trying get data from database and insert those data into csv cells
worksheet.Cell(String.Format("B{0}", lastrow)).Value = dummydata.FirstName;
worksheet.Cell(String.Format("C{0}", lastrow)).Value = dummydata.LastName;
worksheet.Cell(String.Format("D{0}", lastrow)).Value = dummydata.Address1;
worksheet.Cell(String.Format("E{0}", lastrow)).Value = dummydata.Address2;
worksheet.Cell(String.Format("F{0}", lastrow)).Value = dummydata.City;
worksheet.Cell(String.Format("G{0}", lastrow)).Value = dummydata.StateProvinceCode;
worksheet.Cell(String.Format("H{0}", lastrow)).Value = dummydata.ZipCode;
worksheet.Cell(String.Format("I{0}", lastrow)).Value = dummydata.Country;
worksheet.Cell(String.Format("J{0}", lastrow)).Value = dummydata.HomePhone;
worksheet.Cell(String.Format("L{0}", lastrow)).Value = dummydata.HomePhone;
worksheet.Cell(String.Format("M{0}", lastrow)).Value = dummydata.CellPhone;
worksheet.Cell(String.Format("T{0}", lastrow)).Value = dummydata.Email;
worksheet.Cell(String.Format("U{0}", lastrow)).Value = dummydata.Country;
//After inserting save the file
workbook.Save();
You can simply copy and use this code as is. It should resolve your issues.
Here's the class I developed to replace and/or add csv cells:
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
namespace CSVManager
{
public class CSVWorker
{
private string m_FileName = string.Empty;
public CSVWorker(string fileName)
{
m_FileName = fileName;
}
public void AddCells(int row, int column, string newValue)
{
var encoding = Encoding.GetEncoding("iso-8859-1");
var csvLines = File.ReadAllLines(m_FileName, encoding);
if (row < csvLines.Length)
{
ReplaceCells(row, column, newValue);
}
else
{
using (FileStream stream = new FileStream(m_FileName, FileMode.Create))
{
using (StreamWriter writer = new StreamWriter(stream, encoding))
{
foreach (var line in csvLines)
{
writer.WriteLine(line);
}
int blankLines = row - csvLines.Length; // pad up to (but not including) the target row index
for (int i = 0; i < blankLines; i++)
{
writer.WriteLine("");
}
string blankCols = string.Empty;
for (int i = 0; i < column-1; i++)
{
blankCols += ',';
}
writer.WriteLine(blankCols + newValue);
}
}
}
}
public void ReplaceCells(int row, int column, string newValue)
{
var encoding = Encoding.GetEncoding("iso-8859-1");
var csvLines = File.ReadAllLines(m_FileName, encoding);
for (int i = 0; i < csvLines.Length; i++)
{
//var values = csvLines[i].Split(',');
List<string> values = csvLines[i].Split(',').ToList();
if (i == row)
{
if (column < values.Count)
{
values[column] = newValue;
}
else
{
// Use List<T>.Add here: LINQ's Append returns a new sequence and leaves the list unchanged
while (values.Count < column)
{
values.Add(string.Empty);
}
values.Add(newValue);
}
using (FileStream stream = new FileStream(m_FileName, FileMode.Create))
{
using (StreamWriter writer = new StreamWriter(stream, encoding))
{
for (int currentLine = 0; currentLine < csvLines.Length; ++currentLine)
{
if (currentLine == i)
{
writer.WriteLine(string.Join(",", values));
}
else
{
writer.WriteLine(csvLines[currentLine]);
}
}
writer.Close();
}
stream.Close();
break;
}
}
}
}
}
}
Here's how I used it:
namespace CSVManager
{
class Program
{
static void Main(string[] args)
{
string fileName = #"C:\Users\mklig\Documents\TestCsv.csv";
CSVWorker csvWorker = new CSVWorker(fileName);
int row = 4;
int col = 4;
string newVal = "success";
//csvWorker.ReplaceCells(row, col, newVal);
csvWorker.AddCells(row, col, newVal);
}
}
}
So I am having an issue with my output being written to a CSV file. The output of this code is in the correct format when written to the CSV file, but only a single row ends up in the file. There should be many more, around 150 lines. The current output is:
(859.85 7N830127 185)
which is correct, but there should be more of these. It seems to me that it is only writing the first line of the parsed EDI file and then stopping. I need to find a way to make sure it writes all the data that is being parsed. Can anyone help me?
static void Main(string[] args)
{
StreamReader sr = new StreamReader("edifile.txt");
string[] ediMapTemp = sr.ReadLine().Split('|');
List<string[]> ediMap = new List<string[]>();
List<object[]> outputMatrix = new List<object[]>();
foreach (var line in ediMapTemp)
{
ediMap.Add(line.Split('~'));
}
DetailNode node = new DetailNode(0, null, 0);
int hierarchicalDepth = 0;
int hierarchicalIdNumber;
int hierarchicalParentIdNumber;
int hierarchicalLevelCode;
int hierarchicalChildCode = 0;
for (int i = 0; i < ediMap.Count; i++)
{
string segmentHeader = ediMap[i][0];
if (segmentHeader == "HL")
{
hierarchicalIdNumber = Convert.ToInt32(ediMap[i][1]);
hierarchicalParentIdNumber = Convert.ToInt32(ediMap[i][2]);
hierarchicalLevelCode = Convert.ToInt32(ediMap[i][3]);
hierarchicalChildCode = Convert.ToInt32(ediMap[i][4]);
List<string[]> levelDetails = new List<string[]>();
for (int v = i + 1; v < ediMap.Count; v++)
{
if (ediMap[v][0] == "HL") break;
levelDetails.Add(ediMap[v]);
}
DetailNode getNode = node.Find(node, hierarchicalParentIdNumber);
getNode.headList.Add(new DetailNode(hierarchicalIdNumber, levelDetails, getNode.depth + 1));
}
}
node.Traversal(new VID(), node);
foreach (var vid in VIDList.vidList)
using (StreamWriter writer = new StreamWriter("Import.csv"))
{
//probably a loop here
writer.WriteLine(String.Join(",", vid.totalCurrentCharges, vid.assetId, vid.componentName, vid.recurringCharge));
}
}
Quick review: shouldn't the code be the following?
using (StreamWriter writer = new StreamWriter("Import.csv"))
{
//a loop here
foreach (var vid in VIDList.vidList)
{
writer.WriteLine(String.Join(",", vid.totalCurrentCharges, vid.assetId, vid.componentName, vid.recurringCharge));
}
}
You would open the file once and then loop through your collection, writing each entry.
It looks to me like you're re-opening the output file for each line you're trying to write, so you're overwriting the output file with a new file for each line. That would mean only the last entry remains in the file. Try moving this line
using (StreamWriter writer = new StreamWriter("Import.csv"))
Outside the foreach loop.
I'm trying to read from a text file into an array, change one element, and then write the array back to the file without appending to the original text. But it says the file is being used by another process. Any help is appreciated.
using (StreamReader readName = new StreamReader("fileA"))
{
using (StreamReader readColour = new StreamReader("fileB"))
{
var lineCount = File.ReadLines("fileB").Count();
string[] linesA = new string[lineCount];
string[] linesB = new string[lineCount];
for (int a = 0; a < linesA.Length; a++)
{
if (linesA[a] == UserVariables.userNameC)
{
linesB[a] = UserVariables.colourC.ToString();
}
}
}
}
try
{
using (StreamWriter colourWrite = new StreamWriter("fileB"))
{
for (int a = 0; a < linesB.Length; a++)
colourWrite.WriteLine(linesB[a], false);
}
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString(), "Error");
}
}
You are trying to read fileB twice:
using (StreamReader readColour = new StreamReader("fileB")) // <-- this opens the file here and leaves it open
{
    var lineCount = File.ReadLines("fileB").Count();        // <-- this tries to open it, read it and close it,
                                                            //     but it can't, because you have it open above
This part opens fileB two times:
using (StreamReader readColour = new StreamReader("fileB"))
{
var lineCount = File.ReadLines("fileB").Count();
Try adding the line readColour.Close(); between your read and write sections.
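For what it's worth, here's a minimal sketch of the whole read-modify-write cycle that avoids the lock entirely, assuming the same UserVariables fields as in the question. File.ReadAllLines and File.WriteAllLines each open and close the file around the call, so nothing stays locked:

// Read both files completely; each call closes its file before returning
string[] linesA = File.ReadAllLines("fileA");
string[] linesB = File.ReadAllLines("fileB");

// Update the colour entry on each row whose name matches
for (int a = 0; a < linesA.Length; a++)
{
    if (linesA[a] == UserVariables.userNameC)
    {
        linesB[a] = UserVariables.colourC.ToString();
    }
}

// Overwrite fileB with the modified lines (no append)
File.WriteAllLines("fileB", linesB);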
I want to create a generic text file parser in C# for any kind of text file. I have 4 applications, and all 4 get their input data in txt file format, but the text files are not homogeneous in nature. I have tried a fixed-width delimited approach:
private static DataTable FixedWidthDiliminatedTxtRead()
{
string[] fields;
StringBuilder sb = new StringBuilder();
List<StringBuilder> lst = new List<StringBuilder>();
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(testOCC)) // TextFieldParser lives in Microsoft.VisualBasic.FileIO
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(new int[12] { 2,25,8,12,13,5,6,3,10,11,10,24 });
for (int col = 1; col < 13; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count)
dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
but I feel it's a very rigid approach, and it really varies from application to application, so I'd like to make it configurable. Any better way?
tfp.SetFieldWidths(new int[12] { 2,25,8,12,13,5,6,3,10,11,10,24 });
File nature:
It's a report kind of file.
The positions of the columns are very similar.
The row data of each file is different.
I got this as a reference:
http://www.codeproject.com/Articles/11698/A-Portable-and-Efficient-Generic-Parser-for-Flat-F
Any other thoughts?
If the only thing different is the field widths, you could just try sending the field widths in as a parameter:
private static DataTable FixedWidthDiliminatedTxtRead(int[] fieldWidthArray)
{
string[] fields;
StringBuilder sb = new StringBuilder();
List<StringBuilder> lst = new List<StringBuilder>();
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(testOCC))
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(fieldWidthArray);
for (int col = 1; col <= fieldWidthArray.Length; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count)
dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
If you will have more logic to grab the data, you might want to consider defining an interface or abstract class for a GenericTextParser and creating concrete implementations for each file type; a sketch of that shape follows.
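For example, a minimal sketch of that shape, reusing the fixed-width logic from above (ITextParser and TextParserFactory are illustrative names, not an existing API):

using System;
using System.Data;
using Microsoft.VisualBasic.FileIO; // provides TextFieldParser and FieldType

// One parser per file format, all behind the same interface
public interface ITextParser
{
    DataTable Parse(string fileName);
}

// Wraps the fixed-width reading shown above
public class FixedWidthParser : ITextParser
{
    private readonly int[] _fieldWidths;
    public FixedWidthParser(int[] fieldWidths) { _fieldWidths = fieldWidths; }

    public DataTable Parse(string fileName)
    {
        DataTable dtable = new DataTable();
        using (TextFieldParser tfp = new TextFieldParser(fileName))
        {
            tfp.TextFieldType = FieldType.FixedWidth;
            tfp.SetFieldWidths(_fieldWidths);
            for (int col = 1; col <= _fieldWidths.Length; ++col)
                dtable.Columns.Add("COL" + col);
            while (!tfp.EndOfData)
            {
                string[] fields = tfp.ReadFields();
                if (fields.Length == dtable.Columns.Count)
                    dtable.Rows.Add(fields);
            }
        }
        return dtable;
    }
}

// The factory picks an implementation from a format key (e.g. read from configuration)
public static class TextParserFactory
{
    public static ITextParser Create(string format, int[] fieldWidths)
    {
        switch (format)
        {
            case "fixedwidth": return new FixedWidthParser(fieldWidths);
            default: throw new NotSupportedException("Unknown format: " + format);
        }
    }
}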
Hey, I made one of these last week.
I did not write it with the intention of other people using it, so I apologize in advance if it's not documented well, but I cleaned it up for you. Also, I grabbed several segments of code from Stack Overflow, so I am not the original author of several pieces of this.
The places you need to edit are path and pathOut, and the text separators:
char[] delimiters = new char[]
It searches for part of a word and then grabs the whole word. I used a C# console application for this.
Here you go:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;
namespace UniqueListofStringFinder
{
class Program
{
static void Main(string[] args)
{
string path = #"c:\Your Path\in.txt";
string pathOut = #"c:\Your Path\out.txt";
string data = "!";
Console.WriteLine("Current Path In is set to: " + path);
Console.WriteLine("Current Path Out is set to: " + pathOut);
Console.WriteLine(Environment.NewLine + Environment.NewLine + "Input String to Search For:");
string input = Console.ReadLine();
// Create the file with some sample text if it doesn't exist yet.
if (!File.Exists(path))
{
// Create the file.
using (FileStream fs = File.Create(path))
{
Byte[] info =
new UTF8Encoding(true).GetBytes("This is some text in the file.");
// Add some information to the file.
fs.Write(info, 0, info.Length);
}
}
List<string> Spec = new List<string>();
using (StreamReader file = new StreamReader(path)) // a single reader, disposed by the using block
{
while (!file.EndOfStream)
{
string s = file.ReadLine();
if (s.Contains(input))
{
char[] delimiters = new char[] { '\r', '\n', '\t', ')', '(', ',', '=', '"', '\'', '<', '>', '$', ' ', '#', '[', ']' };
string[] parts = s.Split(delimiters,
StringSplitOptions.RemoveEmptyEntries);
foreach (string word in parts)
{
if (word.Contains(input))
{
if( word.IndexOf(input) == 0)
{
Spec.Add(word);
}
}
}
}
}
Spec.Sort();
// Open the stream and read it back.
//while ((s = sr.ReadLine()) != null)
//{
// Console.WriteLine(s);
//}
}
Console.WriteLine();
StringBuilder builder = new StringBuilder();
foreach (string s in Spec) // Loop through all strings
{
builder.Append(s).Append(Environment.NewLine); // Append string to StringBuilder
}
string result = builder.ToString(); // Get string from StringBuilder
Program a = new Program();
data = a.uniqueness(result);
int i = a.writeFile(data,pathOut);
}
public string uniqueness(string rawData)
{
if (rawData == "")
{
return "Empty Data Set";
}
List<string> dataVar = new List<string>();
List<string> holdData = new List<string>();
bool testBool = false;
using (StringReader reader = new StringReader(rawData))
{
string line;
while ((line = reader.ReadLine()) != null)
{
foreach (string s in holdData)
{
if (line == s)
{
testBool = true;
}
}
if (testBool == false)
{
holdData.Add(line);
}
testBool = false;
// Do something with the line
}
}
string dataOut = "";
foreach (string s in holdData)
{
dataOut += s + "\r\n";
}
// Write the string to a file.
return dataOut;
}
public int writeFile(string dataOut, string pathOut)
{
try
{
System.IO.StreamWriter file = new System.IO.StreamWriter(pathOut);
file.WriteLine(dataOut);
file.Close();
}
catch (Exception ex)
{
dataOut += ex.ToString();
return 1;
}
return 0;
}
}
}
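A more general version of the fixed-width reader, taking both the file name and the field widths as parameters: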
private static DataTable FixedWidthTxtRead(string filename, int[] fieldWidths)
{
string[] fields;
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(filename))
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(fieldWidths);
for (int col = 1; col <= fieldWidths.Length; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count) dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
Here's what I did:
I built a factory for the type of processor needed (based on file type/format), which abstracted the file reader.
I then built a collection object that contained a set of triggers for each field I was interested in (it also contained the property name for which each field is destined). This settings collection is loaded in via an XML configuration file, so all I need to change are the settings, and the base parsing process reacts to however the settings are configured. Finally, I built a reflection wrapper wherein, once a field is parsed, the corresponding property on the model object is set.
As the file flowed through, the triggers for each setting evaluated each line's value. When a trigger found what it was set to find (via pattern matching or column-length values), it fired an event that bubbled up and set a property on the model object. Rough pseudo code for the idea is sketched below; it needs some work for efficiency's sake, but I like the concept.
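A compilable approximation of that idea (FieldTrigger, TriggerParser, and the capture-group convention are illustrative assumptions, not the original code; it also assumes string-typed model properties):

using System.Collections.Generic;
using System.IO;
using System.Text.RegularExpressions;

// A configured trigger: a pattern to watch for, and the model property it feeds.
// In the real version these would be loaded from the XML configuration file.
public class FieldTrigger
{
    public Regex Pattern { get; set; }
    public string PropertyName { get; set; }
}

public static class TriggerParser
{
    // Evaluate every trigger against every line; on a match, set the named
    // property on the model via reflection (the field is capture group 1).
    public static T Parse<T>(string fileName, IEnumerable<FieldTrigger> triggers) where T : new()
    {
        T model = new T();
        foreach (string line in File.ReadLines(fileName))
        {
            foreach (FieldTrigger trigger in triggers)
            {
                Match m = trigger.Pattern.Match(line);
                if (m.Success)
                {
                    typeof(T).GetProperty(trigger.PropertyName)
                             .SetValue(model, m.Groups[1].Value, null);
                }
            }
        }
        return model;
    }
}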
Morning,
I'm trying to split a large text file (15,000,000 rows) using StreamReader/StreamWriter. Is there a quicker way?
I tested it with 130,000 rows and it took 2 min 40 sec, which implies 15,000,000 rows will take approximately 5 hours; that seems a bit excessive.
//Perform split.
public void SplitFiles(int[] newFiles, string filePath, int processorCount)
{
using (StreamReader Reader = new StreamReader(filePath))
{
for (int i = 0; i < newFiles.Length; i++)
{
string extension = System.IO.Path.GetExtension(filePath);
string temp = filePath.Substring(0, filePath.Length - extension.Length)
+ i.ToString();
string FilePath = temp + extension;
if (!File.Exists(FilePath))
{
for (int x = 0; x < newFiles[i]; x++)
{
DataWriter(Reader.ReadLine(), FilePath);
}
}
else
{
return;
}
}
}
}
public void DataWriter(string rowData, string filePath)
{
bool appendData = true;
using (StreamWriter sr = new StreamWriter(filePath, appendData))
{
{
sr.WriteLine(rowData);
}
}
}
Thanks for your help.
You haven't made it very clear, but I'm assuming that the value of each element of the newFiles array is the number of lines to copy from the original into that file. Note that currently you don't detect the situation where there's either extra data at the end of the input file, or it's shorter than expected. I suspect you want something like this:
public void SplitFiles(int[] newFiles, string inputFile)
{
string baseName = Path.GetFileNameWithoutExtension(inputFile);
string extension = Path.GetExtension(inputFile);
using (TextReader reader = File.OpenText(inputFile))
{
for (int i = 0; i < newFiles.Length; i++)
{
string outputFile = baseName + i + extension;
if (File.Exists(outputFile))
{
// Better than silently returning, I'd suggest...
throw new IOException("File already exists: " + outputFile);
}
int linesToCopy = newFiles[i];
using (TextWriter writer = File.CreateText(outputFile))
{
for (int j = 0; j < linesToCopy; j++)
{
string line = reader.ReadLine();
if (line == null)
{
return; // Premature end of input
}
writer.WriteLine(line);
}
}
}
}
}
Note that this still won't detect if there's any unconsumed input... it's not clear what you want to do in that situation.
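If you did want to detect leftover input, a small helper called just before the reader's using block closes would do; a sketch (EnsureFullyConsumed is a hypothetical name):

private static void EnsureFullyConsumed(TextReader reader)
{
    // If another line can still be read, the newFiles counts didn't cover the whole input.
    if (reader.ReadLine() != null)
    {
        throw new IOException("Unconsumed input: the source file has more lines than expected.");
    }
}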
One option for code clarity is to extract the middle of this into a separate method:
public void SplitFiles(int[] newFiles, string inputFile)
{
string baseName = Path.GetFileNameWithoutExtension(inputFile);
string extension = Path.GetExtension(inputFile);
using (TextReader reader = File.OpenText(inputFile))
{
for (int i = 0; i < newFiles.Length; i++)
{
string outputFile = baseName + i + extension;
// Could put this into the CopyLines method if you wanted
if (File.Exists(outputFile))
{
// Better than silently returning, I'd suggest...
throw new IOException("File already exists: " + outputFile);
}
CopyLines(reader, outputFile, newFiles[i]);
}
}
}
private static void CopyLines(TextReader reader, string outputFile, int count)
{
using (TextWriter writer = File.CreateText(outputFile))
{
for (int i = 0; i < count; i++)
{
string line = reader.ReadLine();
if (line == null)
{
return; // Premature end of input
}
writer.WriteLine(line);
}
}
}
There are utilities for splitting files that may outperform your solution - e.g. search for "split file by line".
If they don't suit, there are solutions that load the whole source file into memory and then write out the files, but that probably isn't appropriate given the size of the source file.
In terms of improving your code, a minor improvement would be the generation of the destination file path (and also clarifying the confusion between the source filePath you use and the destination files). You don't need to re-establish the source file extension each time in your loop.
The second improvement (and probably the more significant one, as highlighted by commenters) is how you write out the destination files. Each destination file takes a differing number of lines from the source (the value in each newFiles entry). So I'd suggest that, for each entry, you read all the source lines relevant to the next destination file, then write the destination out once, rather than repeatedly opening the destination file. You could "gather" the lines in a StringBuilder/List etc., or just write them directly out to the destination file (but only opening it once):
public void SplitFiles(int[] newFiles, string sourceFilePath, int processorCount)
{
string sourceDirectory = System.IO.Path.GetDirectoryName(sourceFilePath);
string sourceFileName = System.IO.Path.GetFileNameWithoutExtension(sourceFilePath);
string extension = System.IO.Path.GetExtension(sourceFilePath);
using (StreamReader Reader = new StreamReader(sourceFilePath))
{
for (int i = 0; i < newFiles.Length; i++)
{
string destinationFileNameWithExtension = string.Format("{0}{1}{2}", sourceFileName, i, extension);
string destinationFilePath = System.IO.Path.Combine(sourceDirectory, destinationFileNameWithExtension);
if (!File.Exists(destinationFilePath))
{
// Read all the lines relevant to this destination file
// and temporarily store them in memory
StringBuilder destinationText = new StringBuilder();
for (int x = 0; x < newFiles[i]; x++)
{
destinationText.AppendLine(Reader.ReadLine()); // AppendLine keeps the line breaks
}
DataWriter(destinationFilePath, destinationText.ToString());
}
else
{
return;
}
}
}
}
private static void DataWriter(string destinationFilePath, string content)
{
using (StreamWriter sr = new StreamWriter(destinationFilePath))
{
sr.Write(content);
}
}
I've recently had to do this for several hundred files under 2 GB each (up to 1.92 GB), and the fastest method I found (if you have the memory available) is StringBuilder. All the other methods I tried were painfully slow.
Please note that this is memory dependent. Adjust "CurrentPosition = 130000" accordingly.
string CurrentLine = String.Empty;
int CurrentPosition = 0;
int CurrentSplit = 0;
foreach (string file in Directory.GetFiles(#"C:\FilesToSplit"))
{
StringBuilder sb = new StringBuilder();
using (StreamReader sr = new StreamReader(file))
{
while ((CurrentLine = sr.ReadLine()) != null)
{
if (CurrentPosition == 130000) // Or whatever you want to split by.
{
using (StreamWriter sw = new StreamWriter(#"C:\FilesToSplit\SplitFiles\" + Path.GetFileNameWithoutExtension(file) + "-" + CurrentSplit + "." + Path.GetExtension(file)))
{
// Append this line too (with its newline), so we don't lose it.
sb.AppendLine(CurrentLine);
// Write the StringBuilder contents
sw.Write(sb.ToString());
// Clear the StringBuilder buffer, so it doesn't get too big. You can adjust this based on your computer's available memory.
sb.Clear();
// Increment the CurrentSplit number.
CurrentSplit++;
// Reset the current line position. We've found 130,001 lines of text.
CurrentPosition = 0;
}
}
else
{
sb.AppendLine(CurrentLine);
CurrentPosition++;
}
}
}
// Flush any leftover lines that didn't reach the split size, so the tail of the file isn't lost.
if (sb.Length > 0)
{
using (StreamWriter sw = new StreamWriter(@"C:\FilesToSplit\SplitFiles\" + Path.GetFileNameWithoutExtension(file) + "-" + CurrentSplit + Path.GetExtension(file)))
{
sw.Write(sb.ToString());
}
}
// Reset the counters at the end of each file, otherwise the numbering can quickly go out of order.
CurrentPosition = 0;
CurrentSplit = 0;
}