I'm trying to write string data to a text file in a Windows Phone 8 app, but the text file never gets updated.
I'm writing with the code below:
public void update_file(Contact_List[] list) //Write to file
{
using (FileStream fs = new FileStream(@"contact_list.txt", FileMode.Open))
{
using (StreamWriter sw = new StreamWriter(fs))
{
for (int x = 0; x < list.Length; x++)
{
sw.WriteLine(list[x].first_name);
sw.WriteLine(list[x].last_name);
sw.WriteLine(list[x].number);
sw.WriteLine(list[x].email);
sw.WriteLine(list[x].company);
sw.WriteLine(list[x].favorite);
sw.WriteLine(list[x].group);
}
sw.Close();
}
fs.Close();
}
}
Where Contact_List is my custom struct, which contains the following string fields:
public string first_name;
public string last_name;
public string email;
public string number;
public string company;
public string favorite;
public string group;
The program itself runs without any error, including the reading, and while the program is running the written contents can even be displayed in the list box, but the written contents are never updated in the actual file.
The reading part is the following:
public class All_Contact : common_func //Counting number of lines in the file
{
public int count_lines()
{
int counter = 0;
var str = Application.GetResourceStream(new Uri(@"contact_list.txt", UriKind.Relative));
StreamReader sr = new StreamReader(str.Stream);
while (sr.ReadLine() != null)
{
counter++;
}
sr.Close();
sr.Dispose();
str.Stream.Close();
str.Stream.Dispose();
return counter;
}
public string[][] read_content (int ln) //Read and pick up the actual contents
{
string[][] temp = null;
int lines = ln;
temp = new string[lines / 7][];
var str = Application.GetResourceStream(new Uri(@"contact_list.txt", UriKind.Relative));
StreamReader sr = new StreamReader(str.Stream);
for (int x = 0; x < (lines / 7); x++)
{
temp[x] = new string[7];
for (int y = 0; y < 7; y++)
{
temp[x][y] = sr.ReadLine();
}
}
sr.Close();
sr.Dispose();
str.Stream.Close();
str.Stream.Dispose();
return temp;
}
I'm very new to programming Windows Phone 8 applications, so I don't have any idea how things work in the background; any detailed explanation will be appreciated.
Thank you.
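For context: Application.GetResourceStream reads from the app package, which is read-only, so on Windows Phone 8 data you want to write back usually has to live in isolated storage instead. A rough sketch of what the write side could look like with IsolatedStorageFile, assuming the same Contact_List struct and file name:
using System.IO;
using System.IO.IsolatedStorage;
public void update_file(Contact_List[] list) // Write to isolated storage
{
    // Isolated storage is the app-private, writable store on Windows Phone 8.
    using (IsolatedStorageFile store = IsolatedStorageFile.GetUserStoreForApplication())
    using (IsolatedStorageFileStream fs = store.OpenFile("contact_list.txt", FileMode.Create))
    using (StreamWriter sw = new StreamWriter(fs))
    {
        foreach (Contact_List contact in list)
        {
            sw.WriteLine(contact.first_name);
            sw.WriteLine(contact.last_name);
            sw.WriteLine(contact.number);
            sw.WriteLine(contact.email);
            sw.WriteLine(contact.company);
            sw.WriteLine(contact.favorite);
            sw.WriteLine(contact.group);
        }
    }
}
The read side would then open the same file with store.OpenFile("contact_list.txt", FileMode.Open) instead of GetResourceStream, since a resource stream only ever reflects the copy of the file that was built into the app package.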
Related
How do I get the total number of lines of the file while still inside a StreamWriter scope?
Based on the total line count I am writing some more lines at the end of the file.
I have tried the code below, but it throws an error message:
The process cannot access the file 'C:\a.txt' because it is being used by another process.
var lineCount = File.ReadLines(outputFilePath).Count();
This is my Code
private string CreateAndPushFile(string fileName)
{
string outputFilePath = string.Format(@"{0}\{1}", "C:\\a.txt", fileName);
using (StreamWriter output = new StreamWriter(outputFilePath))
{
// Creates the file header
string fileHeader = "kjhakljdhkjhkj";
output.Write(fileHeader);
string batchControl = "1515151"; // This value comes from database
output.Write(batchControl);
// Here there is some other logic that writes many more lines to the file using a foreach loop
string fileControl = "3123123"; // This value comes from database
output.WriteLine(fileControl);
// After this I need to write a few more lines so that the total number of lines in the file is a multiple of 10
var lineCount = File.ReadLines(outputFilePath).Count(); // I am getting error here
int remainder;
Math.DivRem(lineCount, 10, out remainder);
for (int i = 1; i <= 10 - remainder; i++)
{
output.WriteLine("9999999999999");
}
}
}
The file is still open in your StreamWriter when File.ReadLines tries to read it, which is what triggers that error. One way around it is to close the writer first, count the lines, and then reopen the file in append mode:
private static void CreateAndPushFile(string outputFilePath) {
using (var output = new StreamWriter(outputFilePath)) {
// Creates the file header
var fileHeader = "kjhakljdhkjhkj";
output.Write(fileHeader);
var batchControl = "1515151"; // This value comes from database
output.Write(batchControl);
// Here there is some other logic that writes many more lines to the file using a foreach loop
var fileControl = "3123123"; // This value comes from database
output.WriteLine(fileControl);
// After this I need to write a few more lines so that the total number of lines in the file is a multiple of 10
}
var lineCount = TotalLines(outputFilePath); // safe to read now: the writer above has been closed
var remainder = lineCount % 10;
using (var output2 = new StreamWriter(outputFilePath, true)) { // second parameter is for append
for (var i = 0; i < 10 - remainder; i++) {
output2.WriteLine("9999999999999");
}
}
}
private static int TotalLines(string filePath) {
using (var reader = new StreamReader(filePath)) {
char[] buffer = new char[1024];
var lineCount = 0;
while (!reader.EndOfStream) {
var charsRead = reader.Read(buffer, 0, 1024);
lineCount += buffer.Take(charsRead).Count(character => character == '\n');
}
return lineCount;
}
}
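As a possible simplification: if everything goes through the same StreamWriter, you could also keep a running line counter yourself and pad inside the same using block, which avoids re-reading the file entirely. A rough sketch (the method name and literal values are only for illustration):
private static void CreateAndPushFileCountingLines(string outputFilePath) {
    using (var output = new StreamWriter(outputFilePath)) {
        var lineCount = 0;
        output.WriteLine("kjhakljdhkjhkj"); lineCount++; // file header
        output.WriteLine("1515151");        lineCount++; // batch control
        // ... write the body lines here, incrementing lineCount once per WriteLine ...
        output.WriteLine("3123123");        lineCount++; // file control
        // Pad so the total number of lines becomes a multiple of 10.
        var remainder = lineCount % 10;
        if (remainder != 0) {
            for (var i = 0; i < 10 - remainder; i++) {
                output.WriteLine("9999999999999");
            }
        }
    }
}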
I'm trying to read a string with a StreamReader, but I don't know how.
using System;
using System.Diagnostics;
using System.IO;
using System.Text;
namespace
{
class Program
{
static void Main(string[] args)
{
string itemCostsInput = "25.34\n10.99\n250.22\n21.87\n50.24\n15";
string payerCountInput = "8\n";
string individualCostInput = "52.24\n";
double individualCost = RestaurantBillCalculator.CalculateIndividualCost(reader2, totalCost);
Debug.Assert(individualCost == 54.14);
uint payerCount = RestaurantBillCalculator.CalculatePayerCount(reader3, totalCost);
Debug.Assert(payerCount == 9);
}
}
}
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;
namespace as
{
public static class RestaurantBillCalculator
{
public static double CalculateTotalCost(StreamReader input)
{
// I want to read these input values (not the text "System.IO.StreamReader"):
// 25.34
// 10.99
// 250.22
// 21.87
// 50.24
// 15
// Below is what I tried:
int[] numbers = new int[6];
for (int i = 0; i < 5; i++)
{
numbers[int.Parse(input.ReadLine())]++;
}
for (int i = 0; i < 5; i++)
{
Console.WriteLine(numbers[i]);
}
return 0;
}
public static double CalculateIndividualCost(StreamReader input, double totalCost)
{
return 0;
}
public static uint CalculatePayerCount(StreamReader input, double totalCost)
{
return 0;
}
}
}
Even when I googled it, only file input/output came up with that phrase.
I want to get a simple string and read it.
int[] numbers = new int[6]; // The number at the index number
// take the given numbers
for (int i = 0; i < n; i++)
{
numbers[int.Parse(sr.ReadLine())]++;
}
I tried the method above, but it didn't work.
I just want to read the contents of itemCostsInput value by value. If I just call Console.WriteLine on it, all that comes out is System.IO.StreamReader.
I want to read and save the values of itemCostsInput one by one; I just want to do something like a plain read.
I'm sorry, I'm not good at English.
I expected the input to read as
25.34
10.99
250.22
21.87
50.24
15
but the console prints System.IO.StreamReader.
These lines are the ones causing (more) trouble, I think:
for (int i = 0; i < 5; i++)
{
numbers[int.Parse(input.ReadLine())]++;
}
Should be
for (int i = 0; i < 5; i++)
{
numbers[i] = int.Parse(input.ReadLine());
}
But since you have decimal input (in string form, because of the StreamReader), numbers should probably be an array of decimals.
There are also quite a few caveats about this use of StreamReader: if the input doesn't have 5 or more lines, your program will also break. I leave this here hoping it clarifies something for you, though.
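A rough sketch of what that suggestion could look like (the helper name ReadCosts is made up for illustration; it needs using System.Collections.Generic, System.Globalization and System.IO):
static List<decimal> ReadCosts(StreamReader input)
{
    var costs = new List<decimal>();
    string line;
    // Stop when the stream runs out instead of assuming a fixed number of lines.
    while ((line = input.ReadLine()) != null)
    {
        decimal value;
        if (decimal.TryParse(line, NumberStyles.AllowDecimalPoint,
                             CultureInfo.InvariantCulture, out value))
        {
            costs.Add(value);
        }
    }
    return costs;
}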
Your code does not make sense in its current state.
Please read up on Streams.
Usually you'd get a stream from a file or from a network connection but not from a string.
You are confusing integer and double.
The double data type represents floating point numbers.
It seems to me that you just started programming and are missing out on most of the fundamentals.
First, convert your string input into a stream:
static System.IO.Stream GetStream(string input)
{
Stream stream = new MemoryStream();
StreamWriter writer = new StreamWriter(stream);
writer.Write(input);
writer.Flush();
stream.Position = 0;
return stream;
}
Now you can convert your input to a stream like this:
// ... code ...
string itemCostsInput = "25.34\n10.99\n250.22\n21.87\n50.24\n15";
var dataStream = GetStream(itemCostsInput);
// ... code ...
Now that you have converted your string input into a stream, you can start to parse your data and extract the numbers:
static List<double> GetDoubleFromStream(Stream stream)
{
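// Note: NumberStyles and CultureInfo below require "using System.Globalization;".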
if (stream == null) {
return new List<double>();
}
const char NEWLINE = '\n';
List<double> result = new List<double>();
using (var reader = new StreamReader(stream))
{
// Continue until end of stream has been reached.
while (reader.Peek() > -1)
{
string temp = string.Empty;
// Read while not end of stream and char is not new line.
while (reader.Peek() != NEWLINE && reader.Peek() > -1) {
temp += (char)reader.Read();
}
// Perform another read operation
// to skip the current new line character
// and continue reading.
reader.Read();
// Parse data to double if valid.
if (!(string.IsNullOrEmpty(temp)))
{
double d;
// Allow decimal points and ignore culture.
if (double.TryParse(
temp,
NumberStyles.AllowDecimalPoint,
CultureInfo.InvariantCulture,
out d))
{
result.Add(d);
}
}
}
}
return result;
}
Putting it together, this would be your intermediate result:
// ... code ...
string itemCostsInput = "25.34\n10.99\n250.22\n21.87\n50.24\n15";
var dataStream = GetStream(itemCostsInput);
var result = GetDoubleFromStream(dataStream);
// ... code ...
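To check the parsed values, something like this would do:
foreach (double cost in result)
{
    Console.WriteLine(cost);
}
// Should print the six values parsed from itemCostsInput.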
This is a bit of a doozy and it's been a while since I worked with C#, so bear with me:
I'm running a jruby script to iterate through 900 files (5 Mb - 1500 Mb in size) to figure out how many dupes STILL exist within these (already uniq'd) files. I had little luck with awk.
My latest idea was to insert them into a local MongoDB instance like so:
db.collection('hashes').update({ :_id => hash }, { $inc: { count: 1 } }, { upsert: true })
... so that later I could just query it like db.collection.where({ count: { $gt: 1 } }) to get all the dupes.
This is working great except it's been over 24 hours and at the time of writing I'm at 72,532,927 Mongo entries and growing.
I think Ruby's .each_line is bottlenecking the IO hardcore.
So what I'm thinking now is compiling a C# program which fires up a thread PER EACH FILE and inserts the line (md5 hash) into a Redis list.
From there, I could have another compiled C# program simply pop the values off and ignore the save if the count is 1.
So the questions are:
Will using a compiled file reader and multithreading the file reads significantly improve performance?
Is using Redis even necessary? With a tremendous amount of AWS memory, could I not just use the threads to fill some sort of a list atomically and proceed from there?
Thanks in advance.
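For what it's worth, the "fill some sort of list atomically" idea could look roughly like this in C#, using a ConcurrentDictionary as the shared counter instead of Redis or Mongo. This is only a sketch: it assumes the directory containing the files is passed as the first command-line argument, that each line is already an md5 hash, and that the counter table fits in memory.
using System;
using System.Collections.Concurrent;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
class DupeCounter
{
    static void Main(string[] args)
    {
        // One shared, thread-safe counter keyed by line (the md5 hash).
        var counts = new ConcurrentDictionary<string, int>();
        // Process files in parallel; File.ReadLines streams instead of loading whole files.
        Parallel.ForEach(Directory.EnumerateFiles(args[0]), file =>
        {
            foreach (var line in File.ReadLines(file))
            {
                counts.AddOrUpdate(line, 1, (key, count) => count + 1);
            }
        });
        // Equivalent of db.collection.where({ count: { $gt: 1 } }).
        foreach (var dupe in counts.Where(kv => kv.Value > 1))
        {
            Console.WriteLine("{0}\t{1}", dupe.Key, dupe.Value);
        }
    }
}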
Updated
New solution: the main idea is to calculate a dummy hash (just the sum of all chars in the string) for each line and store it in Dictionary<ulong, List<LinePosition>> _hash2LinePositions. Several lines in the same stream can end up with the same hash, which is why the dictionary value is a List. When two hashes are equal, the strings are read back from the streams and compared. LinePosition stores the information about a line: its position in the stream and its length. I don't have files as huge as yours, but my tests show that it works. Here is the full code:
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
public class Solution
{
struct LinePosition
{
public long Start;
public long Length;
public LinePosition(long start, long count)
{
Start = start;
Length = count;
}
public override string ToString()
{
return string.Format("Start: {0}, Length: {1}", Start, Length);
}
}
class TextFileHasher : IDisposable
{
readonly Dictionary<ulong, List<LinePosition>> _hash2LinePositions;
readonly Stream _stream;
bool _isDisposed;
public HashSet<ulong> Hashes { get; private set; }
public string Name { get; private set; }
public TextFileHasher(string name, Stream stream)
{
Name = name;
_stream = stream;
_hash2LinePositions = new Dictionary<ulong, List<LinePosition>>();
Hashes = new HashSet<ulong>();
}
public override string ToString()
{
return Name;
}
public void CalculateFileHash()
{
int readByte = -1;
ulong dummyLineHash = 0;
// Line start position in file
long startPosition = 0;
while ((readByte = _stream.ReadByte()) != -1) {
// Read until new line
if (readByte == '\r' || readByte == '\n') {
// If there was data
if (dummyLineHash != 0) {
// Add line hash and line position to the dict
AddToDictAndHash(dummyLineHash, startPosition, _stream.Position - 1 - startPosition);
// Reset line hash
dummyLineHash = 0;
}
}
else {
// Was it new line ?
if (dummyLineHash == 0)
startPosition = _stream.Position - 1;
// Calculate dummy hash
dummyLineHash += (uint)readByte;
}
}
if (dummyLineHash != 0) {
// Add line hash and line position to the dict
AddToDictAndHash(dummyLineHash, startPosition, _stream.Position - startPosition);
// Reset line hash
dummyLineHash = 0;
}
}
public List<LinePosition> GetLinePositions(ulong hash)
{
return _hash2LinePositions[hash];
}
public List<string> GetDuplicates()
{
List<string> duplicates = new List<string>();
foreach (var key in _hash2LinePositions.Keys) {
List<LinePosition> linesPos = _hash2LinePositions[key];
if (linesPos.Count > 1) {
duplicates.AddRange(FindExactDuplicates(linesPos));
}
}
return duplicates;
}
public void Dispose()
{
if (_isDisposed)
return;
_stream.Dispose();
_isDisposed = true;
}
private void AddToDictAndHash(ulong hash, long start, long count)
{
List<LinePosition> linesPosition;
if (!_hash2LinePositions.TryGetValue(hash, out linesPosition)) {
linesPosition = new List<LinePosition>() { new LinePosition(start, count) };
_hash2LinePositions.Add(hash, linesPosition);
}
else {
linesPosition.Add(new LinePosition(start, count));
}
Hashes.Add(hash);
}
public byte[] GetLineAsByteArray(LinePosition prevPos)
{
long len = prevPos.Length;
byte[] lineBytes = new byte[len];
_stream.Seek(prevPos.Start, SeekOrigin.Begin);
_stream.Read(lineBytes, 0, (int)len);
return lineBytes;
}
private List<string> FindExactDuplicates(List<LinePosition> linesPos)
{
List<string> duplicates = new List<string>();
linesPos.Sort((x, y) => x.Length.CompareTo(y.Length));
LinePosition prevPos = linesPos[0];
for (int i = 1; i < linesPos.Count; i++) {
if (prevPos.Length == linesPos[i].Length) {
var prevLineArray = GetLineAsByteArray(prevPos);
var thisLineArray = GetLineAsByteArray(linesPos[i]);
if (prevLineArray.SequenceEqual(thisLineArray)) {
var line = System.Text.Encoding.Default.GetString(prevLineArray);
duplicates.Add(line);
}
#if false
string prevLine = System.Text.Encoding.Default.GetString(prevLineArray);
string thisLine = System.Text.Encoding.Default.GetString(thisLineArray);
Console.WriteLine("PrevLine: {0}\r\nThisLine: {1}", prevLine, thisLine);
StringBuilder sb = new StringBuilder();
sb.Append(prevPos);
sb.Append(" is '");
sb.Append(prevLine);
sb.Append("'. ");
sb.AppendLine();
sb.Append(linesPos[i]);
sb.Append(" is '");
sb.Append(thisLine);
sb.AppendLine("'. ");
sb.Append("Equals => ");
sb.Append(prevLine.CompareTo(thisLine) == 0);
Console.WriteLine(sb.ToString());
#endif
}
else {
prevPos = linesPos[i];
}
}
return duplicates;
}
}
public static void Main(String[] args)
{
List<TextFileHasher> textFileHashers = new List<TextFileHasher>();
string text1 = "abc\r\ncba\r\nabc";
TextFileHasher tfh1 = new TextFileHasher("Text1", new MemoryStream(System.Text.Encoding.Default.GetBytes(text1)));
tfh1.CalculateFileHash();
textFileHashers.Add(tfh1);
string text2 = "def\r\ncba\r\nwet";
TextFileHasher tfh2 = new TextFileHasher("Text2", new MemoryStream(System.Text.Encoding.Default.GetBytes(text2)));
tfh2.CalculateFileHash();
textFileHashers.Add(tfh2);
string text3 = "def\r\nbla\r\nwat";
TextFileHasher tfh3 = new TextFileHasher("Text3", new MemoryStream(System.Text.Encoding.Default.GetBytes(text3)));
tfh3.CalculateFileHash();
textFileHashers.Add(tfh3);
List<string> totalDuplicates = new List<string>();
Dictionary<ulong, Dictionary<TextFileHasher, List<LinePosition>>> totalHashes = new Dictionary<ulong, Dictionary<TextFileHasher, List<LinePosition>>>();
textFileHashers.ForEach(tfh => {
foreach(var dummyHash in tfh.Hashes) {
Dictionary<TextFileHasher, List<LinePosition>> tfh2LinePositions = null;
if (!totalHashes.TryGetValue(dummyHash, out tfh2LinePositions))
totalHashes[dummyHash] = new Dictionary<TextFileHasher, List<LinePosition>>() { { tfh, tfh.GetLinePositions(dummyHash) } };
else {
List<LinePosition> linePositions = null;
if (!tfh2LinePositions.TryGetValue(tfh, out linePositions))
tfh2LinePositions[tfh] = tfh.GetLinePositions(dummyHash);
else
linePositions.AddRange(tfh.GetLinePositions(dummyHash));
}
}
});
HashSet<TextFileHasher> alreadyGotDuplicates = new HashSet<TextFileHasher>();
foreach(var hash in totalHashes.Keys) {
var tfh2LinePositions = totalHashes[hash];
var tfh = tfh2LinePositions.Keys.FirstOrDefault();
// Get duplicates in the TextFileHasher itself
if (tfh != null && !alreadyGotDuplicates.Contains(tfh)) {
totalDuplicates.AddRange(tfh.GetDuplicates());
alreadyGotDuplicates.Add(tfh);
}
if (tfh2LinePositions.Count <= 1) {
continue;
}
// Algo to get duplicates in more than 1 TextFileHashers
var tfhs = tfh2LinePositions.Keys.ToArray();
for (int i = 0; i < tfhs.Length; i++) {
var tfh1Positions = tfhs[i].GetLinePositions(hash);
for (int j = i + 1; j < tfhs.Length; j++) {
var tfh2Positions = tfhs[j].GetLinePositions(hash);
for (int k = 0; k < tfh1Positions.Count; k++) {
var tfh1Pos = tfh1Positions[k];
var tfh1ByteArray = tfhs[i].GetLineAsByteArray(tfh1Pos);
for (int m = 0; m < tfh2Positions.Count; m++) {
var tfh2Pos = tfh2Positions[m];
if (tfh1Pos.Length != tfh2Pos.Length)
continue;
var tfh2ByteArray = tfhs[j].GetLineAsByteArray(tfh2Pos);
if (tfh1ByteArray.SequenceEqual(tfh2ByteArray)) {
var line = System.Text.Encoding.Default.GetString(tfh1ByteArray);
totalDuplicates.Add(line);
}
}
}
}
}
}
Console.WriteLine();
if (totalDuplicates.Count > 0) {
Console.WriteLine("Total number of duplicates: {0}", totalDuplicates.Count);
Console.WriteLine("#######################");
totalDuplicates.ForEach(x => Console.WriteLine("{0}", x));
Console.WriteLine("#######################");
}
// Free resources
foreach (var tfh in textFileHashers)
tfh.Dispose();
}
}
If you have tons of ram... You guys are overthinking it...
var fileLines = File.ReadAllLines(@"c:\file.csv").Distinct();
Given an input file of text lines, I want duplicate lines to be identified and removed. Please show a simple snippet of C# that accomplishes this.
For small files:
string[] lines = File.ReadAllLines("filename.txt");
File.WriteAllLines("filename.txt", lines.Distinct().ToArray());
This should do it (and will cope with large files).
Note that it only removes duplicate consecutive lines, i.e.
a
b
b
c
b
d
will end up as
a
b
c
b
d
If you want no duplicates anywhere, you'll need to keep a set of lines you've already seen.
using System;
using System.IO;
class DeDuper
{
static void Main(string[] args)
{
if (args.Length != 2)
{
Console.WriteLine("Usage: DeDuper <input file> <output file>");
return;
}
using (TextReader reader = File.OpenText(args[0]))
using (TextWriter writer = File.CreateText(args[1]))
{
string currentLine;
string lastLine = null;
while ((currentLine = reader.ReadLine()) != null)
{
if (currentLine != lastLine)
{
writer.WriteLine(currentLine);
lastLine = currentLine;
}
}
}
}
}
Note that this assumes Encoding.UTF8, and that you want to use files. It's easy to generalize as a method though:
static void CopyLinesRemovingConsecutiveDupes
(TextReader reader, TextWriter writer)
{
string currentLine;
string lastLine = null;
while ((currentLine = reader.ReadLine()) != null)
{
if (currentLine != lastLine)
{
writer.WriteLine(currentLine);
lastLine = currentLine;
}
}
}
(Note that that doesn't close anything - the caller should do that.)
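For example, the caller could look something like this (the file names are just placeholders):
using (TextReader reader = File.OpenText("input.txt"))
using (TextWriter writer = File.CreateText("output.txt"))
{
    CopyLinesRemovingConsecutiveDupes(reader, writer);
}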
Here's a version that will remove all duplicates, rather than just consecutive ones:
static void CopyLinesRemovingAllDupes(TextReader reader, TextWriter writer)
{
string currentLine;
HashSet<string> previousLines = new HashSet<string>();
while ((currentLine = reader.ReadLine()) != null)
{
// Add returns true if it was actually added,
// false if it was already there
if (previousLines.Add(currentLine))
{
writer.WriteLine(currentLine);
}
}
}
For a long file (and non-consecutive duplications) I'd copy the file line by line, building a hash/position lookup table as I went.
As each line is copied, check the hashed value; if there is a collision, double-check that the line really is the same and move to the next.
Only worth it for fairly large files though.
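A rough, untested sketch of that hash/position idea (all names here are mine; UTF-8 text is assumed, only a hash and a file offset are kept per line, and collisions are resolved by re-reading the earlier line from the input file):
using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
static class HashPositionDeDuper
{
    public static void CopyLinesRemovingAllDupes(string inputPath, string outputPath)
    {
        var positionsByHash = new Dictionary<int, List<long>>();
        using (var input = new FileStream(inputPath, FileMode.Open, FileAccess.Read))
        using (var check = new FileStream(inputPath, FileMode.Open, FileAccess.Read))
        using (var writer = new StreamWriter(outputPath))
        {
            long lineStart = 0;
            byte[] line;
            while ((line = ReadLineBytes(input)) != null)
            {
                int hash = ComputeHash(line);
                List<long> earlier;
                bool duplicate = false;
                if (positionsByHash.TryGetValue(hash, out earlier))
                {
                    // Same hash seen before: re-read those lines and compare for real.
                    foreach (long pos in earlier)
                    {
                        check.Seek(pos, SeekOrigin.Begin);
                        byte[] other = ReadLineBytes(check);
                        if (other != null && SameBytes(line, other)) { duplicate = true; break; }
                    }
                }
                else
                {
                    earlier = new List<long>();
                    positionsByHash[hash] = earlier;
                }
                if (!duplicate)
                {
                    earlier.Add(lineStart);
                    writer.WriteLine(Encoding.UTF8.GetString(line));
                }
                lineStart = input.Position;
            }
        }
    }
    // Reads one line as raw bytes (without the trailing \r\n); returns null at end of file.
    static byte[] ReadLineBytes(Stream stream)
    {
        int b = stream.ReadByte();
        if (b == -1) return null;
        var bytes = new List<byte>();
        while (b != -1 && b != '\n')
        {
            if (b != '\r') bytes.Add((byte)b);
            b = stream.ReadByte();
        }
        return bytes.ToArray();
    }
    static int ComputeHash(byte[] line)
    {
        unchecked
        {
            int hash = 17;
            foreach (byte b in line) hash = hash * 31 + b;
            return hash;
        }
    }
    static bool SameBytes(byte[] a, byte[] b)
    {
        if (a.Length != b.Length) return false;
        for (int i = 0; i < a.Length; i++)
            if (a[i] != b[i]) return false;
        return true;
    }
}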
Here's a streaming approach that should incur less overhead than reading all unique strings into memory.
var sr = new StreamReader(File.OpenRead(@"C:\Temp\in.txt"));
var sw = new StreamWriter(File.OpenWrite(@"C:\Temp\out.txt"));
var lines = new HashSet<int>();
while (!sr.EndOfStream)
{
string line = sr.ReadLine();
int hc = line.GetHashCode();
if(lines.Contains(hc))
continue;
lines.Add(hc);
sw.WriteLine(line);
}
sw.Flush();
sw.Close();
sr.Close();
I am new to .NET and have written something simpler; it may not be very efficient. Please feel free to share your thoughts.
class Program
{
static void Main(string[] args)
{
string[] emp_names = File.ReadAllLines("D:\\Employee Names.txt");
List<string> newemp1 = new List<string>();
for (int i = 0; i < emp_names.Length; i++)
{
newemp1.Add(emp_names[i]); //passing data to newemp1 from emp_names
}
for (int i = 0; i < emp_names.Length; i++)
{
List<string> temp = new List<string>();
int duplicate_count = 0;
for (int j = newemp1.Count - 1; j >= 0; j--)
{
if (emp_names[i] != newemp1[j]) //checking for duplicate records
temp.Add(newemp1[j]);
else
{
duplicate_count++;
if (duplicate_count == 1)
temp.Add(emp_names[i]);
}
}
newemp1 = temp;
}
string[] newemp = newemp1.ToArray(); //assigning into a string array
Array.Sort(newemp);
File.WriteAllLines("D:\\Employee Names.txt", newemp); //now writing the data to a text file
Console.ReadLine();
}
}
I have a file with the following text inside
mimi,m,70
tata,f,60
bobo,m,100
soso,f,30
I did the reading-from-file part and many other methods and functions, but how can I get the best male name and his grade, based on the grade?
Here is the code I wrote. Hope it's not too long.
using System;
using System.Collections.Generic;
using System.Text;
using System.IO;
namespace practice_Ex
{
class Program
{
public static int[] ReadFile(string FileName, out string[] Name, out char[] Gender)
{
Name = new string[1];
int[] Mark = new int[1];
Gender = new char[1];
if (File.Exists(FileName))
{
FileStream Input = new FileStream(FileName, FileMode.Open, FileAccess.Read);
StreamReader SR = new StreamReader(Input);
string[] Current;
int Counter = 0;
string Str = SR.ReadLine();
while (Str != null)
{
Current = Str.Split(',');
Name[Counter] = Current[0];
Mark[Counter] = int.Parse(Current[2]);
Gender[Counter] = char.Parse(Current[1].ToUpper());
Counter++;
Array.Resize(ref Name, Counter + 1);
Array.Resize(ref Mark, Counter + 1);
Array.Resize(ref Gender, Counter + 1);
Str = SR.ReadLine();
}
}
return Mark;
}
public static int MostFreq(int[] M, out int Frequency)
{
int Counter = 0;
int Frequent = 0;
Frequency = 0;
for (int i = 0; i < M.Length; i++)
{
Counter = 0;
for (int j = 0; j < M.Length; j++)
if (M[i] == M[j])
Counter++;
if (Counter > Frequency)
{
Frequency = Counter;
Frequent = M[i];
}
}
return Frequent;
}
public static int Avg(int[] M)
{
int total = 0;
for (int i = 0; i < M.Length; i++)
total += M[i];
return total / M.Length;
}
public static int AvgCond(char[] G, int[] M, char S)
{
int total = 0;
int counter = 0;
for (int i = 0; i < G.Length; i++)
if (G[i] == S)
{
total += M[i];
counter++;
}
return total / counter;
}
public static int BelowAvg(int[] M, out int AboveAvg)
{
int Bcounter = 0;
AboveAvg = 0;
for (int i = 0; i < M.Length; i++)
{
if (M[i] < Avg(M))
Bcounter++;
else
AboveAvg++;
}
return Bcounter;
}
public static int CheckNames(string[] Name, char C)
{
C = char.Parse(C.ToString().ToLower());
int counter = 0;
string Str;
for (int i = 0; i < Name.Length - 1; i++)
{
Str = Name[i].ToLower();
if (Str[0] == C || Str[Str.Length - 1] == C)
counter++;
}
return counter;
}
public static void WriteFile(string FileName, string[] Output)
{
FileStream FS = new FileStream(FileName, FileMode.OpenOrCreate, FileAccess.Write);
StreamWriter SW = new StreamWriter(FS);
for (int i = 0; i < Output.Length; i++)
SW.WriteLine(Output[i]);
}
static void Main(string[] args)
{
int[] Mark;
char[] Gender;
string[] Name;
string[] Output = new string[8];
int Frequent, Frequency, AvgAll, MaleAvg, FemaleAvg, BelowAverage, AboveAverage, NamesCheck;
Mark = ReadFile("c:\\IUST1.txt", out Name, out Gender);
Frequent = MostFreq(Mark, out Frequency);
AvgAll = Avg(Mark);
MaleAvg = AvgCond(Gender, Mark, 'M');
FemaleAvg = AvgCond(Gender, Mark, 'F');
BelowAverage = BelowAvg(Mark, out AboveAverage);
NamesCheck = CheckNames(Name, 'T');
Output [0]= "Frequent Mark = " + Frequent.ToString();
Output [1]= "Frequency = " + Frequency.ToString();
Output [2]= "Average Of All = " + AvgAll.ToString();
Output [3]= "Average Of Males = " + MaleAvg.ToString();
Output [4]= "Average Of Females = " + FemaleAvg.ToString();
Output [5]= "Below Average = " + BelowAverage.ToString();
Output [6]= "Above Average = " + AboveAverage.ToString();
Output [7]= "Names With \"T\" = " + NamesCheck.ToString();
WriteFile("c:\\Output.txt", Output);
}
}
}
Well, I like LINQ (update: excluded via comments) for querying, especially if I can do it without buffering the data (so I can process a huge file efficiently). For example below (update: removed LINQ); note the use of iterator blocks (yield return) makes this fully "lazy" - only one record is held in memory at a time.
This also shows separation of concerns: one method deals with reading a file line by line; one method deals with parsing a line into a typed data record; one (or more) method(s) work with those data record(s).
using System;
using System.Collections.Generic;
using System.IO;
enum Gender { Male, Female, Unknown }
class Record
{
public string Name { get; set; }
public Gender Gender { get; set; }
public int Score { get; set; }
}
static class Program
{
static IEnumerable<string> ReadLines(string path)
{
using (StreamReader reader = File.OpenText(path))
{
string line;
while ((line = reader.ReadLine()) != null)
{
yield return line;
}
}
}
static IEnumerable<Record> Parse(string path)
{
foreach (string line in ReadLines(path))
{
string[] segments = line.Split(',');
Gender gender;
switch(segments[1]) {
case "m": gender = Gender.Male; break;
case "f": gender = Gender.Female; break;
default: gender = Gender.Unknown; break;
}
yield return new Record
{
Name = segments[0],
Gender = gender,
Score = int.Parse(segments[2])
};
}
}
static void Main()
{
Record best = null;
foreach (Record record in Parse("data.txt"))
{
if (record.Gender != Gender.Male) continue;
if (best == null || record.Score > best.Score)
{
best = record;
}
}
Console.WriteLine("{0}: {1}", best.Name, best.Score);
}
}
The advantage of writing things as iterators is that you can easily use either streaming or buffering - for example, you can do:
List<Record> data = new List<Record>(Parse("data.txt"));
and then manipulate data all day long (assuming it isn't too large) - useful for multiple aggregates, mutating data, etc.
This question asks how to find a maximal element by a certain criterion. Combine that with Marc's LINQ part and you're away.
In the real world, of course, these would be records in a database, and you would use one line of SQL to select the best record, ie:
SELECT Name, Score FROM Grades WHERE Score = (SELECT MAX(Score) FROM Grades)
(This returns more than one record where there's more than one best record, of course.) This is an example of the power of using the right tool for the job.
I think the fastest and least-code way would be to transform the txt to xml and then use Linq2Xml to select from it. Here's a link.
Edit: That might be more work than you'd like to do. Another option is to create a class called AcademicRecord that has properties for the person's name, gender, etc. Then when you read the file, add an AcademicRecord to a List for each line in the file. Then use a Sort predicate to sort the list; the highest record would then be the first one in the list. Here's a link.
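A small sketch of that idea (the AcademicRecord name comes from the suggestion above; everything else is illustrative and assumes the comma-separated format from the question):
class AcademicRecord
{
    public string Name { get; set; }
    public string Gender { get; set; }
    public int Grade { get; set; }
}
// Read every line into a list, then sort so the highest-graded male comes first.
List<AcademicRecord> records = new List<AcademicRecord>();
foreach (string line in File.ReadAllLines("MyFile.csv"))
{
    string[] parts = line.Split(',');
    records.Add(new AcademicRecord { Name = parts[0], Gender = parts[1], Grade = int.Parse(parts[2]) });
}
records.Sort((a, b) =>
{
    if (a.Gender != b.Gender) return a.Gender == "m" ? -1 : 1; // males first
    return b.Grade.CompareTo(a.Grade);                         // then highest grade first
});
AcademicRecord best = records[0];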
Your assignment might have different requirements, but if you only want to get the "best male name and grade" from the file you described, a compact way is:
public String FindRecord()
{
String[] lines = File.ReadAllLines("MyFile.csv");
Array.Sort(lines, CompareByBestMaleName);
return lines[0];
}
int CompareByBestMaleName(String a, String b)
{
String[] ap = a.Split(',');
String[] bp = b.Split(',');
// Always rank male higher
if (ap[1] == "m" && bp[1] == "f") { return -1; }
if (ap[1] == "f" && bp[1] == "m") { return 1; }
// Compare by score, highest first
return int.Parse(bp[2]).CompareTo(int.Parse(ap[2]));
}
Note that this is neither fast nor robust.