Read a TXT file and transform it into a SQL script - C#

I have a txt file:
LoginId; No_Intervenant
EF2KBT0; 1003820030
ENHD0KE; 1003820129
E9PM7EP; 1003820153
EFT10OO; 1003820218
I need to create another txt file that contains an SQL UPDATE script built from this information, like:
UPDATE Contact
Set
Contact.No_Intervenant = '1003820030'
where
ISNULL (Contact.LoginId, '') = 'ER7OZXZ';
I only got this result using a StringBuilder method, but with hardcoded values. What I would like is for the header to be added automatically.
public Form1()
{
InitializeComponent();
}
private static void AddSqlCommand(StringBuilder sql, string[] columns, string[] types, string[] values)
{
sql.AppendLine("UPDATE Contact");
sql.AppendLine("SET");
//skip LoginId columns
for (int i = 1; i < columns.Length; i++)
{
switch (types[i].Trim())
{
case "int":
sql.Append($" Contact.{columns[i].Trim()} = {values[i]}");
//sql.Append($" Contact.{columns[0].TrimStart() } = {values[i]}");
break;
default:
sql.Append($" Contact.No_Intervenant = '{values[i]}'");
break;
}
if (columns.Length > 1 && i != columns.Length - 1)
{
sql.Append(",");
}
sql.AppendLine();
}
sql.AppendLine("WHERE");
sql.AppendLine($" ISNULL(Contact.LoginId, '') = '{values[0]}';");
sql.AppendLine();
}
private static StringBuilder GenerateSqlScript(string[] fileContent)
{
var sqlCommand = new StringBuilder();
string[] types = fileContent[0].Split(';');
string[] columns = fileContent[1].Split(';');
//skip the first line (header)
for (int i = 2; i < fileContent.Length; i++)
{
string[] values = fileContent[i].Split(';');
if (values.Length >= 1)
{
AddSqlCommand(sqlCommand, columns, types, values);
}
}
return sqlCommand;
}
How could I get and add the header automatically? I'll probably have to do this for longer files, with more columns and more UPDATE lines, and I would not like to hardcode all the file headers, like in the example I'll have to handle next:
Header:
No_Intervenant;First_Name;Last_Name;Role_SE;EMail;Phone;Extension;Statut;Address_1;Address_2;Zip;CPF;Inscription_Particulier;DHM_Stat_Part;Date_via_ClicSeQur;Last_Update;
Data:
1003820030;NOEL;SANTOS;Particulier;;;;Actif;1528 STREET;VAL-D''OR CA;AAA 5T9;123456789;Actif;;2016-07-19 09:49:43;2019-02-08 14:24:19;

I believe you only need a couple of simple changes to your string interpolation; see below. If you have an update that affects multiple tables, you should append your table name to the column in the array.
Here is how I tested. According to your logic, the first row of the file should contain your data types (the example you posted doesn't), so either your logic is wrong or the data sample is. It works with the test code.
private void button4_Click(object sender, EventArgs e)
{
var line = new List<string>();
line.Add("string;string");
line.Add("LoginId; No_Intervenant");
line.Add("EF2KBT0; 1003820030");
line.Add("ENHD0KE; 1003820129");
line.Add("E9PM7EP; 1003820153");
line.Add("EFT10OO; 1003820218");
var fileContent = line.ToArray();
var sqlCommand = new StringBuilder();
string[] types = fileContent[0].Split(';');
string[] columns = fileContent[1].Split(';');
//skip the first line (header)
for (int i = 2; i < fileContent.Length; i++)
{
string[] values = fileContent[i].Split(';');
if (values.Length >= 1)
{
AddSqlCommand(sqlCommand, columns, types, values, "client");
}
}
}
Edited function:
private static void AddSqlCommand(StringBuilder sql, string[] columns, string[] types, string[] values, string table)
{
sql.AppendLine($"UPDATE {table}");
sql.AppendLine("SET");
//skip LoginId columns
for (int i = 1; i < columns.Length; i++)
{
switch (types[i].Trim())
{
case "int":
sql.Append($" {columns[i].Trim()} = {values[i]}");
break;
default:
sql.Append($" {columns[i].Trim()} = '{values[i]}'");
break;
}
if (columns.Length > 1 && i != columns.Length - 1)
{
sql.Append(",");
}
sql.AppendLine();
}
sql.AppendLine("WHERE");
sql.AppendLine($" ISNULL({columns[0].Trim()}, '') = '{values[0]}';");
sql.AppendLine();
}

I believe in this case a MERGE statement would be a perfect solution.
It could be something like:
-- HEADER --
MERGE [your table] as trg
USING (VALUES
-- DATA FROM THE FILE --
(id, intervenant),
(id, intervenant)
-- FOOTER
) as src(id, intervenant)
ON [your logic from the WHERE statement]
WHEN MATCHED THEN UPDATE SET
trg.[your column] = src.[your column];
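For completeness, a rough C# sketch of how such a MERGE script could be generated from the same file content (a sketch only: the Contact table name and the ISNULL join on the first column are assumptions carried over from the question's sample; needs using System, System.Linq and System.Text):

private static string GenerateMergeScript(string[] fileContent)
{
    // first line is the header, e.g. "LoginId; No_Intervenant"
    string[] columns = fileContent[0].TrimEnd(';').Split(';').Select(c => c.Trim()).ToArray();
    var sql = new StringBuilder();
    sql.AppendLine("MERGE Contact AS trg");
    sql.AppendLine("USING (VALUES");
    // one VALUES row per data line, with embedded single quotes doubled
    var rows = fileContent.Skip(1)
        .Where(line => !string.IsNullOrWhiteSpace(line))
        .Select(line => "    ('" + string.Join("', '",
            line.TrimEnd(';').Split(';').Select(v => v.Trim().Replace("'", "''"))) + "')");
    sql.AppendLine(string.Join("," + Environment.NewLine, rows));
    sql.AppendLine($") AS src ({string.Join(", ", columns)})");
    sql.AppendLine($"ON ISNULL(trg.{columns[0]}, '') = src.{columns[0]}");
    sql.AppendLine("WHEN MATCHED THEN UPDATE SET");
    sql.AppendLine(string.Join("," + Environment.NewLine,
        columns.Skip(1).Select(c => $"    trg.{c} = src.{c}")));
    sql.AppendLine(";");
    return sql.ToString();
}

A single MERGE also touches each row only once, instead of emitting one UPDATE statement per line.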

The data from the source file can be loaded into a DataTable object, with UPDATE statements then constructed from it. The header names from the file are obtained from the Columns property of the DataTable, then used to specify the columns in the UPDATE script. In the example below, extra blank lines and the GO separator are added to the script for formatting; they aren't essential and can be removed if you prefer.
using System.Linq;
using System.Data;
using System.IO;
using System.Text;
//get source file
string fullFileName = @"C:\Input Folder\SourceFile.txt";
DataTable dt = new DataTable();
StringBuilder sb = new StringBuilder();
//output .sql script
string sqlScript = @"C:\Output Folder\UpdateScript.SQL";
using (StreamReader sr = new StreamReader(fullFileName))
{
string firstLine = sr.ReadLine();
string[] headers = firstLine.Split(';');
//define columns for data table
foreach (string h in headers)
{
dt.Columns.Add(h);
}
int columnCount = dt.Columns.Count;
string line = sr.ReadLine();
while (line != null)
{
string[] fields = line.Split(';');
int currentLength = fields.Count();
if (currentLength < columnCount)
{
while (currentLength < columnCount)
{
line += sr.ReadLine();
currentLength = line.Split(';').Count();
}
fields = line.Split(';');
}
//load data table
dt.Rows.Add(fields);
line = sr.ReadLine();
}
foreach (DataRow dr in dt.Rows)
{
sb.AppendLine("UPDATE Contact SET " + dt.Columns[1] + " = '" + dr[1] +
"' WHERE ISNULL(" + dt.Columns[0] + ", '') = '" + dr[0] + "'");
//extra lines and GO batch separator added between UPDATE statements for formatting
sb.AppendLine(Environment.NewLine);
sb.AppendLine("GO");
sb.AppendLine(Environment.NewLine);
}
//output UPDATE commands as .sql script file
File.WriteAllText(sqlScript, sb.ToString());
}
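If the file has more columns than the two in this sample (like the longer header in the question), the same DataTable makes it easy to emit a SET clause for every column after the key. A sketch, assuming the first column is always the lookup key and every value is written as quoted text with embedded quotes doubled:

foreach (DataRow dr in dt.Rows)
{
    // one "Column = 'value'" pair per column after the key (index 0)
    var setClauses = dt.Columns.Cast<DataColumn>()
        .Skip(1)
        .Select(c => c.ColumnName + " = '" + dr[c].ToString().Replace("'", "''") + "'");
    sb.AppendLine("UPDATE Contact SET " + string.Join(", ", setClauses) +
        " WHERE ISNULL(" + dt.Columns[0].ColumnName + ", '') = '" + dr[0] + "'");
    sb.AppendLine("GO");
}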

Just to post the updated code, which at the moment works perfectly. Thank you all for the answers and for helping me.
using System;
using System.IO;
using System.Text;
using System.Windows.Forms;
namespace GenererScriptSQL
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
}
private static void AddSqlCommand(StringBuilder sql, string[] columns, string[] types, string[] values)
{
sql.AppendLine("UPDATE Contact");
sql.AppendLine("SET");
//skip LoginId columns
for (int i = 1; i < columns.Length; i++)
{
switch (types[i].Trim())
{
case "int":
sql.Append($" Contact.{columns[i].Trim()} = {values[i]}");
break;
default:
sql.Append($" Contact.{columns[i].Trim()} = '{values[i]}'");
break;
}
if (columns.Length > 1 && i != columns.Length - 1)
{
sql.Append(",");
}
sql.AppendLine();
}
sql.AppendLine();
sql.AppendLine("WHERE");
sql.AppendLine();
sql.AppendLine($" Contact.{columns[0].Trim()} = '{values[0]}'");
sql.AppendLine();
}
private static StringBuilder GenerateSqlScript(string[] fileContent)
{
var sqlCommand = new StringBuilder();
string[] types = fileContent[0].Split(';');
string[] columns = fileContent[0].Split(';');
//skip the first line (header)
for (int i = 1; i < fileContent.Length; i++)
{
string[] values = fileContent[i].Split(';');
if (values.Length >= 1)
{
AddSqlCommand(sqlCommand, columns, types, values);
}
}
return sqlCommand;
}
private void buttonCreateSqlFile_Click(object sender, EventArgs e)
{
try
{
if (IsFileSelected())
{
string[] fileContent = File.ReadAllLines(textBoxFile.Text);
if (fileContent != null)
{
StringBuilder sqlCommand = GenerateSqlScript(fileContent);
if (!string.IsNullOrWhiteSpace(sqlCommand.ToString()))
{
WriteSqlFile(sqlCommand);
}
}
}
else
{
MessageBox.Show("Sélectionner le fichier de chargement.");
}
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString());
}
}
private void buttonSelectFile_Click(object sender, EventArgs e)
{
try
{
using (var fileBrowser = new OpenFileDialog())
{
if (fileBrowser.ShowDialog() == DialogResult.OK)
{
textBoxFile.Text = fileBrowser.FileName;
}
}
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString());
}
}
private bool IsFileSelected()
{
return !string.IsNullOrWhiteSpace(textBoxFile.Text) && File.Exists(textBoxFile.Text);
}
private void WriteSqlFile(StringBuilder sqlCommand)
{
var fileInfo = new FileInfo(textBoxFile.Text);
string BackupDate = fileInfo.Name + "_" + DateTime.Now.ToString("yyyy-MM-dd_HH-mm") + "_Update" + ".sql";
string sqlFilePath = Path.Combine(fileInfo.Directory.FullName, BackupDate);
if (File.Exists(sqlFilePath))
{
File.Delete(sqlFilePath);
}
File.WriteAllText(sqlFilePath, sqlCommand.ToString());
MessageBox.Show($@" Le fichier sql a été généré! {sqlFilePath}");
}
}
}
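One small defensive tweak worth considering in AddSqlCommand above (my suggestion, prompted by the doubled quote in the sample value VAL-D''OR): double any embedded single quotes when a value is emitted as text, so a value like O'Brien doesn't break the generated script:

default:
    // double embedded single quotes to keep the generated T-SQL valid
    sql.Append($"    Contact.{columns[i].Trim()} = '{values[i].Trim().Replace("'", "''")}'");
    break;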

Related

System.IndexOutOfRangeException: 'Cannot find column 1'

I have a program to parse a CSV file from the local filesystem into a specified SQL Server table.
When I execute the program I get the error:
System.IndexOutOfRangeException: 'Cannot find column 1' on the line where the program attempts to populate the DataTable.
On closer inspection, the error appears to emanate from row number 3, as shown at this link:
CSV_ERROR
This is how I am reading and saving the CSV file:
static void Main(string[] args)
{
var absPath = @"C:\Users\user\Documents\Projects\MastercardSurveillance\fbc_mc_all_cards.csv";
ProcessFile();
void ProcessFile()
{
string realPath = @"C:\Users\user\Documents\CSV";
string appLog = "CSVERRORS";
var logPath = realPath + Convert.ToString(appLog) + DateTime.Today.ToString("dd -MM-yy") + ".txt";
if (!File.Exists(logPath))
{
File.Create(logPath).Dispose();
}
var dt = GetDATATable();
if (dt == null)
{
return;
}
if (dt.Rows.Count == 0)
{
using (StreamWriter sw = File.AppendText(logPath))
{
sw.WriteLine("No rows imported after reading file " + absPath);
sw.Flush();
sw.Close();
}
return;
}
ClearData();
InsertDATA();
}
DataTable GetDATATable()
{
var FilePath = absPath;
string TableName = "Cards";
string realPath = @"C:\Users\user\Documents\CSV";
string appLog = "CSVERRORS";
var logPath = realPath + Convert.ToString(appLog) + DateTime.Today.ToString("dd -MM-yy") + ".txt";
if (!File.Exists(logPath))
{
File.Create(logPath).Dispose();
}
var dt = new DataTable(TableName);
using (var csvReader = new TextFieldParser(FilePath))
{
csvReader.SetDelimiters(new string[] { "," });
csvReader.HasFieldsEnclosedInQuotes = true;
var readFields = csvReader.ReadFields();
if (readFields == null)
{
using (StreamWriter sw = File.AppendText(logPath))
{
sw.WriteLine("Could not read header fields for file " + FilePath);
sw.Flush();
sw.Close();
}
return null;
}
foreach (var dataColumn in readFields.Select(column => new DataColumn(column, typeof(string)) { AllowDBNull = true, DefaultValue = string.Empty }))
{
dt.Columns.Add(dataColumn);
}
while (!csvReader.EndOfData)
{
var data = csvReader.ReadFields();
if (data == null)
{
using (StreamWriter sw = File.AppendText(logPath))
{
sw.WriteLine(string.Format("Could not read fields on line {0} for file {1}", csvReader.LineNumber, FilePath));
sw.Flush();
sw.Close();
}
continue;
}
var dr = dt.NewRow();
for (var i = 0; i < data.Length; i++)
{
if (!string.IsNullOrEmpty(data[i]))
{
dr[i] = data[i];
}
}
dt.Rows.Add(dr);
}
}
return dt;
}
void ClearData()
{
string SqlSvrConn = @"Server=XXXXXX-5QFK4BL\MSDEVOPS;Database=McardSurveillance;Trusted_Connection=True;MultipleActiveResultSets=true;";
using (var sqlConnection = new SqlConnection(SqlSvrConn))
{
sqlConnection.Open();
// Truncate the live table
using (var sqlCommand = new SqlCommand(_truncateLiveTableCommandText, sqlConnection))
{
sqlCommand.ExecuteNonQuery();
}
}
}
void InsertDATA()
{
string SqlSvrConn = @"Server=XXXXXX-5QFK4BL\MSDEVOPS;Database=McardSurveillance;Trusted_Connection=True;MultipleActiveResultSets=true;";
DataTable table = GetDATATable();
using (var sqlBulkCopy = new SqlBulkCopy(SqlSvrConn))
{
sqlBulkCopy.DestinationTableName = "dbo.Cards";
for (var count = 0; count < table.Columns.Count; count++)
{
sqlBulkCopy.ColumnMappings.Add(count, count);
}
sqlBulkCopy.WriteToServer(table);
}
}
}
How can I identify and possibly exclude the extra data columns being returned from the CSV file?
It appears there is a mismatch between the number of columns in the DataTable and the number of columns being read from the CSV file.
I'm not sure, however, how I can account for this in my logic. For now I did not want to switch to a CSV parsing package; rather, I need insight on how I can remove the extra column, or ensure that the splitting accounts for all possible dubious characters.
For clarity, I have a copy of the CSV file here:
CSV_FILE
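One way to guard against the mismatch (a sketch, not verified against the linked file) is to normalize each parsed row to the DataTable's column count before adding it, inside the while loop of GetDATATable (Take needs System.Linq, which the code already uses):

var data = csvReader.ReadFields();
if (data != null)
{
    if (data.Length > dt.Columns.Count)
    {
        // extra fields, usually from an unescaped delimiter in the data:
        // keep only the expected columns (or log the line and skip it)
        data = data.Take(dt.Columns.Count).ToArray();
    }
    var dr = dt.NewRow();
    for (var i = 0; i < data.Length; i++) // short rows leave trailing columns at their default
    {
        if (!string.IsNullOrEmpty(data[i]))
        {
            dr[i] = data[i];
        }
    }
    dt.Rows.Add(dr);
}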

How to access a CSV file and insert data into it

I am trying to insert data into a CSV file. I tried using an XLWorkbook reference to access and insert the data, but XLWorkbook only supports the extensions xlsx, xlsm, xltx and xltm.
I am trying to find something similar through which I can insert data into a specified column in a CSV file. I have used XLWorkbook for other purposes, but I am not aware of what I can use when I have to work with CSV.
//Accessing the csv file where I am trying to insert data.
string rootPath = Path.GetDirectoryName(Assembly.GetExecutingAssembly().CodeBase);
string filelocation = #"\csv\TestData.csv";
string location = rootPath + filelocation;
XLWorkbook workbook = new XLWorkbook(location);
IXLWorksheet worksheet = workbook.Worksheet("Sheet1");
//Insert data after first row as first row contains column header
int lastrow = worksheet.LastRowUsed().RowNumber() + 1;
//through previous function I am trying get data from database and insert those data into csv cells
worksheet.Cell(String.Format("B{0}", lastrow)).Value = dummydata.FirstName;
worksheet.Cell(String.Format("C{0}", lastrow)).Value = dummydata.LastName;
worksheet.Cell(String.Format("D{0}", lastrow)).Value = dummydata.Address1;
worksheet.Cell(String.Format("E{0}", lastrow)).Value = dummydata.Address2;
worksheet.Cell(String.Format("F{0}", lastrow)).Value = dummydata.City;
worksheet.Cell(String.Format("G{0}", lastrow)).Value = dummydata.StateProvinceCode;
worksheet.Cell(String.Format("H{0}", lastrow)).Value = dummydata.ZipCode;
worksheet.Cell(String.Format("I{0}", lastrow)).Value = dummydata.Country;
worksheet.Cell(String.Format("J{0}", lastrow)).Value = dummydata.HomePhone;
worksheet.Cell(String.Format("L{0}", lastrow)).Value = dummydata.HomePhone;
worksheet.Cell(String.Format("M{0}", lastrow)).Value = dummydata.CellPhone;
worksheet.Cell(String.Format("T{0}", lastrow)).Value = dummydata.Email;
worksheet.Cell(String.Format("U{0}", lastrow)).Value = dummydata.Country;
//After inserting save the file
workbook.Save();
You can simply copy and use this code as is. It should resolve your issues.
Here's the class I developed to replace and/or add csv cells:
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
namespace CSVManager
{
public class CSVWorker
{
private string m_FileName = string.Empty;
public CSVWorker(string fileName)
{
m_FileName = fileName;
}
public void AddCells(int row, int column, string newValue)
{
var encoding = Encoding.GetEncoding("iso-8859-1");
var csvLines = File.ReadAllLines(m_FileName, encoding);
if (row < csvLines.Length)
{
ReplaceCells(row, column, newValue);
}
else
{
using (FileStream stream = new FileStream(m_FileName, FileMode.Create))
{
using (StreamWriter writer = new StreamWriter(stream, encoding))
{
foreach (var line in csvLines)
{
writer.WriteLine(line);
}
int blankLines = row - csvLines.Length - 1;
for (int i = 0; i < blankLines; i++)
{
writer.WriteLine("");
}
string blankCols = string.Empty;
for (int i = 0; i < column-1; i++)
{
blankCols += ',';
}
writer.WriteLine(blankCols + newValue);
}
}
}
}
public void ReplaceCells(int row, int column, string newValue)
{
var encoding = Encoding.GetEncoding("iso-8859-1");
var csvLines = File.ReadAllLines(m_FileName, encoding);
for (int i = 0; i < csvLines.Length; i++)
{
//var values = csvLines[i].Split(',');
List <string> values = csvLines[i].Split(',').ToList();
if (i == row)
{
if (column < values.Count)
{
values[column] = newValue;
}
else
{
// use List<T>.Add here: the LINQ Append extension returns a new
// sequence and discards it, leaving the list unchanged
while (values.Count < column)
{
values.Add(string.Empty);
}
values.Add(newValue);
}
using (FileStream stream = new FileStream(m_FileName, FileMode.Create))
{
using (StreamWriter writer = new StreamWriter(stream, encoding))
{
for (int currentLine = 0; currentLine < csvLines.Length; ++currentLine)
{
if (currentLine == i)
{
writer.WriteLine(string.Join(",", values));
}
else
{
writer.WriteLine(csvLines[currentLine]);
}
}
writer.Close();
}
stream.Close();
break;
}
}
}
}
}
}
Here's how I used it:
namespace CSVManager
{
class Program
{
static void Main(string[] args)
{
string fileName = #"C:\Users\mklig\Documents\TestCsv.csv";
CSVWorker csvWorker = new CSVWorker(fileName);
int row = 4;
int col = 4;
string newVal = "success";
//csvWorker.ReplaceCells(row, col, newVal);
csvWorker.AddCells(row, col, newVal);
}
}
}
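One caveat with the class above: it splits lines on ',' directly, so a quoted cell that itself contains a comma will be broken apart. If that matters for your data, a single line can be split safely with TextFieldParser from Microsoft.VisualBasic.FileIO (requires a reference to the Microsoft.VisualBasic assembly); a sketch:

using System.IO;
using Microsoft.VisualBasic.FileIO;

private static string[] SplitCsvLine(string line)
{
    // TextFieldParser honors quotes, so "a,b" stays a single field
    using (var parser = new TextFieldParser(new StringReader(line)))
    {
        parser.SetDelimiters(",");
        parser.HasFieldsEnclosedInQuotes = true;
        return parser.ReadFields() ?? new string[0];
    }
}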

Adding users to groups in SharePoint

I'm trying to add a user to a SharePoint group based on data from a CSV file. I suspect the code marked in bold might be the cause.
1. User x = web.EnsureUser("domain\logonname") --> shows the following error when I try to print any of its properties such as x.Title or x.Email: "The field is not assigned".
2. ExecuteQuery() --> "The given key was not found in the dictionary" error.
Please help me with this.
static void Main(string[] args)
{
DataTable dtErrors = new DataTable();
dtErrors.Columns.Add("Links");
dtErrors.Columns.Add("Message");
DataRow drOutputError = dtErrors.NewRow();
DataTable dtCsv = csvToDataTable(System.Configuration.ConfigurationSettings.AppSettings["FilePath"].ToString(), true);
string url = string.Empty;
try
{
foreach (DataRow drCSV in dtCsv.Rows)
{
try
{
url = drCSV[0].ToString();
string grpName = drCSV[1].ToString();
string users = drCSV[2].ToString();
string[] users1 = users.Split(';');
Console.WriteLine("URL picked from CSV: " + url);
using (ClientContext context = new ClientContext(url))
{
Web web = context.Web;
GroupCollection groupColl = web.SiteGroups;
context.Load(groupColl, groups => groups.Include(group => group.Title, group => group.Id));
context.ExecuteQuery();
Console.WriteLine("Groups Count: " + groupColl.Count);
foreach (Group grp in groupColl)
{
try
{
int grpId = grp.Id;
Console.WriteLine("SiteURL: " + url);
Console.WriteLine("Group Name: " + grpName);
//For test purpose
Console.WriteLine(grp.Title);
if (grpName == grp.Title)
{
Console.WriteLine("Match found");
for (int i = 1; i < users1.Length; i++)
{
string temp = users1[i].Remove(0,8);
Console.WriteLine(temp);
**User user = web.EnsureUser(temp);**
Console.WriteLine(user);
addUsersToGroup(grpId, url, user);
}
break;
}
}
catch (Exception ex)
{
Console.WriteLine(ex.Message.ToString());
drOutputError["Links"] = url;
drOutputError["Message"] = ex.Message.ToString();
dtErrors.Rows.Add(drOutputError);
drOutputError = dtErrors.NewRow();
}
}
}
}
catch (Exception ex)
{
drOutputError["Links"] = url;
drOutputError["Message"] = ex.Message.ToString();
dtErrors.Rows.Add(drOutputError);
drOutputError = dtErrors.NewRow();
}
}
}
catch (Exception ex)
{
drOutputError["Links"] = url;
drOutputError["Message"] = ex.Message.ToString();
dtErrors.Rows.Add(drOutputError);
drOutputError = dtErrors.NewRow();
}
ToCSVError(dtErrors, ",", true);
Console.WriteLine("=======================Completed==================");
Console.ReadLine();
}
public static void addUsersToGroup(int grpId, string url,User user)
{
try
{
using (ClientContext clientContext = new ClientContext(url))
{
Web web = clientContext.Web;
Group testingOwnersGroup = web.SiteGroups.GetById(grpId);
clientContext.Load(testingOwnersGroup);
clientContext.ExecuteQuery();
Console.WriteLine(testingOwnersGroup.Title);
UserCollection collUser = testingOwnersGroup.Users;
collUser.AddUser(user);
clientContext.Load(collUser);
clientContext.Load(testingOwnersGroup);
**clientContext.ExecuteQuery();**
}
}
catch (Exception ex)
{
Console.WriteLine(ex.Message.ToString());
}
}
public static DataTable csvToDataTable(string file, bool isRowOneHeader)
{
DataTable csvDataTable = new DataTable();
//no try/catch - add these in yourselfs or let exception happen
String[] csvData = System.IO.File.ReadAllLines(file);
//if no data in file, 'manually' throw an exception
if (csvData.Length == 0)
{
throw new Exception("CSV file appears to be empty");
}
String[] headings = csvData[0].Split(',');
int index = 0; //will be zero or one depending on isRowOneHeader
if (isRowOneHeader) //if first record lists headers
{
index = 1; //so we won't take headings as data
//for each heading
for (int i = 0; i < headings.Length; i++)
{
//replace spaces with underscores for column names
headings[i] = headings[i].Replace(" ", "_");
//add a column for each heading
csvDataTable.Columns.Add(headings[i], typeof(string));
}
}
else //if no headers just go for col1, col2 etc.
{
for (int i = 0; i < headings.Length; i++)
{
//create arbitrary column names
csvDataTable.Columns.Add("col" + (i + 1).ToString(), typeof(string));
}
}
//populate the DataTable
for (int i = index; i < csvData.Length; i++)
{
//create new rows
DataRow row = csvDataTable.NewRow();
for (int j = 0; j < headings.Length; j++)
{
//fill them
row[j] = csvData[i].Split(',')[j];
}
//add rows to over DataTable
csvDataTable.Rows.Add(row);
}
//return the CSV DataTable
return csvDataTable;
}
static void ToCSVError(DataTable table, string delimiter, bool includeHeader)
{
StringBuilder result = new StringBuilder();
if (includeHeader)
{
foreach (DataColumn column in table.Columns)
{
result.Append(column.ColumnName);
result.Append(delimiter);
}
result.Length--; // drop the trailing delimiter
result.Append(Environment.NewLine);
}
foreach (DataRow row in table.Rows)
{
foreach (object item in row.ItemArray)
{
if (item is System.DBNull)
result.Append(delimiter);
else
{
string itemAsString = item.ToString();
// Double up all embedded double quotes
itemAsString = itemAsString.Replace("\"", "\"\"");
// To keep things simple, always delimit with double-quotes
// so we don't have to determine in which cases they're necessary
// and which cases they're not.
itemAsString = "\"" + itemAsString + "\"";
result.Append(itemAsString + delimiter);
}
}
result.Length--; // drop the trailing delimiter
result.Append(Environment.NewLine);
}
using (StreamWriter writer = new StreamWriter(System.Configuration.ConfigurationSettings.AppSettings["ErrorLog"].ToString(), true))
{
writer.Write(result.ToString());
}
}
}
}
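Regarding error 1: in CSOM, EnsureUser only returns a client object, and its properties are not populated until you load it and execute the query. A sketch of the usual pattern, applied to the loop above:

User user = web.EnsureUser(temp);
context.Load(user);     // request Title, Email, etc. from the server
context.ExecuteQuery(); // properties are only populated after this round trip
Console.WriteLine(user.Title + " / " + user.Email);
addUsersToGroup(grpId, url, user);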

SqlDataReader: change column names when exporting to CSV

I have a query that gets report data via a SqlDataReader and sends the SqlDataReader to a method that exports the content out to a .CSV file; however, the column names are showing up in the .CSV file the way that they appear in the database which is not ideal.
I do not want to alter the query itself (changing the names to have spaces) because this query is called in another location where it maps to an object and spaces would not work. I would prefer not to create a duplicate query because maintenance could be problematic. I also do not want to modify the method that writes out the .CSV as this is a method that is globally used.
Can I modify the column names after I fill the data reader but before I send it to the .CSV method? If so, how?
If I can't do it this way, could I do it if it was a DataTable instead?
Here is the general flow:
public static SqlDataReader RunMasterCSV(Search search)
{
SqlDataReader reader = null;
using (Network network = new Network())
{
using (SqlCommand cmd = new SqlCommand("dbo.MasterReport"))
{
cmd.CommandType = CommandType.StoredProcedure;
//Parameters here...
network.FillSqlReader(cmd, ref reader);
// <-- Ideally would like to find a solution here -->
return reader;
}
}
}
public FileInfo CSVFileWriter(SqlDataReader reader)
{
DeleteOldFolders();
FileInfo file = null;
if (reader != null)
{
using (reader)
{
var WriteDirectory = GetExcelOutputDirectory();
double folderToSaveInto = Math.Ceiling((double)DateTime.Now.Hour / Folder_Age_Limit.TotalHours);
string uploadFolder = GetExcelOutputDirectory() + "\\" + DateTime.Now.ToString("ddMMyyyy") + "_" + folderToSaveInto.ToString();
//Add directory for today if one does not exist
if (!Directory.Exists(uploadFolder))
Directory.CreateDirectory(uploadFolder);
//Generate random GUID fileName
file = new FileInfo(uploadFolder + "\\" + Guid.NewGuid().ToString() + ".csv");
if (file.Exists)
file.Delete();
using (file.Create()) { /*kill the file stream immediately*/};
StringBuilder sb = new StringBuilder();
if (reader.Read())
{
//write the column names
for (int i = 0; i < reader.FieldCount; i++)
{
AppendValue(sb, reader.GetName(i), (i == reader.FieldCount - 1));
}
//write the first row's values
for (int i = 0; i < reader.FieldCount; i++)
{
AppendValue(sb, reader[i] == DBNull.Value ? "" : reader[i].ToString(), (i == reader.FieldCount - 1));
}
int rowcounter = 1;
while (reader.Read())
{
for (int i = 0; i < reader.FieldCount; i++)
{
AppendValue(sb, reader[i] == DBNull.Value ? "" : reader[i].ToString(), (i == reader.FieldCount - 1));
}
rowcounter++;
if (rowcounter == MaxRowChunk)
{
using (var sw = file.AppendText())
{
sw.Write(sb.ToString());
sw.Close();
sw.Dispose();
}
sb = new StringBuilder();
rowcounter = 0;
}
}
if (sb.Length > 0)
{
//write the last bit
using (var sw = file.AppendText())
{
sw.Write(sb.ToString());
sw.Close();
sw.Dispose();
sb = new StringBuilder();
}
}
}
}
}
return file;
}
I would try a refactoring of your CSVFileWriter.
First you should add a delegate declaration
public delegate string onColumnRename(string columnName);
Then create an overload of your CSVFileWriter where you pass the delegate together with the reader
public FileInfo CSVFileWriter(SqlDataReader reader, onColumnRename renamer)
{
// Move here all the code of the old CSVFileWriter
.....
}
Move the code of the previous CSVFileWriter to the new method and, from the old one, call the new one:
public FileInfo CSVFileWriter(SqlDataReader reader)
{
// Pass null for the delegate to the new version of CSVFileWriter....
return this.CSVFileWriter(reader, null);
}
This will keep existing clients of the old method happy. For them nothing has changed.....
Inside the new version of CSVFileWriter you change the code that prepares the column names:
for (int i = 0; i < reader.FieldCount; i++)
{
string colName = renamer != null ? renamer(reader.GetName(i))
: reader.GetName(i);
AppendValue(sb, colName, (i == reader.FieldCount - 1));
}
Now it is just a matter of creating the renamer function that translates your column names:
private string myColumnRenamer(string columnName)
{
if (columnName == "yourNameWithoutSpaces")
return "your Name with Spaces";
else
return columnName;
}
This could be optimized with a static dictionary to remove the list of ifs.
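For example, a sketch (the mappings here are invented placeholders; needs System.Collections.Generic):

private static readonly Dictionary<string, string> ColumnDisplayNames =
    new Dictionary<string, string>
    {
        { "FirstName", "First Name" },
        { "LastUpdateDate", "Last Update Date" }
    };

private string myColumnRenamer(string columnName)
{
    string displayName;
    return ColumnDisplayNames.TryGetValue(columnName, out displayName)
        ? displayName
        : columnName;
}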
At this point you could call the new CSVFileWriter passing your function:
FileInfo fi = CSVFileWriter(reader, myColumnRenamer);

How to create a generic text file parser for any kind of text file?

I want to create a generic text file parser in C# for any kind of text file. I have 4 applications, all of which get their input data in txt format, but the text files are not homogeneous in nature. I have tried fixed-width delimiting:
private static DataTable FixedWidthDiliminatedTxtRead()
{
string[] fields;
StringBuilder sb = new StringBuilder();
List<StringBuilder> lst = new List<StringBuilder>();
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(testOCC))
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(new int[12] { 2,25,8,12,13,5,6,3,10,11,10,24 });
for (int col = 1; col < 13; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count)
dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
but I feel this is a very rigid approach that really varies from application to application, and I'd like to make it configurable. Is there any better way than
tfp.SetFieldWidths(new int[12] { 2,25,8,12,13,5,6,3,10,11,10,24 });
File nature:
It's a report kind of file.
The positions of the columns are very similar.
The row data of each file is different.
I found this as a reference:
http://www.codeproject.com/Articles/11698/A-Portable-and-Efficient-Generic-Parser-for-Flat-F
Any other thoughts?
If the only thing different is the field widths, you could just try sending the field widths in as a parameter:
private static DataTable FixedWidthDiliminatedTxtRead(int[] fieldWidthArray)
{
string[] fields;
StringBuilder sb = new StringBuilder();
List<StringBuilder> lst = new List<StringBuilder>();
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(testOCC))
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(fieldWidthArray);
for (int col = 1; col < 13; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count)
dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
If you will have more logic to grab the data, you might want to consider defining an interface or abstract class for a GenericTextParser and creating concrete implementations for each file type.
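For example, a minimal sketch of that idea (the interface name is mine; it assumes a reader like the FixedWidthTxtRead method shown further down this page):

public interface ITextFileParser
{
    DataTable Parse(string fileName);
}

public class FixedWidthParser : ITextFileParser
{
    private readonly int[] _fieldWidths;

    public FixedWidthParser(int[] fieldWidths)
    {
        _fieldWidths = fieldWidths;
    }

    public DataTable Parse(string fileName)
    {
        // each application supplies its own width array
        return FixedWidthTxtRead(fileName, _fieldWidths);
    }
}

Each of the 4 applications would then construct the implementation it needs, while the calling code only ever sees ITextFileParser.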
Hey, I made one of these last week.
I did not write it with the intention of other people using it, so I apologize in advance if it's not documented well, but I cleaned it up for you. Also, I grabbed several segments of code from Stack Overflow, so I am not the original author of several pieces of it.
The places you need to edit are the path and pathOut variables and the separators of text:
char[] delimiters = new char[]
It searches for part of a word and then grabs the whole word. I used a C# console application for this.
Here you go:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;
namespace UniqueListofStringFinder
{
class Program
{
static void Main(string[] args)
{
string path = @"c:\Your Path\in.txt";
string pathOut = @"c:\Your Path\out.txt";
string data = "!";
Console.WriteLine("Current Path In is set to: " + path);
Console.WriteLine("Current Path Out is set to: " + pathOut);
Console.WriteLine(Environment.NewLine + Environment.NewLine + "Input String to Search For:");
string input = Console.ReadLine();
// Create the file if it does not exist.
if (!File.Exists(path))
{
// Create the file.
using (FileStream fs = File.Create(path))
{
Byte[] info =
new UTF8Encoding(true).GetBytes("This is some text in the file.");
// Add some information to the file.
fs.Write(info, 0, info.Length);
}
}
List<string> Spec = new List<string>();
using (StreamReader sr = File.OpenText(path))
{
while (!sr.EndOfStream)
{
string s = sr.ReadLine();
if (s.Contains(input))
{
char[] delimiters = new char[] { '\r', '\n', '\t', ')', '(', ',', '=', '"', '\'', '<', '>', '$', ' ', '#', '[', ']' };
string[] parts = s.Split(delimiters,
StringSplitOptions.RemoveEmptyEntries);
foreach (string word in parts)
{
if (word.Contains(input))
{
if( word.IndexOf(input) == 0)
{
Spec.Add(word);
}
}
}
}
}
Spec.Sort();
// Open the stream and read it back.
//while ((s = sr.ReadLine()) != null)
//{
// Console.WriteLine(s);
//}
}
Console.WriteLine();
StringBuilder builder = new StringBuilder();
foreach (string s in Spec) // Loop through all strings
{
builder.Append(s).Append(Environment.NewLine); // Append string to StringBuilder
}
string result = builder.ToString(); // Get string from StringBuilder
Program a = new Program();
data = a.uniqueness(result);
int i = a.writeFile(data,pathOut);
}
public string uniqueness(string rawData )
{
if (rawData == "")
{
return "Empty Data Set";
}
List<string> dataVar = new List<string>();
List<string> holdData = new List<string>();
bool testBool = false;
using (StringReader reader = new StringReader(rawData))
{
string line;
while ((line = reader.ReadLine()) != null)
{
foreach (string s in holdData)
{
if (line == s)
{
testBool = true;
}
}
if (testBool == false)
{
holdData.Add(line);
}
testBool = false;
// Do something with the line
}
}
int i = 0;
string dataOut = "";
foreach (string s in holdData)
{
dataOut += s + "\r\n";
i++;
}
// Write the string to a file.
return dataOut;
}
public int writeFile(string dataOut, string pathOut)
{
try
{
System.IO.StreamWriter file = new System.IO.StreamWriter(pathOut);
file.WriteLine(dataOut);
file.Close();
}
catch (Exception ex)
{
dataOut += ex.ToString();
return 1;
}
return 0;
}
}
}
private static DataTable FixedWidthTxtRead(string filename, int[] fieldWidths)
{
string[] fields;
DataTable dtable = new DataTable();
ArrayList aList;
using (TextFieldParser tfp = new TextFieldParser(filename))
{
tfp.TextFieldType = FieldType.FixedWidth;
tfp.SetFieldWidths(fieldWidths);
for (int col = 1; col <= fieldWidths.Length; ++col)
dtable.Columns.Add("COL" + col);
while (!tfp.EndOfData)
{
fields = tfp.ReadFields();
aList = new ArrayList();
for (int i = 0; i < fields.Length; ++i)
aList.Add(fields[i] as string);
if (dtable.Columns.Count == aList.Count) dtable.Rows.Add(aList.ToArray());
}
}
return dtable;
}
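A quick usage sketch (the path is hypothetical, and the widths reuse the twelve-column example above):

DataTable report = FixedWidthTxtRead(@"C:\Reports\report.txt",
    new int[] { 2, 25, 8, 12, 13, 5, 6, 3, 10, 11, 10, 24 });
Console.WriteLine(report.Rows.Count + " rows parsed");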
Here's what I did:
I built a factory for the type of processor needed (based on file type/format), which abstracted the file reader.
I then built a collection object that contained a set of triggers for each field I was interested in (also contained the property name for which this field is destined). This settings collection is loaded in via an XML configuration file, so all I need to change are the settings, and the base parsing process can react to how the settings are configured. Finally I built a reflection wrapper wherein once a field is parsed, the corresponding property on the model object is set.
As the file flowed through, the triggers for each setting evaluated each line's value. When a trigger found what it was set to find (via pattern matching, or column length values), it fired an event that bubbled up and set a property on the model object. I can show some pseudo code if you're interested. It needs some work for efficiency's sake, but I like the concept.
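Since pseudo code was offered, here is a rough reconstruction of the trigger idea (all names are invented here, not the author's actual code):

public class FieldTrigger
{
    public string PropertyName { get; set; } // destination property on the model object
    public string Pattern { get; set; }      // regex loaded from the XML settings file

    // returns the first captured group when the line matches, otherwise null
    public string TryMatch(string line)
    {
        var m = System.Text.RegularExpressions.Regex.Match(line, Pattern);
        return m.Success ? m.Groups[1].Value : null;
    }
}

// as each line flows through, every trigger inspects it; on a match the
// corresponding model property is set via reflection
foreach (var trigger in triggers)
{
    string value = trigger.TryMatch(line);
    if (value != null)
        model.GetType().GetProperty(trigger.PropertyName).SetValue(model, value, null);
}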
