I have several strongly typed datasets throughout my application. Writing methods to update the data is getting tedious as each has several tables. I want to create one generic function that I can update all of the tables easily. I don't mind if I have to create one of these for each DataSet but if one function could handle all of them, that would be amazing!
There will be any number of new, updated, or deleted records and each row should be flagged properly. This function should just be handling the actual saving. Here is what I have so far:
// Saves pending changes of a typed DataSet through its TableAdapter by locating,
// via reflection, an Update overload whose parameter is the typed-DataSet type.
// NOTE(review): 'ds' is only ever cast to DataSet, and the match is done on the
// parameter type's simple name, so this relies on the generated TableAdapter
// exposing an Update(<TypedDataSet>) overload — TODO confirm per adapter.
// Returns true when the reflection walk completes; false on any outer failure.
private bool SaveData(object oTableAdaptor, object ds)
{
try
{
// Enumerate every public method of the adapter, looking for "Update" overloads.
Type oType = oTableAdaptor.GetType();
MethodInfo[] oMethodInfoArray = oType.GetMethods();
foreach (MethodInfo oMI in oMethodInfoArray)
{
if (oMI.Name == "Update")
{
ParameterInfo[] oParamaterInfoArray = oMI.GetParameters();
foreach (ParameterInfo oPI in oParamaterInfoArray)
{
Type DsType = null;
// "NameOfDataSet" is a placeholder — one branch like this per typed DataSet.
if (oPI.ParameterType.Name == "NameOfDataSet")
{
DsType = typeof(MyDataSet);
// get a list of the changed tables???
}
// Only invoke when the DataSet has pending inserts/updates/deletes.
if (((DataSet)ds).HasChanges() == true)
{
if (oPI.ParameterType == DsType)
{
object[] values = { ds };
try
{
// Invokes adapter.Update(ds); the generated adapter routes rows by RowState.
oMI.Invoke(oTableAdaptor, values);
}
catch (Exception ex)
{
// Per-invoke failures are logged but deliberately not fatal.
System.Diagnostics.Debug.WriteLine(oTableAdaptor.GetType().Name + Environment.NewLine + ex.Message);
}
}
}
}
}
}
}
catch (Exception Exp)
{
// Any reflection or cast failure aborts the save and reports false.
System.Diagnostics.Debug.WriteLine(Exp.Message);
if (Exp.InnerException != null) System.Diagnostics.Debug.WriteLine(Exp.InnerException.Message);
return false;
}
return true;
I have adapted this from another bit of code another developer has in a different application. The main difference thus far is he is passing in an array (of type object) of dataadaptors and has each of the three DataSets (globally instantiated) set up as individual if blocks inside the foreach (ParameterInfo oPI in oParamaterInfoArray) block (where my 'NameOfDataSet' would be one of the datasets)
Can anybody give me a little push (or a shove?) in the direction of finishing this function up? I know I am right there but it feels like I am overlooking something. This code does compile without error.
I've been using this. It would need some optimizations though. This also takes care of updating the tables in correct order depending on the relations in dataset (in case there are no self-references, which can be handled by sorting the rows, but for simplicity I'm not posting it here).
/// <summary>
/// Builds a table -> SqlDataAdapter map for every table in <paramref name="data"/>
/// (locating each generated TableAdapter by naming convention) and saves the DataSet.
/// </summary>
/// <param name="data">DataSet whose changes should be persisted.</param>
/// <param name="connection">Open (or openable) connection assigned to each adapter.</param>
public static void Save(DataSet data, SqlConnection connection)
{
/// Dictionary for associating adapters to tables.
Dictionary<DataTable, SqlDataAdapter> adapters = new Dictionary<DataTable, SqlDataAdapter>();
foreach (DataTable table in data.Tables)
{
/// Find the table adapter using Reflection.
Type adapterType = GetTableAdapterType(table);
// FIX: the original passed an undefined third argument 'validityEnd';
// SetupTableAdapter is declared with exactly (Type, SqlConnection).
SqlDataAdapter adapter = SetupTableAdapter(adapterType, connection);
adapters.Add(table, adapter);
}
/// Save the data.
Save(data, adapters);
}
/// <summary>
/// Resolves the designer-generated TableAdapter type for a table using the
/// naming convention "&lt;Namespace&gt;.&lt;DataSetName&gt;TableAdapters.&lt;TableName&gt;TableAdapter".
/// Returns null when no such type exists in the current assembly.
/// </summary>
static Type GetTableAdapterType(DataTable table)
{
    var fullName = string.Format(
        "{0}.{1}TableAdapters.{2}TableAdapter",
        table.GetType().Namespace,
        table.DataSet.DataSetName,
        table.TableName);
    return Type.GetType(fullName);
}
/// <summary>
/// Instantiates the generated TableAdapter, wires up its connection, and returns the
/// SqlDataAdapter it wraps (exposed only through the non-public "Adapter" property).
/// </summary>
static SqlDataAdapter SetupTableAdapter(Type adapterType, SqlConnection connection)
{
    var tableAdapter = Activator.CreateInstance(adapterType);
    var innerAdapter = (SqlDataAdapter)GetPropertyValue(adapterType, tableAdapter, "Adapter");
    SetPropertyValue(adapterType, tableAdapter, "Connection", connection);
    return innerAdapter;
}
/// <summary>
/// Reads a NON-public instance property via reflection.
/// NOTE: BindingFlags.GetProperty is ignored by Type.GetProperty (it only affects
/// InvokeMember); the effective filter here is NonPublic | Instance, so public
/// properties are intentionally not visible to this helper.
/// </summary>
static object GetPropertyValue(Type type, object instance, string propertyName)
{
    const BindingFlags flags = BindingFlags.NonPublic | BindingFlags.GetProperty | BindingFlags.Instance;
    var property = type.GetProperty(propertyName, flags);
    return property.GetValue(instance, null);
}
/// <summary>
/// Writes an instance property (public or non-public) via reflection.
/// </summary>
static void SetPropertyValue(Type type, object instance, string propertyName, object propertyValue)
{
    const BindingFlags flags =
        BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.GetProperty | BindingFlags.Instance;
    var property = type.GetProperty(propertyName, flags);
    property.SetValue(instance, propertyValue, null);
}
/// <summary>
/// Saves a DataSet using the per-table adapters: tables are first ordered parent-first
/// (via repeated root extraction), then deletions are flushed child -> parent and
/// inserts/updates parent -> child so foreign-key constraints are never violated.
/// </summary>
static void Save(DataSet data, Dictionary<DataTable, SqlDataAdapter> adapters)
{
    if (data == null)
        throw new ArgumentNullException("data");
    if (adapters == null)
        throw new ArgumentNullException("adapters");

    // Repeatedly pull out a table whose parents have all been emitted already.
    Dictionary<DataTable, bool> procesedTables = new Dictionary<DataTable, bool>();
    List<DataTable> sortedTables = new List<DataTable>();
    for (DataTable root = GetRootTable(data, procesedTables);
         root != null;
         root = GetRootTable(data, procesedTables))
    {
        sortedTables.Add(root);
    }

    /// Updating Deleted rows in Child -> Parent order.
    for (int i = sortedTables.Count - 1; i >= 0; i--)
        Update(adapters, sortedTables[i], DataViewRowState.Deleted);

    /// Updating Added / Modified rows in Parent -> Child order.
    foreach (DataTable table in sortedTables)
        Update(adapters, table, DataViewRowState.Added | DataViewRowState.ModifiedCurrent);
}
/// <summary>
/// Flushes the rows of <paramref name="table"/> that match <paramref name="states"/>
/// through the table's adapter; a table with no registered adapter is silently skipped.
/// </summary>
static void Update(Dictionary<DataTable, SqlDataAdapter> adapters, DataTable table, DataViewRowState states)
{
    // Single dictionary probe instead of the original ContainsKey + indexer double lookup.
    if (!adapters.TryGetValue(table, out SqlDataAdapter adapter) || adapter == null)
        return;
    DataRow[] rowsToUpdate = table.Select("", "", states);
    if (rowsToUpdate.Length > 0)
        adapter.Update(rowsToUpdate);
}
/// <summary>
/// Returns the next not-yet-processed table whose parents are all processed
/// (i.e. the next "root" in topological order), marking it processed;
/// returns null once every table has been handed out.
/// </summary>
static DataTable GetRootTable(DataSet data, Dictionary<DataTable, bool> procesedTables)
{
    foreach (DataTable candidate in data.Tables)
    {
        if (procesedTables.ContainsKey(candidate))
            continue;
        if (!IsRootTable(candidate, procesedTables))
            continue;
        procesedTables.Add(candidate, false);
        return candidate;
    }
    return null;
}
/// <summary>
/// A table counts as a "root" once every distinct parent it depends on has already
/// been processed; self-references are ignored.
/// </summary>
static bool IsRootTable(DataTable table, Dictionary<DataTable, bool> procesedTables)
{
    foreach (DataRelation relation in table.ParentRelations)
    {
        var parent = relation.ParentTable;
        bool pendingParent = parent != table && !procesedTables.ContainsKey(parent);
        if (pendingParent)
            return false;
    }
    return true;
}
Can't you just treat them as their base classes, DbDataAdapter, DataSet and DataTable?
You can access the table by name by doing DataSet.Tables["name"]. This returns a DataTable object that you can pass to the DbDataAdapters update method.
Or if your TableAdapter updates all the tables in your DataSet then you can pass the entire DataSet to the update method directly.
With that said I would suggest you rethink the use of typed data sets if you have the chance to do so. In my experience they end up being a hassle to maintain and use and have found the general DataTable, DataSet and DbDataAdapter classes to be much easier to use directly.
Do you really want reflection to be used that much in your DAL? Perhaps an ORM such as LINQ to SQL or NHibernate would be a good alternative?
Related
I'm comparing materialization time between Dapper and ADO.NET. Ultimately, Dapper tends to be faster than ADO.NET, though the first time a given fetch query is executed it is slower than ADO.NET. A few results show Dapper being a little bit faster than ADO.NET (almost all of the results show they are comparable, though).
So I think I'm using inefficient approach to map result of SqlDataReader to object.
This is my code
// Benchmark loop: fetch one SalesOrderHeader row per key and hand-map it from the reader.
// FIX: SQL Server named parameters are @-prefixed; the original "#Id" (a markdown
// artifact) would make SqlCommand fail at execute time. Corrected in the SQL text
// and in AddWithValue below.
var sql = "SELECT * FROM Sales.SalesOrderHeader WHERE SalesOrderID = @Id";
var conn = new SqlConnection(ConnectionString);
var stopWatch = new Stopwatch();
try
{
conn.Open();
var sqlCmd = new SqlCommand(sql, conn);
for (var i = 0; i < keys.GetLength(0); i++)
{
for (var r = 0; r < keys.GetLength(1); r++)
{
stopWatch.Restart();
// Re-bind the single parameter for this iteration.
sqlCmd.Parameters.Clear();
sqlCmd.Parameters.AddWithValue("@Id", keys[i, r]);
var reader = await sqlCmd.ExecuteReaderAsync();
SalesOrderHeaderSQLserver salesOrderHeader = null;
while (await reader.ReadAsync())
{
// Manual column-by-column mapping; 'as' casts yield null for DBNull on
// nullable columns, direct casts throw if a non-nullable column is NULL.
salesOrderHeader = new SalesOrderHeaderSQLserver();
salesOrderHeader.SalesOrderId = (int)reader["SalesOrderId"];
salesOrderHeader.SalesOrderNumber = reader["SalesOrderNumber"] as string;
salesOrderHeader.AccountNumber = reader["AccountNumber"] as string;
salesOrderHeader.BillToAddressID = (int)reader["BillToAddressID"];
salesOrderHeader.TotalDue = (decimal)reader["TotalDue"];
salesOrderHeader.Comment = reader["Comment"] as string;
salesOrderHeader.DueDate = (DateTime)reader["DueDate"];
salesOrderHeader.CurrencyRateID = reader["CurrencyRateID"] as int?;
salesOrderHeader.CustomerID = (int)reader["CustomerID"];
salesOrderHeader.SalesPersonID = reader["SalesPersonID"] as int?;
salesOrderHeader.CreditCardApprovalCode = reader["CreditCardApprovalCode"] as string;
salesOrderHeader.ShipDate = reader["ShipDate"] as DateTime?;
salesOrderHeader.Freight = (decimal)reader["Freight"];
salesOrderHeader.ModifiedDate = (DateTime)reader["ModifiedDate"];
salesOrderHeader.OrderDate = (DateTime)reader["OrderDate"];
salesOrderHeader.TerritoryID = reader["TerritoryID"] as int?;
salesOrderHeader.CreditCardID = reader["CreditCardID"] as int?;
salesOrderHeader.OnlineOrderFlag = (bool)reader["OnlineOrderFlag"];
salesOrderHeader.PurchaseOrderNumber = reader["PurchaseOrderNumber"] as string;
salesOrderHeader.RevisionNumber = (byte)reader["RevisionNumber"];
salesOrderHeader.Rowguid = (Guid)reader["Rowguid"];
salesOrderHeader.ShipMethodID = (int)reader["ShipMethodID"];
salesOrderHeader.ShipToAddressID = (int)reader["ShipToAddressID"];
salesOrderHeader.Status = (byte)reader["Status"];
salesOrderHeader.SubTotal = (decimal)reader["SubTotal"];
salesOrderHeader.TaxAmt = (decimal)reader["TaxAmt"];
}
stopWatch.Stop();
reader.Close();
await PrintTestFindByPKReport(stopWatch.ElapsedMilliseconds, salesOrderHeader.SalesOrderId.ToString());
}
I used the `as` keyword to cast the nullable columns — is that correct?
and this is code for Dapper.
// Dapper version of the same benchmark: one query per key, materialized by Dapper.
// FIX: parameter marker corrected from "#Id" (markdown artifact) to "@Id",
// matching the anonymous-object parameter { Id = ... }.
using (var conn = new SqlConnection(ConnectionString))
{
conn.Open();
var stopWatch = new Stopwatch();
for (var i = 0; i < keys.GetLength(0); i++)
{
for (var r = 0; r < keys.GetLength(1); r++)
{
stopWatch.Restart();
var result = (await conn.QueryAsync<SalesOrderHeader>("SELECT * FROM Sales.SalesOrderHeader WHERE SalesOrderID = @Id", new { Id = keys[i, r] })).FirstOrDefault();
stopWatch.Stop();
await PrintTestFindByPKReport(stopWatch.ElapsedMilliseconds, result.ToString());
}
}
}
When in doubt regarding anything db or reflection, I ask myself, "what would Marc Gravell do?".
In this case, he would use FastMember! And you should too. It's the underpinning to the data conversions in Dapper, and can easily be used to map your own DataReader to an object (should you not want to use Dapper).
Below is an extension method converting a SqlDataReader into something of type T:
PLEASE NOTE: This code implies a dependency on FastMember and is written for .NET Core (though could easily be converted to .NET Framework/Standard compliant code).
/// <summary>
/// Maps the current SqlDataReader record to a new <typeparamref name="T"/> using
/// FastMember, matching columns to members case-insensitively; DBNull columns are skipped.
/// </summary>
public static T ConvertToObject<T>(this SqlDataReader rd) where T : class, new()
{
    Type type = typeof(T);
    var accessor = TypeAccessor.Create(type);
    // Build the case-insensitive member-name set once: the original ran a LINQ Any()
    // scan over all members for every column, i.e. O(columns * members).
    var memberNames = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
    foreach (var member in accessor.GetMembers())
        memberNames.Add(member.Name);
    var t = new T();
    for (int i = 0; i < rd.FieldCount; i++)
    {
        if (rd.IsDBNull(i))
            continue;
        string fieldName = rd.GetName(i);
        // Same semantics as before: the accessor is indexed by the COLUMN name.
        if (memberNames.Contains(fieldName))
        {
            accessor[t, fieldName] = rd.GetValue(i);
        }
    }
    return t;
}
2022 update
Now that we have .NET 5 and .NET 6 available, which include Source Generators - an amazing Roslyn-based feature, that, basically, allows your code to... generate more code at compile time. It's basically "AOT Reflection" (ahead-of-time) that allows you to generate lightning-fast mapping code that has zero overhead. This thing will revolutionize the ORM world for sure.
Now, back to the question - the fastest way to map an IDataReader would be to use Source Generators. We started experimenting with this feature and we love it.
Here's a library we're working on, that does exactly that (maps DataReader to objects), feel free to "steal" some code examples: https://github.com/jitbit/MapDataReader
Previous answer that is still 100% valid
The most upvoted answer mentions Marc Gravell and his FastMember. But if you're already using Dapper, which is also a component of his, you can use Dapper's GetRowParser like this:
// Compile a row-materializer once, then reuse it for every row of the result set.
var parser = reader.GetRowParser<MyObject>(typeof(MyObject));
while (reader.Read())
{
// Each call maps the reader's current row to a MyObject via the cached parser.
var myObject = parser(reader);
}
Here's a way to make your ADO.NET code faster.
When you do your select, list out the fields that you are selecting rather than using select *. This will let you ensure the order that the fields are coming back even if that order changes in the database. Then, when getting those fields from the Reader, get them by index rather than by name. Using an index is faster.
Also, I'd recommend not making string database fields nullable unless there is a strong business reason. Then just store a blank string in the database if there is no value. Finally I'd recommend using the Get methods on the DataReader to get your fields in the type they are so that casting isn't needed in your code. So for example instead of casting the DataReader[index++] value as an int use DataReader.GetInt32(index++)
So for example, this code:
// "Before": name-based lookup plus a cast for every column.
salesOrderHeader = new SalesOrderHeaderSQLserver();
salesOrderHeader.SalesOrderId = (int)reader["SalesOrderId"];
salesOrderHeader.SalesOrderNumber = reader["SalesOrderNumber"] as string;
salesOrderHeader.AccountNumber = reader["AccountNumber"] as string;
becomes
// "After": ordinal-based typed accessors — no boxing, no name lookup.
// FIX: SqlDataReader has no GetInt method; the typed accessor is GetInt32.
// NOTE(review): GetString throws on NULL columns — this pattern assumes the
// selected columns are non-nullable, as the surrounding answer recommends.
int index = 0;
salesOrderHeader = new SalesOrderHeaderSQLserver();
salesOrderHeader.SalesOrderId = reader.GetInt32(index++);
salesOrderHeader.SalesOrderNumber = reader.GetString(index++);
salesOrderHeader.AccountNumber = reader.GetString(index++);
Give that a whirl and see how it does for you.
Modified #HouseCat's solution to be case insensitive:
/// <summary>
/// Maps a SqlDataReader record to an object, matching column names to member names
/// case-insensitively; unmatched columns are ignored, DBNull becomes null.
/// </summary>
/// <typeparam name="T">Type of the target object.</typeparam>
/// <param name="dataReader">Reader positioned on the row to map.</param>
/// <param name="newObject">Existing instance whose members are populated.</param>
/// <remarks>https://stackoverflow.com/a/52918088</remarks>
public static void MapDataToObject<T>(this SqlDataReader dataReader, T newObject)
{
    if (newObject == null) throw new ArgumentNullException(nameof(newObject));
    // Fast Member Usage
    var objectMemberAccessor = TypeAccessor.Create(newObject.GetType());
    var propertiesHashSet =
        objectMemberAccessor
            .GetMembers()
            .Select(mp => mp.Name)
            .ToHashSet(StringComparer.InvariantCultureIgnoreCase);
    for (int i = 0; i < dataReader.FieldCount; i++)
    {
        // FIX: the original ran FirstOrDefault over the set (an O(members) scan per
        // column, defeating the HashSet). TryGetValue does one probe with the set's
        // case-insensitive comparer and hands back the stored member name.
        if (propertiesHashSet.TryGetValue(dataReader.GetName(i), out var name))
        {
            objectMemberAccessor[newObject, name]
                = dataReader.IsDBNull(i) ? null : dataReader.GetValue(i);
        }
    }
}
EDIT: This does not work for List<T> or multiple tables in the results.
EDIT2: Changing the calling function to this works for lists. I am just going to return a list of objects no matter what and get the first index if I was expecting a single object. I haven't looked into multiple tables yet but I will.
/// <summary>
/// Maps a SqlDataReader record to an object, matching column names case-insensitively.
/// </summary>
public static void MapDataToObject<T>(this SqlDataReader dataReader, T newObject)
{
    if (newObject == null) throw new ArgumentNullException(nameof(newObject));
    // Fast Member Usage
    var objectMemberAccessor = TypeAccessor.Create(newObject.GetType());
    var propertiesHashSet =
        objectMemberAccessor
            .GetMembers()
            .Select(mp => mp.Name)
            .ToHashSet(StringComparer.InvariantCultureIgnoreCase);
    for (int i = 0; i < dataReader.FieldCount; i++)
    {
        // FIX: replaced the FirstOrDefault linear scan with a single case-insensitive
        // HashSet probe that also yields the stored member name.
        if (propertiesHashSet.TryGetValue(dataReader.GetName(i), out var name))
        {
            //Attention! if you are getting errors here, then double check that your model and sql have matching types for the field name.
            //Check api.log for error message!
            objectMemberAccessor[newObject, name]
                = dataReader.IsDBNull(i) ? null : dataReader.GetValue(i);
        }
    }
}
EDIT 3: Updated to show sample calling function.
/// <summary>
/// Executes a stored procedure and materializes every row of the result set into a
/// new <typeparamref name="T"/> via MapDataToObject; returns an empty list for no rows.
/// </summary>
public async Task<List<T>> ExecuteReaderAsync<T>(string storedProcedureName, SqlParameter[] sqlParameters = null) where T : class, new()
{
    var results = new List<T>();
    using (var conn = new SqlConnection(_connectionString))
    using (SqlCommand sqlCommand = GetSqlCommand(conn, storedProcedureName, sqlParameters))
    {
        await conn.OpenAsync();
        using (var dataReader = await sqlCommand.ExecuteReaderAsync(CommandBehavior.Default))
        {
            if (dataReader.HasRows)
            {
                while (await dataReader.ReadAsync())
                {
                    var row = new T();
                    dataReader.MapDataToObject(row);
                    results.Add(row);
                }
            }
        }
    }
    return results;
}
Took the method from pimbrouwers' answer and optimized it slightly. Reduce LINQ calls.
Maps only properties found in both the object and data field names. Handles DBNull. Other assumption made is your domain model properties absolutely equals table column/field names.
/// <summary>
/// Maps a SqlDataReader record to an object. Matching is case-sensitive: a member is
/// populated only when its name exactly equals the column name; DBNull becomes null.
/// </summary>
/// <typeparam name="T">Type of the target object.</typeparam>
/// <param name="dataReader">Reader positioned on the row to map.</param>
/// <param name="newObject">Existing instance whose members are populated.</param>
public static void MapDataToObject<T>(this SqlDataReader dataReader, T newObject)
{
    if (newObject == null) throw new ArgumentNullException(nameof(newObject));
    // Fast Member Usage
    var objectMemberAccessor = TypeAccessor.Create(newObject.GetType());
    var propertiesHashSet =
        objectMemberAccessor
            .GetMembers()
            .Select(mp => mp.Name)
            .ToHashSet();
    for (int i = 0; i < dataReader.FieldCount; i++)
    {
        // FIX: GetName(i) was called twice per column in the original; hoist it.
        var fieldName = dataReader.GetName(i);
        if (propertiesHashSet.Contains(fieldName))
        {
            objectMemberAccessor[newObject, fieldName]
                = dataReader.IsDBNull(i) ? null : dataReader.GetValue(i);
        }
    }
}
Sample Usage:
/// <summary>
/// Executes a stored procedure and maps the FIRST row of the result set to a new
/// <typeparamref name="T"/>; returns null when the result set is empty.
/// </summary>
public async Task<T> GetAsync<T>(string storedProcedureName, SqlParameter[] sqlParameters = null) where T : class, new()
{
    using (var conn = new SqlConnection(_connString))
    // FIX: the original leaked both the command and the reader. With
    // CommandBehavior.CloseConnection the connection is only released when the
    // reader is disposed, so disposing it here is required, not optional.
    using (var sqlCommand = await GetSqlCommandAsync(storedProcedureName, conn, sqlParameters))
    using (var dataReader = await sqlCommand.ExecuteReaderAsync(CommandBehavior.CloseConnection))
    {
        if (!dataReader.HasRows)
            return null;
        var newObject = new T();
        if (await dataReader.ReadAsync())
        { dataReader.MapDataToObject(newObject); }
        return newObject;
    }
}
You can install the package DbDataReaderMapper with the command Install-Package DbDataReaderMapper or using your IDE's package manager.
You can then create your data access object (I will choose a shorter example than the one you provided):
// Plain data-access object whose property names match the result-set column names,
// as required by the DbDataReaderMapper convention; Age is nullable to accept NULLs.
class EmployeeDao
{
public int Id { get; set; }
public string FirstName { get; set; }
public string LastName { get; set; }
public int? Age { get; set; }
}
To do the automatic mapping you can call the extension method MapToObject<T>()
// One EmployeeDao is materialized per row via the extension method.
var reader = await sqlCmd.ExecuteReaderAsync();
while (await reader.ReadAsync())
{
// NOTE(review): employeeObj is overwritten each iteration — collect into a list
// if more than the last row is needed.
var employeeObj = reader.MapToObject<EmployeeDao>();
}
and you will get rid of tens of lines of unreadable and hardly-maintainable code.
Step-by-step example here: https://github.com/LucaMozzo/DbDataReaderMapper
Perhaps the approach I will present isn't the most efficient but gets the job done with very little coding effort. The main benefit I see here is that you don't have to deal with data structure other than building a compatible (mapable) object.
If you convert the SqlDataReader to DataTable then serialize it using JsonConvert.SerializeObject you can then deserialize it to a known object type using JsonConvert.DeserializeObject
Here is an example of implementation:
// Load the result set into a DataTable, serialize it to JSON, then deserialize it
// into the target object list — low effort, at the cost of a full JSON round trip.
// FIX: connection, command and reader are IDisposable and were never disposed on
// exception paths; 'using' guarantees cleanup (disposing the connection closes it,
// so the explicit Close() is no longer needed).
List<MyObject> myObjects = new List<MyObject>();
using (SqlConnection myConnection = new SqlConnection())
using (SqlCommand sqlCmd = new SqlCommand())
{
myConnection.ConnectionString = ConfigurationManager.ConnectionStrings["DatabaseConnection"].ConnectionString;
sqlCmd.CommandType = CommandType.Text;
sqlCmd.CommandText = "SELECT * FROM MyTable";
sqlCmd.Connection = myConnection;
myConnection.Open();
using (SqlDataReader reader = sqlCmd.ExecuteReader())
{
var dataTable = new DataTable();
dataTable.Load(reader);
if (dataTable.Rows.Count > 0)
{
var serializedMyObjects = JsonConvert.SerializeObject(dataTable);
// Here you get the object
myObjects = (List<MyObject>)JsonConvert.DeserializeObject(serializedMyObjects, typeof(List<MyObject>));
}
}
}
// Generic reader-to-object mapping using plain reflection, one instance per row.
List<T> result = new List<T>();
SqlDataReader reader = com.ExecuteReader();
while(reader.Read())
{
// NOTE(review): typeof(T) and GetProperties() could be hoisted out of the row loop.
Type type = typeof(T);
T obj = (T)Activator.CreateInstance(type);
PropertyInfo[] properties = type.GetProperties();
foreach (PropertyInfo property in properties)
{
try
{
// NOTE(review): reader[...] returns DBNull.Value (not null) for database NULLs,
// so this null check never filters them; the SetValue below then throws and is
// swallowed. Convert.ChangeType via ToString() is also lossy/culture-sensitive
// for dates and decimals — TODO confirm intended behavior.
var value = reader[property.Name];
if (value != null)
property.SetValue(obj, Convert.ChangeType(value.ToString(), property.PropertyType));
}
// NOTE(review): empty catch hides every mapping error (missing column, type
// mismatch); at minimum the exception should be logged.
catch{}
}
result.Add(obj);
}
There is a SqlDataReader Mapper library in NuGet which helps you to map SqlDataReader to an object. Here is how it can be used (from GitHub documentation):
// Simple case: map the reader's current row to a DTOObject by matching names.
var mappedObject = new SqlDataReaderMapper<DTOObject>(reader)
.Build();
Or, if you want a more advanced mapping:
// Advanced case: customize the mapping per member before building.
var mappedObject = new SqlDataReaderMapper<DTOObject>(reader)
.NameTransformers("_", "")
.ForMember<int>("CurrencyId")
.ForMember("CurrencyCode", "Code")
.ForMember<string>("CreatedByUser", "User").Trim()
.ForMemberManual("CountryCode", val => val.ToString().Substring(0, 10))
.ForMemberManual("ZipCode", val => val.ToString().Substring(0, 5), "ZIP")
.Build();
Advanced mapping allows you to use name transformers, change types, map fields manually or even apply functions to the object's data so that you can easily map objects even if they differ with a reader.
I took both pimbrouwers and HouseCat's answers and come up with me. In my scenario, the column name in database has snake case format.
/// <summary>
/// Executes <paramref name="query"/> and maps the first row to a new
/// <typeparamref name="T"/>, converting snake_case column names to PascalCase
/// member names (case-insensitively). Returns an empty T when there are no rows.
/// NOTE(review): the raw query string is executed as-is — callers must not build
/// it from untrusted input (use parameters instead).
/// </summary>
public static T ConvertToObject<T>(string query) where T : class, new()
{
    using (var conn = new SqlConnection(AutoConfig.ConnectionString))
    {
        conn.Open();
        // FIX: command and reader are IDisposable and were leaked by the original.
        using (var cmd = new SqlCommand(query) { Connection = conn })
        using (var rd = cmd.ExecuteReader())
        {
            var mappedObject = new T();
            if (!rd.HasRows) return mappedObject;
            var accessor = TypeAccessor.Create(typeof(T));
            var members = accessor.GetMembers();
            if (!rd.Read()) return mappedObject;
            // Hoisted out of the loop; the value never changes per column.
            var textInfo = CultureInfo.InvariantCulture.TextInfo;
            for (var i = 0; i < rd.FieldCount; i++)
            {
                var columnNameFromDataTable = rd.GetName(i);
                var columnValueFromDataTable = rd.GetValue(i);
                // snake_case -> PascalCase: title-case each underscore-separated part.
                var splits = columnNameFromDataTable.Split('_');
                var columnName = new StringBuilder("");
                foreach (var split in splits)
                {
                    columnName.Append(textInfo.ToTitleCase(split.ToLower()));
                }
                var mappedColumnName = members.FirstOrDefault(x =>
                    string.Equals(x.Name, columnName.ToString(), StringComparison.OrdinalIgnoreCase));
                if (mappedColumnName == null) continue;
                var columnType = mappedColumnName.Type;
                if (columnValueFromDataTable != DBNull.Value)
                {
                    accessor[mappedObject, columnName.ToString()] = Convert.ChangeType(columnValueFromDataTable, columnType);
                }
            }
            return mappedObject;
        }
    }
}
We use the following class to execute a SQL query and automatically map the rows to objects. You can easily adjust the class to fit to your needs. Beware that our approach depends on FastMember, but you could easily modify the code to use reflection.
/// <summary>
/// Mapping configuration for a specific sql table to a specific class.
/// </summary>
/// <param name="Accessor">Used to access the target class properties.</param>
/// <param name="PropToRowIdxDict">Target class property name -> reader column ordinal dictionary (built once per result set, reused for every row).</param>
internal record RowMapper(TypeAccessor Accessor, IDictionary<string, int> PropToRowIdxDict);
public class RawSqlHelperService
{
/// <summary>
/// Create a new mapper for the conversion of a <see cref="DbDataReader"/> row -> <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">Target class to use.</typeparam>
/// <param name="reader">Data reader to obtain column information from.</param>
/// <returns>Row mapper object for <see cref="DbDataReader"/> row -> <typeparamref name="T"/>.</returns>
private RowMapper GetRowMapper<T>(DbDataReader reader) where T : class, new()
{
var accessor = TypeAccessor.Create(typeof(T));
var members = accessor.GetMembers();
// Column name -> column idx dict
var columnIdxDict = Enumerable.Range(0, reader.FieldCount).ToDictionary(idx => reader.GetName(idx), idx => idx);
// NOTE(review): the indexer below throws KeyNotFoundException when a mapped
// member's column is missing from the result set (and name matching is
// case-sensitive) — every non-[NotMapped] member must have a column.
var propToRowIdxDict = members
.Where(m => m.GetAttribute(typeof(NotMappedAttribute), false) == null)
.Select(m =>
{
// [Column("...")] overrides the default "property name == column name" rule.
var columnAttr = m.GetAttribute(typeof(ColumnAttribute), false) as ColumnAttribute;
var columnName = columnAttr == null
? m.Name
: columnAttr.Name;
return (PropertyName: m.Name, ColumnName: columnName);
})
.ToDictionary(x => x.PropertyName, x => columnIdxDict[x.ColumnName]);
return new RowMapper(accessor, propToRowIdxDict);
}
/// <summary>
/// Read <see cref="DbDataReader"/> current row as object <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">The class to map to.</typeparam>
/// <param name="reader">Data reader to read the current row from.</param>
/// <param name="mapper">Mapping configuration to use to perform the mapping operation.</param>
/// <returns>Resulting object of the mapping operation.</returns>
private T ReadRowAsObject<T>(DbDataReader reader, RowMapper mapper) where T : class, new()
{
var (accessor, propToRowIdxDict) = mapper;
var t = new T();
// NOTE(review): GetValue returns DBNull.Value for NULL columns; assigning it to
// a non-object member will throw — TODO confirm columns are non-nullable here.
foreach (var (propertyName, columnIdx) in propToRowIdxDict)
accessor[t, propertyName] = reader.GetValue(columnIdx);
return t;
}
/// <summary>
/// Execute the specified <paramref name="sql"/> query and automatically map the resulting rows to <typeparamref name="T"/>.
/// </summary>
/// <typeparam name="T">Target class to map to.</typeparam>
/// <param name="dbContext">Database context to perform the operation on.</param>
/// <param name="sql">SQL query to execute.</param>
/// <param name="parameters">Additional list of parameters to use for the query.</param>
/// <returns>Result of the SQL query mapped to a list of <typeparamref name="T"/>.</returns>
public async Task<IEnumerable<T>> ExecuteSql<T>(DbContext dbContext, string sql, IEnumerable<DbParameter> parameters = null) where T : class, new()
{
var con = dbContext.Database.GetDbConnection();
await con.OpenAsync();
// NOTE(review): 'as OracleCommand' yields null for any non-Oracle provider, so
// the next line would throw NullReferenceException — this helper is Oracle-only.
var cmd = con.CreateCommand() as OracleCommand;
cmd.BindByName = true;
cmd.CommandText = sql;
cmd.Parameters.AddRange(parameters?.ToArray() ?? new DbParameter[0]);
var reader = await cmd.ExecuteReaderAsync();
var records = new List<T>();
// The mapper is built once from the reader's schema and reused for all rows.
var mapper = GetRowMapper<T>(reader);
while (await reader.ReadAsync())
{
records.Add(ReadRowAsObject<T>(reader, mapper));
}
// NOTE(review): cmd/reader are not disposed and CloseAsync is skipped if an
// exception is thrown above — consider 'await using' blocks.
await con.CloseAsync();
return records;
}
}
Mapping Attributes Supported
I implemented support for the attributes NotMapped and Column used also by the entity framework.
NotMapped Attribute
Properties decorated with this attribute will be ignored by the mapper.
Column Attribute
With this attribute the column name can be customized. Without this attribute the property name is assumed to be the column name.
Example Class
// Example DTO showing the three mapping modes: [Column] rename, convention
// (property name == column name), and [NotMapped] exclusion.
private class Test
{
[Column("SDAT")]
public DateTime StartDate { get; set; } // Column name = "SDAT"
public DateTime EDAT { get; set; } // Column name = "EDAT"
[NotMapped]
public int IWillBeIgnored { get; set; }
}
Comparision to Reflection
I also compared the approach with FastMember to using plain reflection.
For the comparision I queried two date columns from a table with 1000000 rows, here are the results:
Approach
Duration in seconds
FastMember
~1.6 seconds
Reflection
~2 seconds
Credits to user pim for inspiration.
This is based on the other answers but I used standard reflection to read the properties of the class you want to instantiate and fill it from the dataReader. You could also store the properties using a dictionary persisted b/w reads.
Initialize a dictionary containing the properties from the type with their names as the keys.
// Build a name -> PropertyInfo dictionary once per type; read-only properties are
// skipped because they cannot be populated from the reader.
var type = typeof(Foo);
var properties = type.GetProperties(BindingFlags.Public | BindingFlags.Instance);
var propertyDictionary = new Dictionary<string,PropertyInfo>();
foreach(var property in properties)
{
if (!property.CanWrite) continue;
propertyDictionary.Add(property.Name, property);
}
The method to set a new instance of the type from the DataReader would be like:
// Populate a fresh instance from the reader's current row using the cached
// property dictionary; columns without a matching writable property are skipped.
var foo = new Foo();
//retrieve the propertyDictionary for the type
for (var i = 0; i < dataReader.FieldCount; i++)
{
var n = dataReader.GetName(i);
PropertyInfo prop;
if (!propertyDictionary.TryGetValue(n, out prop)) continue;
// DBNull is normalized to null before assignment (reference/nullable members only).
var val = dataReader.IsDBNull(i) ? null : dataReader.GetValue(i);
prop.SetValue(foo, val, null);
}
return foo;
If you want to write an efficient generic class dealing with multiple types you could store each per-type dictionary in a global Dictionary&lt;Type, Dictionary&lt;string, PropertyInfo&gt;&gt;.
This kinda works
/// <summary>
/// Fills every public property of <paramref name="o"/> from the reader column with
/// the same name. The class must match the reader's columns exactly (see notes below).
/// The <paramref name="T"/> parameter is kept for signature compatibility but is no
/// longer used: the original re-resolved each property via T.GetProperty(name) — a
/// redundant second reflection lookup that throws NullReferenceException whenever
/// T differs from o's runtime type. The PropertyInfo in hand is used directly instead.
/// </summary>
public static object PopulateClass(object o, SQLiteDataReader dr, Type T)
{
    Type type = o.GetType();
    PropertyInfo[] properties = type.GetProperties();
    foreach (PropertyInfo property in properties)
    {
        // NOTE(review): dr[name] returns DBNull for NULL columns, which makes
        // SetValue throw for non-object properties — matches the answer's caveat
        // that class and reader must line up 100%.
        property.SetValue(o, dr[property.Name], null);
    }
    return o;
}
Note I'm using SQlite here but the concept is the same. As an example I'm filling a Game object by calling the above like this-
g = PopulateClass(g, dr, typeof(Game)) as Game;
Note you have to have your class match up with datareader 100%, so adjust your query to suit or pass in some sort of list to skip fields. With a SQLDataReader talking to a SQL Server DB you have a pretty good type match between .net and the database. With SQLite you have to declare your ints in your class as Int64s for this to work and watch sending nulls to strings. But the above concept seems to work so it should get you going. I think this is what the Op was after.
So I have a method that looks something like the following:
// Collects all relevant (whitelisted) tables into one DataSet.
private static DataSet GetData()
{
DataSet returnValue = new DataSet();
try
{
//get all relevant tables here, add them to returnValue
}
catch (ArgumentException e)
{
//if a table isn't whitelisted, trying to grab it will throw an ArugmentException.
// NOTE(review): the exception is swallowed here, which is exactly the problem
// the question goes on to discuss — partial results with no error surfaced.
}
return returnValue;
}
Now, I want to pass along the caught exceptions. However, if for example 2 tables are whitelisted, but 1 isn't, I still want those two tables to be returned in the DataSet. I've been thinking I should do something like:
DataSet returnValue = new DataSet();
//TablesToFetch == string list or something containing tablenames you want to fetch
foreach (string tableName in tablesToFetch)
{
try
{
//get table tableName and add it to returnValue
}
catch (ArgumentException e)
{
//handle exception
}
}
return returnValue;
However, the problem here is that I can't just throw the exceptions I find, because then the DataSet won't be returned. The first solution I can think of is to "bundle" the exceptions and throw them one by one later, outside of the method*, but it kind of strikes me as a bit of a messy solution. Anyone have any tips on how to handle this, or should I just go ahead with the solution I just proposed?
*I could just wrap the method in another method, which calls a "handle all exceptions"-method after calling GetData()
This depends very much on the circumstances... I like an approach like this:
// Pseudo-code: 'returnType' and the bracketed parameter list are placeholders.
// Pattern: report failure through an out parameter instead of letting the
// exception propagate; callers check ErrorMessage == null for success.
public returnType MyMethod([... parameters ...], out string ErrorMessage){
ErrorMessage=null;
try{
doSomething();
return something;
}
catch(Exception exp){
// NOTE(review): only the message is kept — stack trace and exception type are
// lost; returning null also requires a nullable return type.
ErrorMessage=exp.Message;
return null; //
}
}
Instead of the out string you could create your own supi-dupi-ErrorInformation class. You just call your routine and check, wheter the ErrorMessage is null. If not, you can react on the out passed values. Maybe you want just to pass the exception out directly...
If you can't solve 'the problem' within the method, you should throw an Exception. You shouldn't return a DataSet when an exception occurred (you didn't handled). So either you return an Exception or you return a DataSet and handle the exception within the method.
It is possible to Aggregate the exceptions like:
/// <summary>
/// Fetches the requested tables into one DataSet, collecting every per-table
/// ArgumentException; after the loop the caught exceptions (if any) are rethrown
/// together as a single AggregateException.
/// </summary>
private DataSet GetData(IEnumerable<string> tablesToFetch)
{
    DataSet returnValue = new DataSet();
    var caught = new List<Exception>();
    //TablesToFetch == string list or something containing tablenames you want to fetch
    foreach (string tableName in tablesToFetch)
    {
        try
        {
            //get table tableName and add it to returnValue
        }
        catch (ArgumentException e)
        {
            //handle exception
            caught.Add(e);
        }
    }
    if (caught.Count != 0)
        throw new AggregateException(caught);
    return returnValue;
}
Another appoach is return a class with results:
/// <summary>
/// Carries the outcome of a data fetch: the populated DataSet plus the
/// names of any requested tables that could not be retrieved.
/// </summary>
public class GetDataResult
{
    /// <summary>Names of tables that were requested but not found.</summary>
    public string[] MissingTables { get; private set; }

    /// <summary>The DataSet holding every table that was fetched.</summary>
    public DataSet DataSet { get; private set; }

    /// <summary>
    /// Wraps the fetched data together with the missing-table list.
    /// </summary>
    public GetDataResult(DataSet dataSet, string[] missingTables)
    {
        this.DataSet = dataSet;
        this.MissingTables = missingTables;
    }
}
/// <summary>
/// Fetches the named tables into a DataSet, recording the names of tables
/// that could not be retrieved instead of throwing.
/// </summary>
/// <param name="tablesToFetch">Names of the tables to retrieve.</param>
/// <returns>
/// A GetDataResult holding the (possibly partial) DataSet and the list of
/// missing table names; the caller decides how to react to the misses.
/// </returns>
private GetDataResult GetData(IEnumerable<string> tablesToFetch)
{
List<string> missingTables = new List<string>();
DataSet returnValue = new DataSet();
//TablesToFetch == string list or something containing tablenames you want to fetch
foreach (string tableName in tablesToFetch)
{
try
{
//get table tableName and add it to returnValue
}
catch (ArgumentException e)
{
//handle exception
// A failed fetch is reported as a missing table, not as an error.
missingTables.Add(tableName);
}
}
return new GetDataResult(returnValue, missingTables.ToArray());
}
usage:
// Fetch the requested tables and report any that were missing.
var result = GetData(new[] { "MyTable1", "MyTable2" });
// MissingTables is a string[]: arrays expose Length, not Count
// (the original `.Count` does not compile on an array-typed variable).
if (result.MissingTables.Length > 0)
{
    Trace.WriteLine("Missing tables: " + string.Join(", ", result.MissingTables));
}
// do something with result.DataSet
update from comments
I don't know much about the structure you're using, so this is pseudo code
// PSEUDO!
private DataTable GetTable(string tableName)
{
// if table isn't found return null
if(<table is found>)
return table;
else
return null;
}
/// <summary>
/// Fetches the named tables, using GetTable's null return (instead of an
/// exception) to detect tables that do not exist.
/// </summary>
/// <param name="tablesToFetch">Names of the tables to retrieve.</param>
/// <returns>A GetDataResult with the DataSet and any missing table names.</returns>
private GetDataResult GetData(IEnumerable<string> tablesToFetch)
{
List<string> missingTables = new List<string>();
DataSet returnValue = new DataSet();
//TablesToFetch == string list or something containing tablenames you want to fetch
foreach (string tableName in tablesToFetch)
{
var table = GetTable(tableName);
// null means "not found": record the name and move on to the next table.
if(table == null)
{
missingTables.Add(tableName);
continue;
}
// do something with the table.
}
return new GetDataResult(returnValue, missingTables.ToArray());
}
I need to store the data returned from this LINQ to Entities query (below) into a DataTable so that I can use it as data source to a DataGridView, how can I do that?
In this case I'm using LINQ to Entities to query against an Entity Framework conceptual model, so db is a class that inherits from System.Data.Entity.DbContext.
using (TccContext db = new TccContext())
{
// NOTE(review): AsEnumerable() switches the query to LINQ to Objects, so the
// filter runs in memory after pulling the entity set — confirm this is intended.
var query = from vendedor in db.Vendedores.AsEnumerable()
where vendedor.codigo == Convert.ToInt32(textBoxPesquisa.Text)
select vendedor;
// I'd like to do something like DataTable dt = query;
}
I've tried to do this (below), but it throws an exception during execution [1].
using (TccContext db = new TccContext())
{
// The query yields Vendedor instances, not DataRow, so this cast to
// IEnumerable<DataRow> fails at runtime with the InvalidCastException
// quoted below — CopyToDataTable<DataRow> is never reached.
IEnumerable<DataRow> query = (IEnumerable<DataRow>)(from vendedor in db.Vendedores.AsEnumerable()
where vendedor.codigo == Convert.ToInt32(textBoxPesquisa.Text)
select vendedor);
using (DataTable dt = query.CopyToDataTable<DataRow>())
{
// Copies only the first result row's first three columns into the grid.
this.dataGridViewProcura.Rows.Add(
dt.Rows[0][0], // Código
dt.Rows[0][1], // Nome
dt.Rows[0][2]); // Venda Mensal
}
}
[1]: Exception: InvalidCastException
Unable to cast object of type 'WhereEnumerableIterator`1[Projeto_TCC.Models.Vendedor]' to type 'System.Collections.Generic.IEnumerable`1[System.Data.DataRow]'.
Thanks in advance
There is one important thing here: you are casting your LINQ query to IEnumerable&lt;DataRow&gt;, but you are selecting vendedor — an instance of Vendedor — so your query actually returns an IEnumerable&lt;Vendedor&gt;, which is why the cast fails at runtime.
That should solve your problem, but also, can you try using the generated DataTable as the DataSource for your DataGridView? It would be something like this:
// Keep the element type as Vendedor — no cast to DataRow is needed.
var query = (from vendedor in db.Vendedores.AsEnumerable()
where vendedor.codigo == Convert.ToInt32(textBoxPesquisa.Text)
select vendedor);
// NOTE(review): CopyToDataTable<Vendedor> is not the built-in DataRow-only
// extension; it requires the custom ObjectShredder-based helper — verify it
// is present in the project.
var dt = query.CopyToDataTable<Vendedor>();
this.dataGridViewProcura.DataSource = dt;
Hope I can help!
EDIT
As a side (and very personal) note, you could try using lambdas on your select, they look prettier :)
// Parse the search text once, outside the predicate, instead of per element.
var pesquisa = Convert.ToInt32(textBoxPesquisa.Text);
var query = db.Vendedores.Where(vendedor => vendedor.codigo == pesquisa);
// NOTE(review): relies on the custom CopyToDataTable<T> helper (DataRow-free).
var dt = query.CopyToDataTable<Vendedor>();
this.dataGridViewProcura.DataSource = dt;
A lot cleaner, don't you think?
EDIT 2
I've just realized what you said on CopyToDataTable being for DataRow only, so last (admittedly not so clean) solution would be to mimic the logic on the helper?
/// <summary>
/// Builds a DataTable from any sequence by reflecting over T's public
/// properties: one column per property, one row per item.
/// </summary>
/// <typeparam name="T">Element type whose public properties become columns.</typeparam>
/// <param name="items">The sequence to convert; enumerated exactly once.</param>
/// <returns>A new DataTable mirroring the sequence.</returns>
/// <remarks>
/// Limitations: complex (non-scalar) properties are stored as raw objects,
/// and the resulting schema cannot be customized by the caller.
/// </remarks>
public static DataTable CopyGenericToDataTable<T>(this IEnumerable<T> items)
{
    // Extension methods must be static (and live in a static class);
    // the original declaration was missing the static modifier.
    var properties = typeof(T).GetProperties();
    var result = new DataTable();
    // Build the columns. DataColumn cannot hold Nullable<T> directly
    // (Columns.Add throws NotSupportedException), so unwrap nullable
    // property types to their underlying type.
    foreach (var prop in properties)
    {
        var columnType = Nullable.GetUnderlyingType(prop.PropertyType) ?? prop.PropertyType;
        result.Columns.Add(prop.Name, columnType);
    }
    // Fill the DataTable, mapping null property values to DBNull —
    // assigning a plain null to a DataRow cell throws.
    foreach (var item in items)
    {
        var row = result.NewRow();
        foreach (var prop in properties)
        {
            row[prop.Name] = prop.GetValue(item, null) ?? (object)DBNull.Value;
        }
        result.Rows.Add(row);
    }
    return result;
}
Now, things to consider:
This solution will not work with complex properties
Customizing the resulting table might be a bit tricky
While this might solve the issue, I don't think this is a very good approach, but it could be the start of a decent idea :)
I hope I can help this time!
This is a the MSDN recommended solution: https://msdn.microsoft.com/en-us/library/bb669096(v=vs.110).aspx
I have Implemented it successfully
(*with minor additions to handle nullable DateTime.)
as Follows:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Data;
using System.Reflection;
/// <summary>
/// Converts a sequence of objects of type T into a DataTable by reflecting
/// over T's public fields and properties: one column per member, one row
/// per object. Derived instances in the sequence extend the schema on the fly.
/// </summary>
public class ObjectShredder<T>
{
    // Reflection data for T, gathered once in the constructor.
    private System.Reflection.FieldInfo[] _fi;
    private System.Reflection.PropertyInfo[] _pi;
    // Maps member name -> column ordinal in the target table.
    private System.Collections.Generic.Dictionary<string, int> _ordinalMap;
    private System.Type _type;

    // ObjectShredder constructor.
    public ObjectShredder()
    {
        _type = typeof(T);
        _fi = _type.GetFields();
        _pi = _type.GetProperties();
        _ordinalMap = new Dictionary<string, int>();
    }

    /// <summary>
    /// Loads a DataTable from a sequence of objects.
    /// </summary>
    /// <param name="source">The sequence of objects to load into the DataTable.</param>
    /// <param name="table">The input table. The schema of the table must match that
    /// of the type T. If the table is null, a new table is created with a schema
    /// created from the public properties and fields of the type T.</param>
    /// <param name="options">Specifies how values from the source sequence will be applied to
    /// existing rows in the table; null means "add as new rows, accepting changes".</param>
    /// <returns>A DataTable created from the source sequence.</returns>
    public DataTable Shred(IEnumerable<T> source, DataTable table, LoadOption? options)
    {
        // Load the table from the scalar sequence if T is a primitive type.
        if (typeof(T).IsPrimitive)
        {
            return ShredPrimitive(source, table, options);
        }
        // Create a new table if the input table is null.
        if (table == null)
        {
            table = new DataTable(typeof(T).Name);
        }
        // Initialize the ordinal map and extend the table schema based on type T.
        table = ExtendTable(table, typeof(T));
        // Enumerate the source sequence and load the object values into rows.
        table.BeginLoadData();
        using (IEnumerator<T> e = source.GetEnumerator())
        {
            while (e.MoveNext())
            {
                if (options != null)
                {
                    table.LoadDataRow(ShredObject(table, e.Current), (LoadOption)options);
                }
                else
                {
                    table.LoadDataRow(ShredObject(table, e.Current), true);
                }
            }
        }
        table.EndLoadData();
        // Return the table.
        return table;
    }

    /// <summary>
    /// Loads a DataTable from a sequence of primitive values using a single
    /// "Value" column (reflection over members does not apply to primitives).
    /// </summary>
    public DataTable ShredPrimitive(IEnumerable<T> source, DataTable table, LoadOption? options)
    {
        // Create a new table if the input table is null.
        if (table == null)
        {
            table = new DataTable(typeof(T).Name);
        }
        if (!table.Columns.Contains("Value"))
        {
            table.Columns.Add("Value", typeof(T));
        }
        // Enumerate the source sequence and load the scalar values into rows.
        table.BeginLoadData();
        using (IEnumerator<T> e = source.GetEnumerator())
        {
            Object[] values = new object[table.Columns.Count];
            while (e.MoveNext())
            {
                values[table.Columns["Value"].Ordinal] = e.Current;
                if (options != null)
                {
                    table.LoadDataRow(values, (LoadOption)options);
                }
                else
                {
                    table.LoadDataRow(values, true);
                }
            }
        }
        table.EndLoadData();
        // Return the table.
        return table;
    }

    /// <summary>
    /// Projects one instance into a value array ordered by the table's columns.
    /// </summary>
    public object[] ShredObject(DataTable table, T instance)
    {
        FieldInfo[] fi = _fi;
        PropertyInfo[] pi = _pi;
        if (instance.GetType() != typeof(T))
        {
            // If the instance is derived from T, extend the table schema
            // and get the properties and fields.
            ExtendTable(table, instance.GetType());
            fi = instance.GetType().GetFields();
            pi = instance.GetType().GetProperties();
        }
        // Add the property and field values of the instance to an array.
        Object[] values = new object[table.Columns.Count];
        foreach (FieldInfo f in fi)
        {
            values[_ordinalMap[f.Name]] = f.GetValue(instance);
        }
        foreach (PropertyInfo p in pi)
        {
            values[_ordinalMap[p.Name]] = p.GetValue(instance, null);
        }
        // Return the property and field values of the instance.
        return values;
    }

    /// <summary>
    /// Extends the table schema with a column for every public field and
    /// property of <paramref name="type"/> not already mapped, recording
    /// each member's column ordinal in the ordinal map.
    /// </summary>
    public DataTable ExtendTable(DataTable table, Type type)
    {
        // Extend the table schema if the input table was null or if the value
        // in the sequence is derived from type T.
        foreach (FieldInfo f in type.GetFields())
        {
            if (!_ordinalMap.ContainsKey(f.Name))
            {
                // Add the field as a column in the table if it doesn't exist already.
                DataColumn dc = table.Columns.Contains(f.Name) ? table.Columns[f.Name]
                    : table.Columns.Add(f.Name, f.FieldType);
                // Add the field to the ordinal map.
                _ordinalMap.Add(f.Name, dc.Ordinal);
            }
        }
        foreach (PropertyInfo p in type.GetProperties())
        {
            if (!_ordinalMap.ContainsKey(p.Name))
            {
                DataColumn dc;
                if (table.Columns.Contains(p.Name))
                {
                    dc = table.Columns[p.Name];
                }
                else
                {
                    // DataColumn does not accept Nullable<T> column types
                    // (Columns.Add throws NotSupportedException), so unwrap
                    // nullable property types to their underlying type. This
                    // handles every nullable type, not just DateTime? as the
                    // original try/catch workaround did; null values remain
                    // representable as DBNull.
                    Type columnType = Nullable.GetUnderlyingType(p.PropertyType) ?? p.PropertyType;
                    dc = table.Columns.Add(p.Name, columnType);
                }
                // Add the property to the ordinal map.
                _ordinalMap.Add(p.Name, dc.Ordinal);
            }
        }
        // Return the table.
        return table;
    }
}
}
The (big) caveat to this solution is that it is **costly**, and you have to customize its error handling yourself.
you can put
// Materialize the query and bind the list to the grid.
var query = from ....
// C# is case-sensitive: the method is ToList(), and it must be invoked.
this.dataGridViewProcura.DataSource = query.ToList();
In my C# code, I have a source DataTable, and want to query it, storing the results to another DataTable.
I have A DataTable with stgId, fromdate, todate, colorCode, something1, something2 as columns. After querying with the where condition I need to remove something1 and something2 columns and to get the result in another DataTable.
Equivalent SQL query would be as shown below
SELECT
stgId,
fromdate,
todate,
colorCode
FROM
tblScheduling
WHERE
Mcode='123'
I want to get the result in another DataTable.
EDIT: Update After Answering
It is possible to get the result as DataRow[] type using where condition like this.
DataRow[] results = table.Select("A = 'foo' AND B = 'bar' AND C = 'baz'");
However I wanted the result set as new DataTable.
Quote from accepted answer
"Read about LINQ and lambda expression, they will be very useful for you. You can read about them here and here"
You can't use CopyToDataTable method directly, instead See: How to: Implement CopyToDataTable Where the Generic Type T Is Not a DataRow. After setting up your classes as per the link you can later call the method CopyToDataTable like:
// Filter the source table on Mcode and project just the four wanted columns
// into an anonymous type, then materialize the result as a new DataTable.
// (CopyToDataTable here is the custom ObjectShredder-based overload that
// accepts any IEnumerable<T>, not the built-in DataRow-only extension.)
// Fixes to the original snippet: the comparison belongs outside the Field
// call — r.Field<string>("Mcode") == "123", not Field<string>("Mcode" == "123") —
// Select needs a lambda, and the parentheses were unbalanced.
var newDataTable = dt.AsEnumerable()
    .Where(r => r.Field<string>("Mcode") == "123")
    .Select(r => new
    {
        stgId = r.Field<int>("stgId"),
        fromdate = r.Field<DateTime>("fromdate"),
        todate = r.Field<DateTime>("todate"),
        colorCode = r.Field<int>("colorCode")
    })
    .CopyToDataTable();
Remember to use the correct type in Field extension method.
The above requires the following two classes to be set up in your code (they are from the same MSDN link).
/// <summary>
/// Extension overloads that make CopyToDataTable available for any element
/// type T (not only DataRow) by delegating to ObjectShredder&lt;T&gt;.
/// </summary>
public static class CustomLINQtoDataSetMethods
{
    /// <summary>Creates a brand-new DataTable from the sequence.</summary>
    public static DataTable CopyToDataTable<T>(this IEnumerable<T> source)
    {
        var shredder = new ObjectShredder<T>();
        return shredder.Shred(source, null, null);
    }

    /// <summary>Loads the sequence into an existing table with the given load option.</summary>
    public static DataTable CopyToDataTable<T>(this IEnumerable<T> source,
        DataTable table, LoadOption? options)
    {
        var shredder = new ObjectShredder<T>();
        return shredder.Shred(source, table, options);
    }
}
// Reflection-based helper that "shreds" objects of type T into DataTable rows:
// one column per public field/property, one row per object. This is the MSDN
// reference implementation backing the CopyToDataTable<T> extensions above.
public class ObjectShredder<T>
{
// Reflection data for T, gathered once in the constructor.
private System.Reflection.FieldInfo[] _fi;
private System.Reflection.PropertyInfo[] _pi;
// Maps member name -> column ordinal in the target table.
private System.Collections.Generic.Dictionary<string, int> _ordinalMap;
private System.Type _type;
// ObjectShredder constructor.
public ObjectShredder()
{
_type = typeof(T);
_fi = _type.GetFields();
_pi = _type.GetProperties();
_ordinalMap = new Dictionary<string, int>();
}
/// <summary>
/// Loads a DataTable from a sequence of objects.
/// </summary>
/// <param name="source">The sequence of objects to load into the DataTable.</param>
/// <param name="table">The input table. The schema of the table must match that
/// the type T. If the table is null, a new table is created with a schema
/// created from the public properties and fields of the type T.</param>
/// <param name="options">Specifies how values from the source sequence will be applied to
/// existing rows in the table.</param>
/// <returns>A DataTable created from the source sequence.</returns>
public DataTable Shred(IEnumerable<T> source, DataTable table, LoadOption? options)
{
// Load the table from the scalar sequence if T is a primitive type.
if (typeof(T).IsPrimitive)
{
return ShredPrimitive(source, table, options);
}
// Create a new table if the input table is null.
if (table == null)
{
table = new DataTable(typeof(T).Name);
}
// Initialize the ordinal map and extend the table schema based on type T.
table = ExtendTable(table, typeof(T));
// Enumerate the source sequence and load the object values into rows.
table.BeginLoadData();
using (IEnumerator<T> e = source.GetEnumerator())
{
while (e.MoveNext())
{
if (options != null)
{
table.LoadDataRow(ShredObject(table, e.Current), (LoadOption)options);
}
else
{
table.LoadDataRow(ShredObject(table, e.Current), true);
}
}
}
table.EndLoadData();
// Return the table.
return table;
}
// Loads a DataTable from a sequence of primitive values using a single
// "Value" column (member reflection does not apply to primitives).
public DataTable ShredPrimitive(IEnumerable<T> source, DataTable table, LoadOption? options)
{
// Create a new table if the input table is null.
if (table == null)
{
table = new DataTable(typeof(T).Name);
}
if (!table.Columns.Contains("Value"))
{
table.Columns.Add("Value", typeof(T));
}
// Enumerate the source sequence and load the scalar values into rows.
table.BeginLoadData();
using (IEnumerator<T> e = source.GetEnumerator())
{
Object[] values = new object[table.Columns.Count];
while (e.MoveNext())
{
values[table.Columns["Value"].Ordinal] = e.Current;
if (options != null)
{
table.LoadDataRow(values, (LoadOption)options);
}
else
{
table.LoadDataRow(values, true);
}
}
}
table.EndLoadData();
// Return the table.
return table;
}
// Projects one instance into a value array ordered by the table's columns.
public object[] ShredObject(DataTable table, T instance)
{
FieldInfo[] fi = _fi;
PropertyInfo[] pi = _pi;
if (instance.GetType() != typeof(T))
{
// If the instance is derived from T, extend the table schema
// and get the properties and fields.
ExtendTable(table, instance.GetType());
fi = instance.GetType().GetFields();
pi = instance.GetType().GetProperties();
}
// Add the property and field values of the instance to an array.
Object[] values = new object[table.Columns.Count];
foreach (FieldInfo f in fi)
{
values[_ordinalMap[f.Name]] = f.GetValue(instance);
}
foreach (PropertyInfo p in pi)
{
values[_ordinalMap[p.Name]] = p.GetValue(instance, null);
}
// Return the property and field values of the instance.
return values;
}
// Adds a column for every public field/property of `type` not already mapped
// and records each member's column ordinal.
// NOTE(review): Columns.Add does not support Nullable<T> column types and
// will throw NotSupportedException for nullable properties here; unwrap with
// Nullable.GetUnderlyingType if that case must be supported.
public DataTable ExtendTable(DataTable table, Type type)
{
// Extend the table schema if the input table was null or if the value
// in the sequence is derived from type T.
foreach (FieldInfo f in type.GetFields())
{
if (!_ordinalMap.ContainsKey(f.Name))
{
// Add the field as a column in the table if it doesn't exist
// already.
DataColumn dc = table.Columns.Contains(f.Name) ? table.Columns[f.Name]
: table.Columns.Add(f.Name, f.FieldType);
// Add the field to the ordinal map.
_ordinalMap.Add(f.Name, dc.Ordinal);
}
}
foreach (PropertyInfo p in type.GetProperties())
{
if (!_ordinalMap.ContainsKey(p.Name))
{
// Add the property as a column in the table if it doesn't exist
// already.
DataColumn dc = table.Columns.Contains(p.Name) ? table.Columns[p.Name]
: table.Columns.Add(p.Name, p.PropertyType);
// Add the property to the ordinal map.
_ordinalMap.Add(p.Name, dc.Ordinal);
}
}
// Return the table.
return table;
}
}
// Copy rows whose Mcode column equals "123" by raw value array; this requires
// dataTable2 to have the same number, type and order of columns as dataTable1.
foreach (DataRow dr in dataTable1.Rows) {
if (dr["Mcode"].ToString()=="123")
dataTable2.Rows.Add(dr.ItemArray);
}
The above example assumes that dataTable1 and dataTable2 have the same number, type and order of columns.
Edit 1
You can use clone method to copy structure of existing datatable into another.
http://msdn.microsoft.com/en-IN/library/system.data.datatable.clone.aspx
Suppose you have a datatable dt1 So you can create a clone as follows
DataTable dt2 = dt1.Clone();
and use the above loop as follows
// Same filter-and-copy loop; dt2 was produced by dt1.Clone(), so the two
// tables are guaranteed to share an identical column layout.
foreach (DataRow dr in dt1.Rows) {
if (dr["Mcode"].ToString()=="123")
dt2.Rows.Add(dr.ItemArray);
}
I need my program check if specified column exists in MS Access 2000 database, and if it doesn't - add it. I use .NET Framework 2.0
I tried to use oleDbConnection.GetSchema() method, but couldn't find column names in metadata (i'm really not a pro, huh) and any specification on msdn.
I would appreciate any help.
Thanks for answers.
Here is solution i used in my code:
// Determine whether column `mycolumnname` exists in table `mytable`.
bool flag = false;
// Restriction array for OleDbSchemaGuid.Columns: { catalog, schema, table } —
// restricting by table name keeps the schema rowset small.
string[] restrictions = new string[] { null, null, mytable };
// GetOleDbSchemaTable returns one row per column of the restricted table.
// Dispose the schema DataTable (the original leaked it) and stop scanning
// as soon as a match is found.
using (DataTable dtColumns = oleDbConnection1.GetOleDbSchemaTable(System.Data.OleDb.OleDbSchemaGuid.Columns, restrictions))
{
    foreach (DataRow row in dtColumns.Rows)
    {
        if (mycolumnname == (string)row["COLUMN_NAME"])
        {
            flag = true;
            break;
        }
    }
}
This is code that is part of a o/r-mapper of mine. You cannot use it as is beacuse it depends on other classes, but I hope you get the picture.
Define restrictions like this
string[] restrictions = new string[] { null, null, tableName };
This retrieves the columns from a table
/// <summary>
/// Reads the OLE DB column schema rowset for one table (as restricted by
/// <paramref name="restrictions"/>) and appends a populated TableColumn to
/// <paramref name="tableSchema"/>.ColumnSchema for every column found.
/// </summary>
/// <param name="cnn">Open OLE DB connection to query the schema from.</param>
/// <param name="tableSchema">Receives the column descriptions; also used to
/// look up the table's autonumber column.</param>
/// <param name="restrictions">OleDbSchemaGuid.Columns restriction array,
/// e.g. { null, null, tableName }.</param>
/// <param name="prepareColumnNameForMapping">Caller-supplied transform from
/// physical column name to mapping name; its failures are wrapped below.</param>
private void RetrieveColumnInfo(OleDbConnection cnn, TableSchema tableSchema,
string[] restrictions, Func<string, string> prepareColumnNameForMapping)
{
using (DataTable dtColumns =
cnn.GetOleDbSchemaTable(OleDbSchemaGuid.Columns, restrictions)) {
string AutoNumberColumn = RetrieveAutoNumberColumn(cnn, tableSchema);
foreach (DataRow row in dtColumns.Rows) {
var col = new TableColumn();
col.ColumnName = (string)row["COLUMN_NAME"];
try {
col.ColumnNameForMapping =
prepareColumnNameForMapping(col.ColumnName);
} catch (Exception ex) {
// Wrap delegate failures so the caller can tell them apart from
// schema-read errors.
throw new UnimatrixExecutionException(
"Error in delegate 'prepareColumnNameForMapping'", ex);
}
col.ColumnAllowsDBNull = (bool)row["IS_NULLABLE"];
// A column is the identity column iff it matches the table's autonumber column.
col.ColumnIsIdentity = col.ColumnName == AutoNumberColumn;
DbColumnFlags flags = (DbColumnFlags)(long)row["COLUMN_FLAGS"];
// Read-only when it's the identity column or when neither write flag is set.
col.ColumnIsReadOnly =
col.ColumnIsIdentity ||
(flags & (DbColumnFlags.Write | DbColumnFlags.WriteUnknown)) ==
DbColumnFlags.None;
if (row["CHARACTER_MAXIMUM_LENGTH"] != DBNull.Value) {
// The double cast unboxes the schema value as Int64 first, then narrows.
col.ColumnMaxLength = (int)(long)row["CHARACTER_MAXIMUM_LENGTH"];
}
col.ColumnDbType = GetColumnDbType((int)row["DATA_TYPE"]);
col.ColumnOrdinalPosition = (int)(long)row["ORDINAL_POSITION"];
GetColumnDefaultValue(row, col);
tableSchema.ColumnSchema.Add(col);
}
}
}