In general, I tend to name my SQL database columns using the following camel-case convention:
camelCase (notice that the first letter is in lower case).
But when working with C#, I like to name my object's public properties in the following convention:
PascalCase (notice that the first letter is in upper case).
Entity Framework's default behaviour is to name the generated classes' properties to match their corresponding column names exactly as they appear in the database.
Is there any setting at the project/solution level that can be changed to solve this issue?
Yes, there is. Here is a full example:
using System;
using System.Data.Entity;

namespace ConsoleApplication1
{
    class MyDbContext : DbContext
    {
        protected override void OnModelCreating(DbModelBuilder modelBuilder)
        {
            base.OnModelCreating(modelBuilder);

            // Rename every mapped column: lower-case the first letter of the property name.
            modelBuilder.Properties().Configure(c =>
            {
                var name = c.ClrPropertyInfo.Name;
                var newName = char.ToLower(name[0]) + name.Substring(1);
                c.HasColumnName(newName);
            });
        }

        public MyDbContext(string cs) : base(cs)
        {
        }

        public DbSet<MyModel> MyModels { get; set; }
    }

    class Program
    {
        static void Main(string[] args)
        {
            var context = new MyDbContext("DefaultConnection");
            context.MyModels.Add(new MyModel { SomeText = "hello" });
            context.SaveChanges();
            Console.ReadLine();
        }
    }

    class MyModel
    {
        public int Id { get; set; }
        public string SomeText { get; set; }
    }
}
The property name is "SomeText" and the column name is "someText".
I don't know of a solution-level setting, but you can set an attribute on your entity:
[Table("myEntity")]
public class MyEntity { }
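Since the question is about column names rather than the table name, the per-property counterpart is the Column attribute from the same namespace; a minimal sketch against the sample model (the names here are only illustrative):
using System.ComponentModel.DataAnnotations.Schema;

public class MyModel
{
    public int Id { get; set; }

    // maps the PascalCase property to a camelCase column
    [Column("someText")]
    public string SomeText { get; set; }
}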
Achieving this is not impossible, but it's not going to be easy. Some of it depends on which type of EF model you're working with: code first or database/model first (they are similar in this regard), or the old ObjectContext-based methods.
In general, EF uses T4 templating to create the actual classes and models in everything but code first, so it's possible to edit the T4 templates and generate whatever you want, such as properties with PascalCasing.
If you're using code first (which doesn't really require that you code first, it's a terrible name), then you can use the Entity Framework Power Tools to reverse engineer your database into code-first models, and again T4 is used to do this.
If you're using actual code first (i.e. you create your models and generate your database from the model), then it may not be possible in EF5 or lower. EF6 (currently in alpha) has something known as custom conventions that you could probably use for this, but it's still a long way from production quality; a sketch of that approach follows.
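To illustrate the EF6 route once it ships, a custom convention along these lines should do it (a sketch that assumes EF6's Convention base class; the class name is made up):
using System.Data.Entity.ModelConfiguration.Conventions;

// Lower-cases the first letter of every mapped property's column name,
// so PascalCase properties end up as camelCase columns.
public class CamelCaseColumnConvention : Convention
{
    public CamelCaseColumnConvention()
    {
        Properties().Configure(c =>
        {
            var name = c.ClrPropertyInfo.Name;
            c.HasColumnName(char.ToLowerInvariant(name[0]) + name.Substring(1));
        });
    }
}
It would then be registered in OnModelCreating with modelBuilder.Conventions.Add(new CamelCaseColumnConvention());.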
Earlier I also had that kind of problem, so I wrote a tool in C# to rename each section of the existing .edmx file and then regenerate the POCO classes using the T4 template. It resolved my problem: it generates the expected POCO classes with Pascal-cased properties. Basically, the .edmx has three layers, and we need to modify two of them:
MappingsSection
ConceptualModelsSection
The following class does that:
namespace Edmx_Manager_V1._0
{
    using System;
    using System.Collections.Generic;
    using System.Linq;
    using System.Xml;

    public static class RenameManager
    {
        public static XmlDocument Document = new XmlDocument();
        public static string FilePath;
        public static XmlNamespaceManager nsmgr;

        /// <summary>
        /// Updates the ConceptualModels section.
        /// </summary>
        public static void UpdateConceptualModelsSection()
        {
            XmlNodeList Schema = Document.SelectNodes("/edmx:Edmx/edmx:Runtime/edmx:ConceptualModels/edm:Schema", nsmgr);
            XmlNode SchemaNode = Schema[0];
            XmlElement SchemaNodeXmlElement = SchemaNode as XmlElement;

            // get all EntitySet nodes under the EntityContainer node
            XmlNodeList EntitySetlist = SchemaNodeXmlElement.GetElementsByTagName("EntitySet");

            // get all EntityType nodes under the Schema node
            XmlNodeList EntityTypelist = SchemaNodeXmlElement.GetElementsByTagName("EntityType");

            foreach (XmlNode EntityTypenode in EntityTypelist)
            {
                // to call GetElementsByTagName we need an XmlElement object
                XmlElement EntityTypenodeelement = EntityTypenode as XmlElement;

                // get all PropertyRef nodes under the EntityType node
                XmlNodeList PropertyReflist = EntityTypenodeelement.GetElementsByTagName("PropertyRef");
                foreach (XmlNode PropertyRefnode in PropertyReflist)
                {
                    // update the Name attribute of Key/PropertyRef nodes
                    XmlAttribute PropertyRef_nameAttribute = PropertyRefnode.Attributes["Name"];
                    PropertyRef_nameAttribute.Value = UppercaseFirst(PropertyRef_nameAttribute.Value);
                }

                // get all Property nodes under the EntityType node
                XmlNodeList Propertylist = EntityTypenodeelement.GetElementsByTagName("Property");
                foreach (XmlNode Propertynode in Propertylist)
                {
                    // update the Name attribute of Property nodes
                    XmlAttribute Property_nameAttribute = Propertynode.Attributes["Name"];
                    Property_nameAttribute.Value = UppercaseFirst(Property_nameAttribute.Value);
                }

                // get all NavigationProperty nodes under the EntityType node
                XmlNodeList NavigationPropertylist = EntityTypenodeelement.GetElementsByTagName("NavigationProperty");
                foreach (XmlNode NavigationPropertynode in NavigationPropertylist)
                {
                    // update the Name attribute of NavigationProperty nodes
                    XmlAttribute NavigationPropertynode_nameAttribute = NavigationPropertynode.Attributes["Name"];
                    NavigationPropertynode_nameAttribute.Value = UppercaseFirst(NavigationPropertynode_nameAttribute.Value) + "s"; // we append "s" for nav properties
                }
            }

            // get all Association nodes under the Schema node and process them
            XmlNodeList Associationlist = SchemaNodeXmlElement.GetElementsByTagName("Association");
            foreach (XmlNode AssociationNode in Associationlist)
            {
                if (AssociationNode != null)
                {
                    XmlElement AssociationNodeXmlElement = AssociationNode as XmlElement;

                    // get all End nodes under the Association
                    XmlNodeList EndNodelist2 = AssociationNodeXmlElement.GetElementsByTagName("End");

                    // get all PropertyRef nodes under the Association
                    XmlNodeList PropertyReflist2 = AssociationNodeXmlElement.GetElementsByTagName("PropertyRef");
                    foreach (XmlNode PropertyRefNode2 in PropertyReflist2)
                    {
                        // update the Name attribute
                        XmlAttribute PropertyRefNode2Attribute = PropertyRefNode2.Attributes["Name"];
                        PropertyRefNode2Attribute.Value = UppercaseFirst(PropertyRefNode2Attribute.Value);
                    }
                }
            }

            Console.WriteLine("ConceptualModelSection updated..");
        }

        /// <summary>
        /// Updates the Mappings section.
        /// </summary>
        public static void UpdateMappingsSection()
        {
            XmlNodeList EntityContainerMapping = Document.SelectNodes("/edmx:Edmx/edmx:Runtime/edmx:Mappings/cs:Mapping", nsmgr);
            XmlNode EntityContainerMapping_Node = EntityContainerMapping[0];
            XmlElement EntityContainerMappingNode_XmlElement = EntityContainerMapping_Node as XmlElement;

            // get all EntitySetMapping nodes
            XmlNodeList EntitySetMappinglist = EntityContainerMappingNode_XmlElement.GetElementsByTagName("EntitySetMapping");

            // get all EntityTypeMapping nodes
            XmlNodeList EntityTypeMappinglist = EntityContainerMappingNode_XmlElement.GetElementsByTagName("EntityTypeMapping");

            // get all ScalarProperty nodes and update their Name attributes
            XmlNodeList ScalarPropertyist = EntityContainerMappingNode_XmlElement.GetElementsByTagName("ScalarProperty");
            foreach (XmlNode ScalarPropertyNode in ScalarPropertyist)
            {
                XmlAttribute nameAttribute = ScalarPropertyNode.Attributes["Name"];
                nameAttribute.Value = UppercaseFirst(nameAttribute.Value);
            }

            Console.WriteLine("MappingSection updated..");
        }

        /// <summary>
        /// Upper-cases the first letter of the supplied name.
        /// </summary>
        /// <param name="name">The name.</param>
        /// <returns>The name with its first letter upper-cased.</returns>
        private static string UppercaseFirst(string name)
        {
            return char.ToUpper(name[0]) + name.Substring(1);
        }
    }
}
Usage:
RenameManager.FilePath = @"C:\Users\therath\testApp\Model1.edmx"; // path of the .edmx file in your solution
RenameManager.Document.Load(RenameManager.FilePath);

RenameManager.nsmgr = new XmlNamespaceManager(RenameManager.Document.NameTable);
RenameManager.nsmgr.AddNamespace("edmx", "http://schemas.microsoft.com/ado/2008/10/edmx");
RenameManager.nsmgr.AddNamespace("edm", "http://schemas.microsoft.com/ado/2008/09/edm");
//nsmgr.AddNamespace("ssdl", "http://schemas.microsoft.com/ado/2009/02/edm/ssdl");
RenameManager.nsmgr.AddNamespace("cs", "http://schemas.microsoft.com/ado/2008/09/mapping/cs");

try
{
    RenameManager.UpdateConceptualModelsSection();
    RenameManager.UpdateMappingsSection();
    RenameManager.Document.Save(RenameManager.FilePath);
}
catch (Exception ex)
{
    MessageBox.Show(ex.Message);
}
If you generate the edmx again you may need to run this tool again.
There are ways to do that; some have already been pointed out by others.
I found one class inside EF that does this kind of casing work:
namespace System.Data.Entity.ModelConfiguration.Conventions
{
    /// <summary>
    /// Convention to convert any data types that were explicitly specified, via data annotations or <see cref="T:System.Data.Entity.DbModelBuilder"/> API,
    /// to be lower case. The default SqlClient provider is case sensitive and requires data types to be lower case. This convention
    /// allows the <see cref="T:System.ComponentModel.DataAnnotations.ColumnAttribute"/> and <see cref="T:System.Data.Entity.DbModelBuilder"/> API to be case insensitive.
    /// </summary>
    public sealed class ColumnTypeCasingConvention : IDbConvention<DbTableColumnMetadata>, IConvention
    {
        internal ColumnTypeCasingConvention()
        {
        }

        [SuppressMessage("Microsoft.Globalization", "CA1308:NormalizeStringsToUppercase")]
        void IDbConvention<DbTableColumnMetadata>.Apply(DbTableColumnMetadata tableColumn, DbDatabaseMetadata database)
        {
            if (string.IsNullOrWhiteSpace(tableColumn.TypeName))
                return;

            tableColumn.TypeName = tableColumn.TypeName.ToLowerInvariant();
        }
    }
}
The explicit implementation of IDbConvention is what does the work, and you can implement a similar convention yourself.
Another option is to go to the solution and find the folder obj/Debug/edmxResourcesToEmbed. There are three files: db.csdl, db.msl and db.ssdl. Edit the .msl file and you'll see a mapping for each table like the one below.
<EntitySetMapping Name="Address">
  <EntityTypeMapping TypeName="IsTypeOf(AdventureWorksLTModel.Address)">
    <MappingFragment StoreEntitySet="Address">
      <ScalarProperty Name="AddressID" ColumnName="AddressID" />
      <ScalarProperty Name="AddressLine1" ColumnName="AddressLine1" />
      <ScalarProperty Name="AddressLine2" ColumnName="AddressLine2" />
      <ScalarProperty Name="City" ColumnName="City" />
      <ScalarProperty Name="StateProvince" ColumnName="StateProvince" />
      <ScalarProperty Name="CountryRegion" ColumnName="CountryRegion" />
      <ScalarProperty Name="PostalCode" ColumnName="PostalCode" />
      <ScalarProperty Name="rowguid" ColumnName="rowguid" />
      <ScalarProperty Name="ModifiedDate" ColumnName="ModifiedDate" />
    </MappingFragment>
  </EntityTypeMapping>
</EntitySetMapping>
You can change it in the .edmx file. Just click on the property name and rename it to the casing you want, and that name will be reflected whenever you access it through the object.
You can use external tools like
http://www.devart.com/entitydeveloper/
With the built-in EDMX designer this isn't possible, since the "update from database" routine doesn't have such a function.
The class code is generated from a T4 template. Depending on your configuration, this may already be in your project, or the EDMX may be using a built-in one, in which case you would need to add your own and set the "code generation strategy" to "None" in the EDMX properties (through the model browser). From this file it is fairly easy to find and modify the class and property name generation.
The default is calling an Escape function, which is defined in include files at "IDE/Extensions/Microsoft/Entity Framework Tools/Templates/Includes" under the Visual Studio folder, and ultimately just calls CreateEscapedIdentifier. Simply call this with the capitalized version of the string.
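As a rough sketch of that edit (the helper name is invented, and I'm assuming the template exposes the usual CodeGenerationTools instance as code, as the stock DbContext templates do):
// hypothetical helper added to the .tt file
string Capitalize(string name)
{
    return char.ToUpper(name[0]) + name.Substring(1);
}
Then, wherever the template emits a property name, build the escaped identifier from the capitalized name, e.g. code.Escape(Capitalize(edmProperty.Name)) instead of escaping the raw name.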
FYI: These names are coming from the EntityType and NavigationProperty objects as defined in the EDMX, not directly from the database. If you are using the "generate from database" feature for the EDMX, the names may have already gone through a transformation and the original table names are not retained in the model. This probably won't be an issue, though.
Well, you can actually edit the names in the .edmx, but every time you refresh from the database you're back to doing it again.
The only viable approach when using .edmx-generated types is to have the correct names (with capital letters) in the database tables; otherwise it becomes too tedious.
You can of course use LINQ to SQL instead, in which case you define your data classes and just supply a name mapping. But be warned: this approach is significantly more manual and is being abandoned in most places, because it requires much more thought to set up than the .edmx auto-generation, which is a click, click, next approach.
So yes, you can edit the names in the .edmx, but consider abandoning your camelCasing for tables instead, in homage to the .edmx, which saves you a ton of work in return; otherwise your autogenerated .NET proxy classes will look weird, as you know.
I'm trying to process an XML document and determine which namespaces are defined in it, but I'm having trouble getting consistent results from XmlNamespaceManager.HasNamespace. As it's reading through the document, HasNamespace will return false even though the namespace is still declared and in scope.
Sample code:
var ctx = new XmlParserContext(null, new XmlNamespaceManager(new NameTable()), null, XmlSpace.None);
var set = new XmlReaderSettings() { IgnoreComments = true, IgnoreProcessingInstructions = true, IgnoreWhitespace = true };

using (var xml = new StringReader(
    "<?xml version=\"1.0\" encoding=\"utf-8\"?>" +
    "<rdf:RDF " +
    " xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"> " +
    " <rdf:Description rdf:about=\"x\" /> " +
    "</rdf:RDF>"))
using (var rdr = XmlReader.Create(xml, set, ctx))
{
    rdr.MoveToContent();
    Console.WriteLine(rdr.Name);
    Console.WriteLine(rdr.LookupNamespace("rdf"));
    Console.WriteLine(ctx.NamespaceManager.HasNamespace("rdf")); // True

    rdr.Read();
    Console.WriteLine(rdr.Name);
    Console.WriteLine(rdr.LookupNamespace("rdf"));
    Console.WriteLine(ctx.NamespaceManager.HasNamespace("rdf")); // False

    rdr.Read();
    Console.WriteLine(rdr.Name);
    Console.WriteLine(rdr.LookupNamespace("rdf"));
    Console.WriteLine(ctx.NamespaceManager.HasNamespace("rdf")); // True
}
As the reader enters each new element, it will call PushScope on the namespace manager. Once it leaves the element (via the end of a self-closing tag or the corresponding end tag), it calls PopScope.
HasNamespace, unlike some other members of the namespace manager, only answers the question for the current scope.
Gets a value indicating whether the supplied prefix has a namespace defined for the current pushed scope.
(My emphasis)
In general, you shouldn't be working with the namespace prefixes all that much, unless you're actually performing parsing yourself[1] rather than leveraging the existing tools. It's the combination of the element name (RDF) and the namespace (http://www.w3.org/1999/02/22-rdf-syntax-ns#) that uniquely defines the type of the element - the prefix can be changed (provided it's done consistently throughout the scope of the document in which it is valid) without changing the information content of the XML.
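For example, a prefix-independent check against the question's sample could look like this (a sketch reusing the rdr variable from above):
if (rdr.NodeType == XmlNodeType.Element
    && rdr.LocalName == "RDF"
    && rdr.NamespaceURI == "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
{
    // this is the rdf:RDF element, whatever prefix the document happened to use
}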
You can see the push/pop behaviour for yourself if you create this class:
class LoggingNamespaceManager : XmlNamespaceManager
{
    public LoggingNamespaceManager(XmlNameTable table) : base(table)
    {
    }

    public override void PushScope()
    {
        Console.WriteLine("Push");
        base.PushScope();
    }

    public override bool PopScope()
    {
        Console.WriteLine("Pop");
        return base.PopScope();
    }
}
And instantiate it rather than XmlNamespaceManager in the first line of your sample.
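That is, the first line of the sample becomes something like:
var ctx = new XmlParserContext(null, new LoggingNamespaceManager(new NameTable()), null, XmlSpace.None);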
[1] Please don't though. There are enough brittle "XML" parsers out there already which are built on invalid assumptions about XML. Use the tools provided in the Framework, as you're currently doing.
I have been trying to read this XML file; however, it is fairly complex and nested compared to the examples I have seen online. I have tried using LINQ to XML and XmlReader with no luck.
LINQ will read each OrderScreen; however, when it comes to the Cells of each OrderScreen, it loads all possible Cells into every OrderScreen even if the Cell does not belong to that OrderScreen. I understand why it does this, but I am fairly new to LINQ, and most of the examples I see are not this complex and do not really cover it.
XmlReader works pretty well, but it does not continue reading the next Cell after it has finished reading one OrderScreen; it just reads the first Cell of the next OrderScreen and then assumes it is at the end of the document. I did not include that code because in all the searches I have seen, people favour LINQ over XmlReader.
The XML is below first, with the most recent LINQ code after that.
Any help is greatly appreciated!
<Screens>
  <DeleteScreens></DeleteScreens>
  <NewScreens>
    <OrderScreen>
      <ScreenNumber></ScreenNumber>
      <Title></Title>
      <NumberOfColumns></NumberOfColumns>
      <OptionScreen></OptionScreen>
      <ShowQuantityButtons></ShowQuantityButtons>
      <PrepSequenceScreen></PrepSequenceScreen>
      <Cell>
        <CellNumber></CellNumber>
        <CellName></CellName>
        <InventoryNumber></InventoryNumber>
        ...more Cell elements...
        <OptionGroup>
          <Type></Type>
          <ScreenNumber></ScreenNumber>
          <Cells></Cells>
        </OptionGroup>
        ...more OptionGroups...
      </Cell>
      ...more Cells...
    </OrderScreen>
    ...more OrderScreens...
  </NewScreens>
  <UpdateMenus>
    <Menu>
      <MenuNumber></MenuNumber>
      <MenuTitle></MenuTitle>
      ...more Menu elements...
    </Menu>
    ...more Menus...
  </UpdateMenus>
</Screens>
XDocument xdoc;
xdoc = XDocument.Load(@"C:\Users\Kwagstaff\Desktop\PMM_3.0\PMM_3.0\XML\Screens.xml");

var ORDERSCREENS = from a in xdoc.Descendants("OrderScreen")
                   select new
                   {
                       ScreenNumber = a.Element("ScreenNumber").Value,
                       Title = a.Element("Title").Value,
                       NumberOfColumns = a.Element("NumberOfColumns").Value,
                       OptionScreen = a.Element("OptionScreen").Value,
                       ShowQuantityButtons = a.Element("ShowQuantityButtons").Value,
                       PrepSequenceScreen = a.Element("PrepSequenceScreen").Value,
                       Cell = from b in xdoc.Descendants("Cell")
                              select new
                              {
                                  CellNumber = b.Element("CellNumber"),
                              }
                   };
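For what it's worth, the Cell duplication described above comes from the inner query starting at xdoc (the whole document) instead of at the current OrderScreen; a sketch of the scoped version, changing only the inner query:
Cell = from b in a.Descendants("Cell")
       select new
       {
           CellNumber = b.Element("CellNumber"),
       }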
In my opinion, the proper way to do that is with entity classes and attribute decorations. You will need to do some research, but as an example, for something like
<MyComplexXML>
  ...
  <xalAddress>...</xalAddress>
  <multiPoint>
    <MultiPoint>...</MultiPoint>
  </multiPoint>
  ...
</MyComplexXML>
First, you create your classes like this
using System.Xml.Serialization;

namespace MyComplexXML_Model
{
    /// <summary>
    /// Address field for MyComplexXML
    /// </summary>
    public class Address
    {
        /// <summary>
        /// XalAddress
        /// </summary>
        [XmlElement("xalAddress")]
        public XalAddress XalAddress;

        [XmlElement("multiPoint")]
        public MultiPointAddress MultiPointAddress;
    }
}
and
using System.Xml.Serialization;

namespace MyComplexXML_Model
{
    public class MultiPointAddress
    {
        [XmlElement("MultiPoint", Namespace = "http://www.sample.net/sample")]
        public MultiPoint Multipoint;
    }
}
And when your complete hierarchies are in place, you can deserialize your root element like this:
var ns = new XmlSerializerNamespaces();
ns.Add("sample", "http://www.sample.net/sample");
...
var ms = new MemoryStream();
var sw = new StreamWriter(ms);

// Deserialize from file
var sr = new StreamReader(@"myfile.xml");
var city = (MyComplexXML)new XmlSerializer(typeof(MyComplexXML)).Deserialize(sr);
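The serialize direction uses the same namespace map; a sketch (MyComplexXML stands in for the root class of the sample document, and the output path is arbitrary):
var serializer = new XmlSerializer(typeof(MyComplexXML));
using (var writer = new StreamWriter(@"myfile.out.xml"))
{
    serializer.Serialize(writer, city, ns);
}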
Hope this points you in the right direction.
I have been using EF Migrations for a while in my current project and all was working great - that is, until today. The situation is as follows:
I made a small change of adding a string property
I called an API method and got an error that there are changes in the model
I ran the command "Add-Migration MigrationXYZ"
A new migration is created with extra changes that didn't happen
I ran the "Add-Migration MigrationXYZ -Force" to make sure its not a one thing issue, I dropped the DB, restarted VS(2015) but all the same
Another issue is that even if I apply the migration as done by the scaffolder, an error still returns saying "Unable to update database to match the current model because there are pending changes..."
After looking at these changes, they all but one are about having a string property with the [Required] attribute and the scaffolder need to make it nullable, below is a sample.
public partial class MigrationXYZ : DbMigration
{
    public override void Up()
    {
        AddColumn("dbo.Foos", "NewProperty", c => c.String()); //<-- Expected Change
        AlterColumn("dbo.Bars", "Name", c => c.String()); //<-- Unexpected Change
    }

    public override void Down()
    {
        AlterColumn("dbo.Bars", "Name", c => c.String(nullable: false)); //<-- Unexpected Change
        DropColumn("dbo.Foos", "NewProperty"); //<-- Expected Change
    }
}

public class Bar
{
    //This was not touched in ages, some even before adding the first migration
    [Required]
    public string Name { get; set; }
}
And now I am stuck and don't know how to fix this corruption in the migration state...
Edit
I have been trying to debug the Add-Migration command to understand why EF sees the model as different than it really is, but using the EF source is not possible when you have dependencies like Identity, which need signed DLLs to work.
However, additional research led me to an answer pointing to a blog post by @trailmax with the code to decipher the migration's model hash, and with a little searching in the EF source I made a small app to extract both the current model and the last migration's model to compare them side by side.
The code to get the current model representation in XML
//Extracted from EF Source Code
public static class DbContextExtensions
{
    public static XDocument GetModel(this DbContext context)
    {
        return GetModel(w => EdmxWriter.WriteEdmx(context, w));
    }

    public static XDocument GetModel(Action<XmlWriter> writeXml)
    {
        using (var memoryStream = new MemoryStream())
        {
            using (var xmlWriter = XmlWriter.Create(
                memoryStream, new XmlWriterSettings
                {
                    Indent = true
                }))
            {
                writeXml(xmlWriter);
            }

            memoryStream.Position = 0;

            return XDocument.Load(memoryStream);
        }
    }
}

//In Program.cs
using (var db = new DbContext())
{
    var model = db.GetModel();
    using (var streamWriter = new StreamWriter(@"D:\Current.xml"))
    {
        streamWriter.Write(model);
    }
}
The code to extract the model from the migration in XML
//Code from Trailmax Tech Blog
public class MigrationDecompressor
{
    public string ConnectionString { get; set; }

    public String DecompressMigrationFromSource(IMigrationMetadata migration)
    {
        var target = migration.Target;
        var xmlDoc = Decompress(Convert.FromBase64String(target));
        return xmlDoc.ToString();
    }

    public String DecompressDatabaseMigration(String migrationName)
    {
        var sqlToExecute = String.Format("select model from __MigrationHistory where migrationId like '%{0}'", migrationName);

        using (var connection = new SqlConnection(ConnectionString))
        {
            connection.Open();
            var command = new SqlCommand(sqlToExecute, connection);
            var reader = command.ExecuteReader();

            if (!reader.HasRows)
            {
                throw new Exception("No rows to display. Probably the migration name is incorrect");
            }

            while (reader.Read())
            {
                var model = (byte[])reader["model"];
                var decompressed = Decompress(model);
                return decompressed.ToString();
            }
        }

        throw new Exception("Something went wrong. You should not get here");
    }

    /// <summary>
    /// Stealing the decompressor from EF itself:
    /// http://entityframework.codeplex.com/SourceControl/latest#src/EntityFramework/Migrations/Edm/ModelCompressor.cs
    /// </summary>
    private XDocument Decompress(byte[] bytes)
    {
        using (var memoryStream = new MemoryStream(bytes))
        {
            using (var gzipStream = new GZipStream(memoryStream, CompressionMode.Decompress))
            {
                return XDocument.Load(gzipStream);
            }
        }
    }
}

//Inside Program.cs
var decompresser = new MigrationDecompressor
{
    ConnectionString = "<connection string>"
};

var databaseSchemaRecord = decompresser.DecompressDatabaseMigration("<migration name>");

using (var streamWriter = new StreamWriter(@"D:\LastMigration.xml"))
{
    streamWriter.Write(databaseSchemaRecord);
}
Unfortunately I still cannot find the issue. The only difference between the current model and the one hashed with the last migration is the expected change for the added property; none of the unexpected changes show up. Also, after running the migration suggested by EF and then comparing the current model with the suggested migration, the model still doesn't match the changes: what should be not null is still not null in the model, while the suggested migration shows it as nullable.
The expected changes show up
<Property Name="NewProperty" Type="String" MaxLength="Max" FixedLength="false" Unicode="true" />
.
.
.
<ScalarProperty Name="NewProperty" ColumnName="NewProperty" />
.
.
.
<Property Name="NewProperty" Type="nvarchar(max)" Nullable="true" />
Try rolling back your database to one of your previous migrations with
Update-Database -TargetMigration "nameofpreviousmigration"
(you may need to run Update-Database before running the above; I am not certain).
Then delete your new migration, create a completely new migration and run
Update-Database.
Hopefully this will fix the problem with it thinking there is an extra migration.
Another option, though probably not the best solution, is to manually edit the migration and take out the unexpected part.
Well, looking again at @trailmax's answer, I wanted to try something. One piece of info that I didn't include in the question - because it is used in other places, was not changed in this migration, and was dismissed as the cause by @trailmax as well - is attributes, and ExpressiveAnnotations attributes in particular.
My actual Bar class looks like this
public class Bar
{
    //This was not touched in ages, some even before adding the first migration
    [Required]
    [AssertThat(@"<Condition>", ErrorMessage = "Please revise the name")]
    public string Name { get; set; }
}
I commented out the AssertThat attribute, and guess what, all the changes that shouldn't exist disappeared.
Please try providing the connection string and provider explicitly with the Update-Database command. You can find these values in your connection string settings.
Sometimes we need to direct Entity Framework to connect to the right database. One such case is selecting the wrong project as the start-up project, which makes Entity Framework connect to the default database.
Update-Database -ConnectionString "" -ConnectionProviderName ""
I'm trying to find out the semantics of the System.Data.Entity.Migrations.Infrastructure.IMigrationMetadata interface in EF. I know that it's used to manage and apply DB migrations, but I can't find detailed information about it. To be specific, I would like to know:
What is the Source property used for? Why is it always null when I generate migrations using the tools?
What is the Target property used for? I see that the tools generate something Base64-looking and place it into resources. What is it? Why is it generated in such an unfriendly format?
Is it possible to develop a migration manually, without using the tools? I suppose it is not easy because of that Base64-like Target value, which has to be generated somehow. Am I right?
When is this interface actually used? So far I have found that migrations not implementing this interface can't be found automatically by the migrator. Am I right? Is that the only purpose of the interface?
The IMigrationMetadata interface has the following responsibilities that I know of:
Identify the migration via the Id property so that it can be recognized and included by commands such as Update-Database.
Supply a snapshot of the model as it is after the migration is applied via the Target property. This is used to determine the changes that should be included in a new migration.
I am guessing that the Source property is often not implemented by the tooling as it is not required in the implementation of Add-Migration. That code probably just compares the model as it was at the end of the most recent, existing migration with a model generated from the code to determine the changes that need to be included in the new migration.
The Target property returns a model in EDMX format that has been both compressed using GZipStream and encoded using Convert.ToBase64String. I wrote the following code to both decode and encode these values. You would probably find this useful if you are going to be coding migrations manually.
using System;
using System.IO;
using System.IO.Compression;
using System.Text;

namespace ConsoleApplication6
{
    class Program
    {
        static void Main()
        {
            var minimalModel = File.ReadAllText("Model1.edmx");
            var encodedMinimalModel = Encode(minimalModel);
            var decodedMinimalModel = Decode(encodedMinimalModel);
        }

        private static string Decode(string encodedText)
        {
            var compressedBytes = Convert.FromBase64String(encodedText);
            var decompressedBytes = Decompress(compressedBytes);
            return Encoding.UTF8.GetString(decompressedBytes);
        }

        private static string Encode(string plainText)
        {
            var bytes = Encoding.UTF8.GetBytes(plainText);
            var compressedBytes = Compress(bytes);
            return Convert.ToBase64String(compressedBytes);
        }

        public static byte[] Decompress(byte[] bytes)
        {
            using (var memoryStream = new MemoryStream(bytes))
            {
                using (var gzipStream = new GZipStream(memoryStream, CompressionMode.Decompress))
                {
                    return ToByteArray(gzipStream);
                }
            }
        }

        private static byte[] ToByteArray(Stream stream)
        {
            using (var resultMemoryStream = new MemoryStream())
            {
                stream.CopyTo(resultMemoryStream);
                return resultMemoryStream.ToArray();
            }
        }

        public static byte[] Compress(byte[] bytes)
        {
            using (var memoryStream = new MemoryStream())
            {
                using (var gzipStream = new GZipStream(memoryStream, CompressionMode.Compress))
                {
                    gzipStream.Write(bytes, 0, bytes.Length);
                }

                return memoryStream.ToArray();
            }
        }
    }
}
The compression probably explains your query as to why a non-human readable format was chosen. This content is repeated at least once (in the Target property) for each migration and can be large depending on the size of the model. The compression saves on space.
On that note, as far as I can see, it is really only the last migration that is required to return a true representation of the model after it has been applied. Only that migration is used by Add-Migration to calculate the changes required in the new migration. If you are dealing with a very large model and/or a very large number of migrations, removing that content could be advantageous. The remainder of this post covers my derivation of a minimal value for the Target property which can be used in all but the most recent migration.
The Target property must return a string object - if Target returns null, an ArgumentNullException is thrown from the call to System.Convert.FromBase64String in System.Data.Entity.Migrations.DbMigrator.ApplyMigration when Update-Database is run.
Further, it must be a valid XML document. When I returned an empty string from Target I got an XmlException with the message "Root element is missing.".
From this point on, I used my code from above to encode the values.
I did not get very far with gradually building up the model starting with <root /> for example so I swapped over to discarding elements from an empty EDMX file that I generated by adding a new 'ADO.Net Entity Data Model' to my project and then choosing the 'Empty Model' option. This was the result.
<?xml version="1.0" encoding="utf-8"?>
<edmx:Edmx Version="3.0" xmlns:edmx="http://schemas.microsoft.com/ado/2009/11/edmx">
<edmx:Runtime>
<edmx:StorageModels>
<Schema xmlns="http://schemas.microsoft.com/ado/2009/11/edm/ssdl" Namespace="Model1.Store" Alias="Self" Provider="System.Data.SqlClient" ProviderManifestToken="2005">
</Schema>
</edmx:StorageModels>
</edmx:Runtime>
</edmx:Edmx>
When I encoded this using my code from above, this was the result.
H4sIAAAAAAAEAJVQy07DMBC8I/EP1t6xExASRA1VVTgWIYK4W/amtfCjeN2q/D12HsqJAxdLOzOe2Z3V+uIsO2MkE3wLNa+AoVdBG79v4ZT6mwdYP11frVC7S/OSH/Y5i++KOH/31BS2hUNKx0YIUgd0krgzKgYKfeIqOCF1ELdV9SjqWhQ5ZFfGRt/3k0/G4YDMWJdClHvcBY2WJiZz3WA+xv4vURBpC+xVOqSjVNjC4F3zkoTANtbIbNmh7YG9xXA2GmOefyih488ySd5926016NMi2ElveqT0Eb4wd5Lz7mHZVozrzoeJPy6biKWGCSh95+kXfT3Qv6UBAAA=
Be careful to ensure that you retain the real Target values for each of your migrations in source control in case you need to roll back to an earlier version. You could try applying the migration to a database and then using Visual Studio to generate an EDMX file. Another alternative would be to roll back the classes that form your model and then execute Add-Migration. Take the Target value from the newly created migration.
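Tying the pieces together, a hand-written migration would then look roughly like this (a sketch only: the class name, table, column and Id are invented, and the Target string is the compressed, Base64-encoded EDMX snapshot discussed above):
public sealed class AddMyColumn : DbMigration, IMigrationMetadata
{
    public override void Up()
    {
        AddColumn("dbo.MyTable", "MyColumn", c => c.String());
    }

    public override void Down()
    {
        DropColumn("dbo.MyTable", "MyColumn");
    }

    // The timestamp prefix keeps migrations ordered correctly.
    string IMigrationMetadata.Id
    {
        get { return "201301010000000_AddMyColumn"; }
    }

    // Not required by Add-Migration, so it can be null.
    string IMigrationMetadata.Source
    {
        get { return null; }
    }

    // The minimal (or real) encoded model, e.g. the value derived above.
    string IMigrationMetadata.Target
    {
        get { return "H4sIAAAAAAAEAJVQy07DMBC8I..."; }
    }
}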
I was just looking into this because I wanted to use the Source property to enforce a strict ordering of migrations.
The answer to question 1 is hidden in DbMigrator.Scaffold
var scaffoldedMigration
    = _configuration.CodeGenerator.Generate(
        migrationId,
        migrationOperations,
        (sourceModel == _emptyModel.Value)
        || (sourceModel == _currentModel)
        || !sourceMigrationId.IsAutomaticMigration()
            ? null
            : Convert.ToBase64String(modelCompressor.Compress(sourceModel)),
        Convert.ToBase64String(modelCompressor.Compress(_currentModel)),
        @namespace,
        migrationName);
In other words, the Source property is only filled if the previous migration was an "Automatic Migration". Just tested it, and a subsequent migration after an automatic migration yields something like this:
[GeneratedCode("EntityFramework.Migrations", "6.2.0-61023")]
public sealed partial class Fourth : IMigrationMetadata
{
private readonly ResourceManager Resources = new ResourceManager(typeof(Fourth));
string IMigrationMetadata.Id
{
get { return "201905250916038_Fourth"; }
}
string IMigrationMetadata.Source
{
get { return Resources.GetString("Source"); }
}
string IMigrationMetadata.Target
{
get { return Resources.GetString("Target"); }
}
}
You can go to the EF6 repository on CodePlex and see:
public interface IMigrationMetadata
{
    /// <summary>
    /// Gets the unique identifier for the migration.
    /// </summary>
    string Id { get; }

    /// <summary>
    /// Gets the state of the model before this migration is run.
    /// </summary>
    string Source { get; }

    /// <summary>
    /// Gets the state of the model after this migration is run.
    /// </summary>
    string Target { get; }
}
You can get the project and check references to see how this interface is being used. The Base64 thing is your model; again, with the code you should be able to track how it is done.
The results from a rehosted designer (WF4) have an issue when a default value is added to an argument. Every other case seems to work fine. This is the (abridged) XAML of a (nearly) empty workflow:
<Activity mc:Ignorable="sap" x:Class="{x:Null}" this:_b40c.NewArg="test" xmlns="http://schemas.microsoft.com/netfx/2009/xaml/activities"
xmlns:av="http://schemas.microsoft.com/winfx/2006/xaml/presentation" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:mva="clr-namespace:Microsoft.VisualBasic.Activities;assembly=System.Activities" xmlns:sap="http://schemas.microsoft.com/netfx/2009/xaml/activities/presentation"
xmlns:scg="clr-namespace:System.Collections.Generic;assembly=mscorlib" xmlns:this="clr-namespace:" xmlns:twc="clr-namespace:Telogis.Workflow.CustomerApi;assembly=Telogis.Workflow.Activities"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml">
<x:Members>
<x:Property Name="AuthenticationHeader" Type="InArgument(twc:AuthenticationHeader)" />
<x:Property Name="BaseTdeUri" Type="InArgument(x:Uri)" />
<x:Property Name="NewArg" Type="InArgument(x:String)" />
</x:Members>
<sap:VirtualizedContainerService.HintSize>654,676</sap:VirtualizedContainerService.HintSize>
<mva:VisualBasic.Settings>Assembly references and imported namespaces serialized as XML namespaces</mva:VisualBasic.Settings>
<Flowchart />
</Activity>
Specifically, when a default value is added, the following additions are made to the definition: this:_b40c.NewArg="test" and xmlns:this="clr-namespace:".
xmlns:this="clr-namespace:" is invalid as it doesn't point anywhere and can't be parsed with ActivityXamlServices.Load(stream); (it throws XamlObjectWriterException: "'Cannot set unknown member '{clr-namespace:}_b40c.NewArg'.' ...)
This seems to occur whatever the specified type of the argument is.
Any idea what could be causing this?
Update
I was using an ActivityBuilder to utilise the activity in the first place. This was fine, but as I hadn't provided it with a name, it had to generate a key, in the example above _b40c. ActivityXamlServices.Load has some kind of issue processing these keys. However, simply defining a name for the ActivityBuilder seems to do the trick.
This still doesn't answer why it creates xmlns:this="clr-namespace:" without an actual namespace.
Your workflow xaml is invalid. I'm not sure where you got it or how it got into this state.
I can tell this because
<Activity
    x:Class="{x:Null}"
    this:_b40c.NewArg="test"
    xmlns:this="clr-namespace:"
the clr-style namespace declaration is invalid. It should read
clr-namespace:Some.Namespace.In.The.Current.Assembly
or
clr-namespace:Some.Namespace;assembly=SomeAssemblyWithSomeNamespace
As your declaration is malformed, the this XML namespace cannot be parsed by the XamlObjectWriter to determine what namespace/assembly your _b40c type exists in. That member name also looks highly suspicious. And I've never seen x:Class set to null before; that also strikes me as malformed.
If I understand correctly, this is a WF Designer bug.
I faced this problem when I had to support default value definitions for InArgument<T> in my custom WF designer. I'm still surprised at the lack of support for this basic procedure.
There are two reasons for the failure:
The {x:Null} definition in the x:Class attribute
The invalid definition of the xmlns:this attribute
And the main problem is the invalid definition of the argument's default value: this:_effe.MyArgument="asd". The default value definition for an argument should have the form this:MyXamlClassName.MyArgument="asd". For example, if your x:Class definition is x:Class="MyNamespace.MyClass", the argument definition should be this:MyClass.MyArgument="asd".
I resolved it by intervening in the XAML saving process.
After the call to
_workflowDesigner.Save(_editedFile);
I added the following:
#region x:Class and Argument<T> default value issues solution
await CreateAttributeValue(_editedFile, ConstXClassAttributeName, typeof(App).Namespace + "." + Path.GetFileNameWithoutExtension(_editedFile));
//should finish first operation before second operation begins to avoid I/O exception
await CreateAttributeValue(_editedFile, ConstNamespaceAttributeName, ConstXamlClrNamespace + typeof(App).Namespace);
await RepairArgsAttributes(_editedFile);
#endregion
These are the method definitions:
/// <summary>
/// Reason for using this method: a bug in the workflow designer. When you save your XAML file, the WF designer assigns "{x:Null}" to the x:Class attribute.
/// Bug: in addition, if you want to set a default value for your InArgument<T>, it defines the attribute "this" (a namespace declaration) with an empty value. When you try to open your file, the designer fails to parse the XAML.
/// </summary>
/// <param name="editedFile"></param>
/// <param name="attributeName"></param>
/// <param name="attributeValueToReplace"></param>
private static async Task CreateAttributeValue(string editedFile, string attributeName, string attributeValueToReplace)
{
    XmlDocument xmlDoc = new XmlDocument();
    await Task.Run(() => xmlDoc.Load(editedFile));
    await Task.Run(() =>
    {
        var attributeToReplace = xmlDoc.FirstChild.Attributes?[attributeName];
        if (null != attributeToReplace)
        {
            xmlDoc.FirstChild.Attributes[attributeName].Value = attributeValueToReplace;
            xmlDoc.Save(editedFile);
        }
    });
}
/// <summary>
/// Bug in the workflow designer: the designer saves the declaration for In/Out arguments in an invalid format, meaning it is unable to open the same file it saved itself. This method fixes the argument declarations in the XAML xmlns.
/// </summary>
/// <param name="editedFile"></param>
/// <returns></returns>
private async Task RepairArgsAttributes(string editedFile)
{
    XmlDocument xmlDoc = new XmlDocument();
    await Task.Run(() => xmlDoc.Load(editedFile));
    await Task.Run(() =>
    {
        for (int i = 0; i < xmlDoc.FirstChild.Attributes.Count; i++)
        {
            if (xmlDoc.FirstChild.Attributes[i].Name.StartsWith(ConstInvalidArgStarts))
            {
                string invalidAttrName = xmlDoc.FirstChild.Attributes[i].Name; // extract the full argument declaration from the xmlns
                string[] oldStrings = invalidAttrName.Split('.'); // extract the argument name string
                string localName = Path.GetFileNameWithoutExtension(editedFile) + "." + oldStrings[1]; // build a valid argument declaration without the prefix
                string valueBackup = xmlDoc.FirstChild.Attributes[i].Value; // save the default value of the Argument<T>
                xmlDoc.FirstChild.Attributes.RemoveNamedItem(xmlDoc.FirstChild.Attributes[i].Name); // remove the invalid argument declaration with the default value. WARNING: once the attribute is removed, another item sits at position xmlDoc.FirstChild.Attributes[i]

                // Defining a new valid attribute requires setting separately the attribute prefix, the localName (not "name" - that causes an invalid attribute definition) and a valid namespace URI (in our case the namespace definition in "this").
                XmlAttribute attr = xmlDoc.CreateAttribute(ConstArgPrefix, localName, xmlDoc.FirstChild.Attributes[ConstNamespaceAttributeName].Value);
                attr.Value = valueBackup;
                xmlDoc.FirstChild.Attributes.InsertBefore(attr, xmlDoc.FirstChild.Attributes[i]); // define the new, correct argument declaration attribute in the same place the invalid attribute was; putting the valid attribute at the same position restores the attribute order that was changed by the removal
            }
        }
        xmlDoc.Save(editedFile);
    });
}
The constants are defined as:
#region Constants
private const string ConstXClassAttributeName = "x:Class";
private const string ConstXamlClrNamespace = "clr-namespace:";
private const string ConstNamespaceAttributeName = "xmlns:this";
private const string ConstInvalidArgStarts = @"this:_";
private const string ConstArgPrefix = @"this";
#endregion
This solution should resolve your issue too.