Earlier this week, the solution designer at my company issued a challenge to all developers: find a bug in some code that he wrote. What happened was that when the build admins retracted his solution package from SharePoint, something went wrong and the whole application pool died.
It is speculated that the cause is one of the following:
Something in the code
Was it somehow related to retracting the solution from Central Admin?
C – none of the above
D – all of the above
I've been looking at the code today, but have not been able to find anything askew. I thought I'd put his code here and get an opinion from anyone who is interested in giving one. This is not a serious question and the challenge was issued in good sport, so please just give it any attention if you feel so inclined.
The message from the SD:
As I mentioned in DevDays yesterday, when the SharePoint admins retracted the CityDepartments solution package from SharePoint, the whole IIS Application Pool died. This was before the OWSTIMER.EXE timer job could unlock all the Web Applications and sites that it locked before the actual features were deactivated.
So, since nobody knows what went wrong, I will reward the first person who figures it out (myself included).
As promised, if you can find the problem in one of the FeatureDeactivating events that causes SharePoint to crash spectacularly, like it did last week, then I will buy you 2 movie tickets to any NuMetro or SterKinekor movie that is showing at the moment. The proof will be in the successful deploy and retract (3 times just to be sure) of the fixed solution package(s) in the DEV and QA environments.
List Feature Event Receiver:
using System;
using System.Runtime.InteropServices;
using System.Security.Permissions;
using Microsoft.SharePoint;
using Microsoft.SharePoint.Administration;
using Microsoft.SharePoint.Taxonomy;
using Microsoft.Office.DocumentManagement.MetadataNavigation;
using Microsoft.Office.Server.SocialData;
namespace CityDepartmentStructure.SharepointExtractTimerJob.Features.CityDepartmentsListFeature
{
/// <summary>
/// This class handles events raised during feature activation, deactivation, installation, uninstallation, and upgrade.
/// </summary>
/// <remarks>
/// The GUID attached to this class may be used during packaging and should not be modified.
/// </remarks>
[Guid("ce0a04a0-b20b-4587-998a-6817dce2d4d8")]
public class CityDepartmentsListFeatureEventReceiver : SPFeatureReceiver
{
    /// <summary>
    /// Creates the "DepartmentSites" links list on the activated web, adds a
    /// managed-metadata column ("OrgLevel") bound to the
    /// CityDepartments/Directorates term set, and configures metadata
    /// navigation (tree hierarchy plus key filter) for the list.
    /// </summary>
    public override void FeatureActivated(SPFeatureReceiverProperties properties)
    {
        SPServiceContext context = SPServiceContext.GetContext(SPServiceApplicationProxyGroup.Default, SPSiteSubscriptionIdentifier.Default);
        SocialTagManager stm = new SocialTagManager(context);
        TaxonomySession taxonomySession = stm.TaxonomySession;
        TermStore termStore = taxonomySession.DefaultSiteCollectionTermStore;
        Group termGroup = termStore.Groups["CityDepartments"];
        TermSet termSet = termGroup.TermSets["Directorates"];
        // BUG FIX: do NOT wrap properties.Feature.Parent in a 'using' block.
        // That SPWeb is owned by SharePoint, not by this receiver; disposing
        // it leaves the feature framework holding a dead object and is the
        // classic cause of the application pool dying during deploy/retract.
        SPWeb web = properties.Feature.Parent as SPWeb;
        if (web == null)
        {
            throw new InvalidOperationException("CityDepartmentsListFeature must be activated at web (SPWeb) scope.");
        }
        web.Lists.Add("DepartmentSites", "This list maintains a list of web sites for all Org Units in CCT", SPListTemplateType.Links);
        web.Update();
        SPList departmentsList = web.Lists["DepartmentSites"];
        // Create and configure the taxonomy (managed metadata) field.
        TaxonomyField taxonomyField = departmentsList.Fields.CreateNewField("TaxonomyFieldType", "OrgLevel") as TaxonomyField;
        taxonomyField.Description = "Org Unit in the Org Structure. Can be a Directorate, Department, Branch or Section.";
        taxonomyField.SspId = termStore.Id;
        taxonomyField.TermSetId = termSet.Id;
        taxonomyField.AllowMultipleValues = false;
        taxonomyField.CreateValuesInEditForm = false;
        taxonomyField.Open = false;
        taxonomyField.Group = "CCT Metadata Field Content Type";
        taxonomyField.Required = true;
        departmentsList.Fields.Add(taxonomyField);
        TaxonomyField field = departmentsList.Fields["OrgLevel"] as TaxonomyField;
        field.Title = "OrgLevel";
        field.Update(true);
        departmentsList.Update();
        // Surface the new column in the default view.
        SPView view = departmentsList.DefaultView;
        view.ViewFields.Add("OrgLevel");
        view.Update();
        // Metadata navigation: add the field as a navigation hierarchy...
        var navigationField = departmentsList.Fields["OrgLevel"] as SPField;
        MetadataNavigationSettings navigationSettings = MetadataNavigationSettings.GetMetadataNavigationSettings(departmentsList);
        MetadataNavigationHierarchy navigationHierarchy = new MetadataNavigationHierarchy(navigationField);
        navigationSettings.AddConfiguredHierarchy(navigationHierarchy);
        MetadataNavigationSettings.SetMetadataNavigationSettings(departmentsList, navigationSettings, true);
        departmentsList.Update();
        // ...and as a key filter.
        MetadataNavigationKeyFilter navigationKeyFilter = new MetadataNavigationKeyFilter(navigationField);
        navigationSettings.AddConfiguredKeyFilter(navigationKeyFilter);
        MetadataNavigationSettings.SetMetadataNavigationSettings(departmentsList, navigationSettings, true);
        departmentsList.Update();
        // NOTE: the original try { ... } catch (Exception ex) { throw ex; }
        // wrapper was removed - it only reset the stack trace and made the
        // failure harder to diagnose.
    }

    /// <summary>
    /// Deletes the "DepartmentSites" list when the feature is deactivated.
    /// </summary>
    public override void FeatureDeactivating(SPFeatureReceiverProperties properties)
    {
        // BUG FIX: same ownership rule as activation - properties.Feature.Parent
        // must not be disposed here. Disposing it during retraction is what
        // took the whole application pool down.
        SPWeb web = properties.Feature.Parent as SPWeb;
        if (web == null)
        {
            return; // not a web-scoped activation; nothing to clean up
        }
        SPList departmentsList = web.Lists["DepartmentSites"];
        departmentsList.Delete();
    }
}
}
Timer Job Feature Event Receiver:
using System;
using System.Runtime.InteropServices;
using System.Security.Permissions;
using Microsoft.SharePoint;
using Microsoft.SharePoint.Administration;
using System.Linq;
namespace CityDepartmentStructure.SharepointExtractTimerJob.Features.Feature1
{
/// <summary>
/// This class handles events raised during feature activation, deactivation, installation, uninstallation, and upgrade.
/// </summary>
/// <remarks>
/// The GUID attached to this class may be used during packaging and should not be modified.
/// </remarks>
[Guid("10e80e0f-7be3-46f0-8a7f-fcf806ddf762")]
public class Feature1EventReceiver : SPFeatureReceiver
{
    private const string JobName = "ExtractTimerJob";

    /// <summary>
    /// Registers the ExtractTimerJob (hourly, during the first quarter of the
    /// hour) and its settings object when the feature is activated. Any
    /// existing job/settings with the same name are removed first.
    /// </summary>
    public override void FeatureActivated(SPFeatureReceiverProperties properties)
    {
        SPService service = GetService();
        // Remove job if it exists.
        DeleteJobAndSettings(service);
        // Create the job.
        ExtractTimerJob job = new ExtractTimerJob(JobName, service);
        // Create the schedule so that the job runs hourly, sometime
        // during the first quarter of the hour.
        SPHourlySchedule schedule = new SPHourlySchedule();
        schedule.BeginMinute = 0;
        schedule.EndMinute = 15;
        job.Schedule = schedule;
        job.Update();
        // Configure the job.
        ExtractTimerJobSettings jobSettings = new ExtractTimerJobSettings(service, Guid.NewGuid());
        jobSettings.Name = "ExtractTimerJobSettings";
        jobSettings.WebServiceLocation = "http://r3pci01.capetown.gov.za:8150/sap/zget_orgstruct";
        jobSettings.Update(true);
    }

    /// <summary>
    /// Removes the job and its settings when the feature is deactivated.
    /// </summary>
    public override void FeatureDeactivating(SPFeatureReceiverProperties properties)
    {
        DeleteJobAndSettings(GetService());
    }

    private void DeleteJobAndSettings(SPService service)
    {
        // Find the job and delete it. The 'break' immediately after Delete()
        // matters: it stops us from continuing to enumerate a collection we
        // just modified.
        foreach (SPJobDefinition job in service.JobDefinitions)
        {
            if (job.Name == JobName)
            {
                job.Delete();
                break;
            }
        }
        // Delete the job's settings.
        ExtractTimerJobSettings jobSettings = service.GetChild<ExtractTimerJobSettings>("ExtractTimerJobSettings");
        if (jobSettings != null)
        {
            jobSettings.Delete();
        }
    }

    private static SPService GetService()
    {
        // Get an instance of the SharePoint farm.
        SPFarm farm = SPFarm.Local;
        // Find the service the job is attached to. FirstOrDefault plus an
        // explicit throw gives a diagnosable message instead of the bare
        // InvalidOperationException that First() produced when the service
        // was missing from the farm.
        SPService service = farm.Services.FirstOrDefault(s => s.Name == "SPSearch4");
        if (service == null)
        {
            throw new InvalidOperationException("Service 'SPSearch4' was not found in the local farm.");
        }
        return service;
    }
}
}
Without any logs it may be hard to figure out what exactly happened, but I would guess that disposing Properties.Feature.Parent isn't a good idea.
If you're doing this from powershell, it may lead to some problems, cause most probably it will always try to use the same object.
Next thing is, using what script you're deploying/retracting solution. Are you explicitly Activate/Deactivate feature every time?
Moreover, killing app pools is normal behavior for deployment and retraction, but the problem with starting them again may be related to some kind of timeout — for example, the app pool start may be attempted before the pool has really shut down.
Related
In short:
I have a C# console app which i used to test and write data from SQL to SharePoint lists.
Everything works fine while it is ran as console app. I get the connection towards SQL, context is created, then I connect to SharePoint site and proceed with update of certain fields.
Now, when I deploy working solution as a timerjob (a .wsp) for sharepoint, and when I update it to the server farm and deploy it as feature to the site, and run it as a timerjob, it does work, but only, so to speak "once".
When I run that timer job, it receives the SQL context, connects, and updates the SharePoint lists. But when I change data in a SQL table (e.g. a field called "price" from 10.99 to 11.99) and run the timer job again, it still only updates the "old" data — to be exact, the 10.99 value.
Now, when doing this with the console app .exe on the server, no matter how many DB changes I perform, it always updates the newest data; but as a timer job it seems like it "hangs" onto the previous context connection and updates the previous data only.
Do I need to specify, in the code, to drop the context after the timer job has ended its run, so it can obtain the same but "fresh" context on the next run?
Here is inital code in the .wsp
// Timer job wrapping the console-app sync logic (see Main below).
class TimerJobPromoToolsDefinition : SPJobDefinition
{
    // Parameterless constructor required by SharePoint for deserializing the
    // persisted job definition.
    public TimerJobPromoToolsDefinition() : base()
    {
    }
    // Service-scoped job, no job lock.
    public TimerJobPromoToolsDefinition(string jobName, SPService service) : base(jobName, service, null, SPJobLockType.None)
    {
        this.Title = "PromoTools_Timer_Job";
    }
    // Web-application-scoped job, locked per content database.
    public TimerJobPromoToolsDefinition(string jobName, SPWebApplication webapp) : base(jobName, webapp, null, SPJobLockType.ContentDatabase)
    {
        this.Title = "PromoTools_Timer_Job";
    }
public override void Execute(Guid targetInstanceId)
{
System.Diagnostics.Trace.Assert(false);
Main();
}
    // Cached connection/context for the sync. NOTE(review): this is a static
    // field in a class that OWSTIMER.EXE keeps loaded between runs, so
    // anything TimerJobConnection caches internally survives across job
    // executions - presumably why the timer job keeps seeing stale SQL data
    // while the console app (a fresh process each run) always sees fresh
    // data. Consider creating and disposing the context inside Main() -
    // TODO confirm whether TimerJobConnection is IDisposable.
    private static TimerJobConnection _context;
    // Performs one full sync pass: load articles from the PromoTools lists,
    // load matching rows from the database, then update the SharePoint lists.
    // (GetArtikliFromPromoTools / GetArtikliFromDB / UpdateArticles and the
    // Print* helpers are defined elsewhere in the original source.)
    public static void Main()
    {
        PrintInitializing();
        _context = new TimerJobConnection();
        string siteURL = "http://somesite.com/";
        try
        {
            // These SPSite/SPWeb objects are created here, so 'using' is the
            // correct ownership pattern (unlike feature receiver parents).
            using (SPSite site = new SPSite(siteURL))
            {
                using (SPWeb web = site.OpenWeb())
                {
                    var catalogs = GetArtikliFromPromoTools(web);
                    var articlesFiltered = GetArtikliFromDB(catalogs);
                    PrintFinishedLoadingArticles();
                    GetSharePointCatalogHeaders(web);
                    UpdateArticles(catalogs, articlesFiltered, web);
                    PrintEndOfOperation();
                    // Console output is invisible when running under the
                    // timer service; only useful in the console-app mode.
                    Console.WriteLine("Press any key to continue...");
                }
            }
        }
        catch (Exception)
        {
            // NOTE(review): swallows every exception and only prints to a
            // console nobody sees under OWSTIMER - consider ULS logging.
            PrintErrorSharepointConnection();
            PrintPressKeyToExit();
        }
        finally
        {
            // Intentionally empty. Candidate spot to dispose/reset _context
            // after each run so the next run starts with a fresh context.
        }
    }
I think the "context" should not the issue based on my experience.
Since you are catching the exception, check whether any exception is actually being thrown there. You could also try to debug the code by attaching to the OWSTIMER process.
I want to create a timer job or workflow which runs once a month and exports sharepoint list data to excel and stores this file in a document library.
I have downloaded the code to create a timer job from the link below, but I don't know how to include the above requirement.
http://code.msdn.microsoft.com/SharePoint-2010-Custom-416cd3a1
// Timer job definition derived from SPJobDefinition: logs one timestamped
// item to the "ListTimerJob" list on every run.
class ListTimerJob : SPJobDefinition
{
    // Parameterless constructor required for SharePoint deserialization.
    public ListTimerJob()
        : base()
    {
    }

    // Fully parameterized service-scoped constructor.
    public ListTimerJob(string jobName, SPService service, SPServer server, SPJobLockType targetType)
        : base(jobName, service, server, targetType)
    {
    }

    // Web-application-scoped job, locked per content database.
    public ListTimerJob(string jobName, SPWebApplication webApplication)
        : base(jobName, webApplication, null, SPJobLockType.ContentDatabase)
    {
        this.Title = "List Timer Job";
    }

    public override void Execute(Guid contentDbId)
    {
        // Resolve the content database this job instance was dispatched for.
        SPWebApplication app = this.Parent as SPWebApplication;
        SPContentDatabase database = app.ContentDatabases[contentDbId];
        // Target the "ListTimerJob" list on the root web of the first site
        // collection in that content database.
        SPList targetList = database.Sites[0].RootWeb.Lists["ListTimerJob"];
        // Append a new item whose Title is the current date/time.
        SPListItem entry = targetList.Items.Add();
        entry["Title"] = DateTime.Now.ToString();
        entry.Update();
    }
}
//Add Event receiver at Feature Level
// Feature receiver that installs/removes the ListTimerJob for the site's
// web application.
[Guid("9a724fdb-e423-4232-9626-0cffc53fb74b")]
public class Feature1EventReceiver : SPFeatureReceiver
{
    // Name under which the timer job is registered in the web application.
    const string List_JOB_NAME = "ListLogger";
// Uncomment the method below to handle the event raised after a feature has been activated.
public override void FeatureActivated(SPFeatureReceiverProperties properties)
{
SPSite site = properties.Feature.Parent as SPSite;
// make sure the job isn't already registered
foreach (SPJobDefinition job in site.WebApplication.JobDefinitions)
{
if (job.Name == List_JOB_NAME)
job.Delete();
}
// install the job
ListTimerJob listLoggerJob = new ListTimerJob(List_JOB_NAME, site.WebApplication);
SPMinuteSchedule schedule = new SPMinuteSchedule();
schedule.BeginSecond = 0;
schedule.EndSecond = 59;
schedule.Interval = 5;
listLoggerJob.Schedule = schedule;
listLoggerJob.Update();
}
// Uncomment the method below to handle the event raised before a feature is deactivated.
public override void FeatureDeactivating(SPFeatureReceiverProperties properties)
{
SPSite site = properties.Feature.Parent as SPSite;
// delete the job
foreach (SPJobDefinition job in site.WebApplication.JobDefinitions)
{
if (job.Name == List_JOB_NAME)
job.Delete();
}
}
I would also advice you not to use SharePoint timer jobs engine.
It's definitely not stable.
Sometimes jobs simply don't trigger, they are difficult and slow to instantiate.
Of course you could always spend time tweaking SharePoint to achieve stability, but there is no guarantee. I know it sounds imperative, but trust me, I can't remember all the problems we had with this engine, but we lost much time on it.
I recommend Quartz.NET or Windows Scheduler, as mentioned earlier.
These are well proven solutions, used by many people, also for SharePoint.
We implemented Quartz.Net for SharePoint at my company, all our Timer Jobs run on this engine.
We had no glitch for two years.
Best Regards.
You should change the SPMinuteSchedule to SPMonthlyByDaySchedule, see http://msdn.microsoft.com/en-us/library/microsoft.sharepoint.spschedule.aspx.
But, my recommendation is to use windows server scheduler and console application. Easy to change, easy to maintain (not iisreset!!) , and easy to log everything. We use console applications for various scheduled jobs, varying from 1 hour to 1 day.
I am currently using the Change Notifications in Active Directory Domain Services in .NET as described in this blog. This will return all events that happen on an selected object (or in the subtree of that object). I now want to filter the list of events for creation and deletion (and maybe undeletion) events.
I would like to tell the ChangeNotifier class to only observe create-/delete-/undelete-events. The other solution is to receive all events and filter them on my side. I know that in case of the deletion of an object, the atribute list that is returned will contain the attribute isDeleted with the value True. But is there a way to see if the event represents the creation of an object? In my tests the value for usnchanged is always usncreated+1 in case of userobjects and both are equal for OUs, but can this be assured in high-frequency ADs? It is also possible to compare the changed and modified timestamp. And how can I tell if an object has been undeleted?
Just for the record, here is the main part of the code from the blog:
/// <summary>
/// Subscribes to Active Directory change notifications over LDAP: each call
/// to Register issues a long-running async search carrying a
/// DirectoryNotificationControl, and every partial result the server pushes
/// back is surfaced through the ObjectChanged event. Dispose aborts all
/// pending searches.
/// </summary>
public class ChangeNotifier : IDisposable
{
    // Demo entry point from the original blog sample: watch two DNs and print
    // each change until the user presses Enter.
    // NOTE(review): CreateConnection is not defined in this snippet -
    // presumably a helper elsewhere in the same blog sample.
    static void Main(string[] args)
    {
        using (LdapConnection connect = CreateConnection("localhost"))
        {
            using (ChangeNotifier notifier = new ChangeNotifier(connect))
            {
                //register some objects for notifications (limit 5)
                notifier.Register("dc=dunnry,dc=net", SearchScope.OneLevel);
                notifier.Register("cn=testuser1,ou=users,dc=dunnry,dc=net", SearchScope.Base);
                notifier.ObjectChanged += new EventHandler<ObjectChangedEventArgs>(notifier_ObjectChanged);
                Console.WriteLine("Waiting for changes...");
                Console.WriteLine();
                Console.ReadLine();
            }
        }
    }
    // Prints the changed entry's DN and every attribute/value pair it carries.
    static void notifier_ObjectChanged(object sender, ObjectChangedEventArgs e)
    {
        Console.WriteLine(e.Result.DistinguishedName);
        foreach (string attrib in e.Result.Attributes.AttributeNames)
        {
            foreach (var item in e.Result.Attributes[attrib].GetValues(typeof(string)))
            {
                Console.WriteLine("\t{0}: {1}", attrib, item);
            }
        }
        Console.WriteLine();
        Console.WriteLine("====================");
        Console.WriteLine();
    }
    LdapConnection _connection; // shared connection all notification searches run on
    HashSet<IAsyncResult> _results = new HashSet<IAsyncResult>(); // pending async searches; aborted in Dispose
    public ChangeNotifier(LdapConnection connection)
    {
        _connection = connection;
        _connection.AutoBind = true; // bind automatically on first request
    }
    /// <summary>
    /// Registers one change-notification search rooted at
    /// <paramref name="dn"/>. Per the sample's note in Main, AD limits the
    /// number of concurrent notification requests on a connection.
    /// </summary>
    public void Register(string dn, SearchScope scope)
    {
        SearchRequest request = new SearchRequest(
            dn, //root the search here
            "(objectClass=*)", //very inclusive
            scope, //any scope works
            null //we are interested in all attributes
            );
        //register our search
        request.Controls.Add(new DirectoryNotificationControl());
        //we will send this async and register our callback
        //note how we would like to have partial results
        IAsyncResult result = _connection.BeginSendRequest(
            request,
            TimeSpan.FromDays(1), //set timeout to a day...
            PartialResultProcessing.ReturnPartialResultsAndNotifyCallback,
            Notify,
            request
            );
        //store the hash for disposal later
        _results.Add(result);
    }
    // Async callback: drains whatever partial results have arrived so far and
    // raises ObjectChanged once per changed entry.
    private void Notify(IAsyncResult result)
    {
        //since our search is long running, we don't want to use EndSendRequest
        PartialResultsCollection prc = _connection.GetPartialResults(result);
        foreach (SearchResultEntry entry in prc)
        {
            OnObjectChanged(new ObjectChangedEventArgs(entry));
        }
    }
    // Raises ObjectChanged if anyone is subscribed.
    private void OnObjectChanged(ObjectChangedEventArgs args)
    {
        if (ObjectChanged != null)
        {
            ObjectChanged(this, args);
        }
    }
    /// <summary>Raised once for every changed directory entry received.</summary>
    public event EventHandler<ObjectChangedEventArgs> ObjectChanged;
    #region IDisposable Members
    // Aborts every outstanding notification search; the connection itself is
    // owned (and disposed) by the caller.
    public void Dispose()
    {
        foreach (var result in _results)
        {
            //end each async search
            _connection.Abort(result);
        }
    }
    #endregion
}
/// <summary>
/// Event payload carrying the directory entry reported by a change
/// notification.
/// </summary>
public class ObjectChangedEventArgs : EventArgs
{
    /// <summary>Wraps the changed entry for the ObjectChanged event.</summary>
    public ObjectChangedEventArgs(SearchResultEntry entry)
    {
        this.Result = entry;
    }

    /// <summary>The changed LDAP entry, as returned by the search.</summary>
    public SearchResultEntry Result { get; set; }
}
I participated in a design review about five years back on a project that started out using AD change notification. Very similar questions to yours were asked. I can share what I remember, and don't think things have change much since then. We ended up switching to DirSync.
It didn't seem possible to get just creates & deletes from AD change notifications. We found change notification resulted enough events monitoring a large directory that notification processing could bottleneck and fall behind. This API is not designed for scale, but as I recall the performance/latency were not the primary reason we switched.
Yes, the usn relationship for new objects generally holds, although I think there are multi-dc scenarios where you can get usncreated == usnchanged for a new user, but we didn't test that extensively, because...
The important thing for us was that change notification only gives you reliable object creation detection under the unrealistic assumption that your machine is up 100% of the time! In production systems there are always some case where you need to reboot and catch up or re-synchronize, and we switched to DirSync because it has a robust way to handle those scenarios.
In our case it could block email to a new user for an indeterminate time if an object create were missed. That obviously wouldn't be good, we needed to be sure. For AD change notifications, getting that resync right that would have some more work and hard to test. But for DirSync, its more natural, and there's a fast-path resume mechanism that usually avoids resync. For safety I think we triggered a full re-synchronize every day.
DirSync is not as real-time as change notification, but its possible to get ~30-second average latency by issuing the DirSync query once a minute.
Just because software is automated doesn't mean it will abide by your robots.txt.
What are some methods available to detect when someone is crawling or DDOSing your website? Assume your site has 100s of 1000s of pages and is worth crawling or DDOSing.
Here's a dumb idea I had that probably doesn't work: give each user a cookie with a unique value, and use the cookie to know when someone is making second/third/etc requests. This probably doesn't work because crawlers probably don't accept cookies, and thus in this scheme a robot will look like a new user with each request.
Does anyone have better ideas?
You could put links in your pages that are not visible, or clickable by end-users. Many bots just follow all links. Once someone requests one of those links you almost certainly have a crawler/robot.
Project Honey Pot keeps a list of 'bad' bots.
Here's a class I wrote to contact their web-service. You'll have to modify it some since I have a couple proprietary libs in it, but mostly it should be good to go. Sometimes their service sends back errors, but it does help cut down on some of the bad traffic.
using System;
using System.Linq;
using System.Net;
using System.Xml.Linq;
using SeaRisenLib2.Text;
using XmlLib;
/// <summary>
/// Client for the Project Honey Pot http:BL service (httpbl.org): looks up a
/// visitor IP against the DNS blacklist and decodes the answer into a
/// <see cref="Score"/> (days since last seen, threat level, bot type).
/// Relies on extension methods from SeaRisenLib2/XmlLib (ToArray, ToStringCSV,
/// SetString, GetInt, GetCategory, Log.Using).
/// </summary>
public class HoneyPot
{
    private const string KEY = "blacklistkey"; // blacklist key - need to register at httpbl.org to get it
    private const string HTTPBL = "dnsbl.httpbl.org"; // blacklist lookup host
    public HoneyPot()
    {
    }
    /// <summary>
    /// Queries http:BL for <paramref name="ip"/>. Returns null for localhost,
    /// for addresses the service has no record of, or when the lookup fails
    /// (failures are logged, not thrown). The query is a DNS lookup of
    /// "key.reversed-ip.dnsbl.httpbl.org"; a 127.x.y.z answer encodes the score.
    /// </summary>
    public static Score GetScore_ByIP(string ip)
    {
        string sendMsg = "", receiveMsg = "";
        int errorCount = 0; // track where in try/catch we fail for debugging
        try
        {
            // for testing: ip = "188.143.232.31";
            //ip = "173.242.116.72";
            if ("127.0.0.1" == ip) return null; // localhost development computer
            IPAddress address;
            if (!IPAddress.TryParse(ip, out address))
                throw new Exception("Invalid IP address to HoneyPot.GetScore_ByIP:" + ip);
            errorCount++; // 1
            // http:BL expects the octets reversed (a.b.c.d -> d.c.b.a).
            // NOTE(review): ToArray('.')/ToStringCSV are proprietary helpers -
            // presumably split-on-dot and join-with-dot; confirm in SeaRisenLib2.
            string reverseIP = ip.ToArray('.').Reverse().ToStringCSV(".");
            sendMsg = string.Format("{0}.{1}.{2}", KEY, reverseIP, HTTPBL);
            errorCount++; // 2
            //IPHostEntry value = Dns.GetHostByName(sendMsg);
            IPHostEntry value = Dns.GetHostEntry(sendMsg);
            errorCount++; // 3
            address = value.AddressList[0];
            errorCount++; // 4
            receiveMsg = address.ToString();
            errorCount++; // 5
            // Split the 127.days.threat.type answer into its four octets.
            int[] ipArray = receiveMsg.ToArray('.').Select(s => Convert.ToInt32(s)).ToArray();
            errorCount++; // 6
            if (127 != ipArray[0]) // error
                throw new Exception("HoneyPot error");
            errorCount++; // 7
            Score score = new Score()
            {
                DaysSinceLastSeen = ipArray[1],
                Threat = ipArray[2],
                BotType = ipArray[3]
            };
            errorCount++; // 8
            return score;
        }
        catch (Exception ex)
        {
            // Unknown hosts (no blacklist record) and service errors both land
            // here; log the request state plus how far we got (errorCount).
            Log.Using("VisitorLog/HoneyPotErrors", log =>
            {
                log.SetString("IPrequest", ip);
                log.SetString("SendMsg", sendMsg, XmlFile.ELEMENT);
                log.SetString("RecvMsg", receiveMsg, XmlFile.ELEMENT);
                log.SetString("Exception", ex.Message, XmlFile.ELEMENT);
                log.SetString("ErrorCount", errorCount.ToString());
            });
        }
        return null;
    }
    // Bitwise values
    public enum BotTypeEnum : int
    {
        SearchEngine = 0,
        Suspicious = 1,
        Harvester = 2,
        CommentSpammer = 4
    }
    /// <summary>
    /// Decoded http:BL response; -1 in any field means "not set".
    /// </summary>
    public class Score
    {
        public Score()
        {
            BotType = -1;
            DaysSinceLastSeen = -1;
            Threat = -1;
        }
        public int DaysSinceLastSeen { get; internal set; }
        public int Threat { get; internal set; }
        /// <summary>
        /// Use BotTypeEnum to understand value.
        /// </summary>
        public int BotType { get; internal set; }
        /// <summary>
        /// Convert HoneyPot Score values to String (DaysSinceLastSeen.Threat.BotType)
        /// </summary>
        /// <returns></returns>
        public override string ToString()
        {
            return string.Format("{0}.{1}.{2}",
                DaysSinceLastSeen,
                Threat,
                BotType);
        }
        /// <summary>
        /// Serializes the Score to a "HoneyPot" XElement; negative (unset)
        /// values are omitted. SetString/GetCategory are XmlLib extensions.
        /// </summary>
        public static explicit operator XElement(Score score)
        {
            XElement xpot = new XElement("HoneyPot");
            if (null != score)
            {
                if (score.DaysSinceLastSeen >= 0)
                    xpot.SetString("Days", score.DaysSinceLastSeen);
                if (score.Threat >= 0)
                    xpot.SetString("Threat", score.Threat);
                if (score.BotType >= 0)
                    xpot.SetString("Type", score.BotType);
                foreach (BotTypeEnum t in Enum.GetValues(typeof(BotTypeEnum)))
                {
                    // Log enum values as string for each bitwise value represented in score.BotType
                    int value = (int)t;
                    if ((value == score.BotType) || ((value & score.BotType) > 0))
                        xpot.GetCategory(t.ToString());
                }
            }
            return xpot;
        }
        /// <summary>
        /// Deserializes a Score from a "HoneyPot" XElement; null in, null out.
        /// </summary>
        public static explicit operator Score(XElement xpot)
        {
            Score score = null;
            if (null != xpot)
                score = new Score()
                {
                    DaysSinceLastSeen = xpot.GetInt("Days"),
                    Threat = xpot.GetInt("Threat"),
                    BotType = xpot.GetInt("Type")
                };
            return score;
        }
    }
    /// <summary>
    /// Log score value to HoneyPot child Element (if score not null).
    /// </summary>
    /// <param name="score"></param>
    /// <param name="parent"></param>
    public static void LogScore(HoneyPot.Score score, XElement parent)
    {
        if ((null != score) && (null != parent))
        {
            parent.Add((XElement)score);
        }
    }
}
Though technically, it would not "detect" bot crawlers, I have an interesting way to block them. My method is to create an IIS filter or Apache plug-in. What you would do is encrypt all your html, asp, php, etc... pages. The only page not encrypted would be the index page. The index page simply installs a cookie with an encrypted public key, then redirects to a second index page. The IIS filter or Apache plug-in would then check each vistor to make sure they have this cookie. If it does, the filter would decrypt the requested page, then pass the page onto the web server for processing.
This method would allow a normal vistor to view your web page, but if a bot, which rejects cookies, tried to read your pages, they would all be encrypted.
A blacklist is probably not a good way of doing it, it would be better to have a whitelist of known bots that are allowed to make over a certain amount of hits per second. If someone not on that whitelist makes too many hits per second, start dropping their connections for a few seconds. This will help prevent ddosing, and still let unknown bots scan your site (although a lot slower than the ones you think matter).
You can keep a log of the offenders and see who are breaking the rules repeatedly :)
First, I will outline my issue in case someone has an alternate fix.
The Problem:
I have a winform app that uses MergeReplication. This is working great except I needed to make changes to the columns and Primary Key on 5 Tables. I dropped them from the Articles and then made my changes. I then re-added them to the Articles and set the Publication to Reintilialize All.
Unfortunately, this does not work. When I go to run the Subscription Program it tells me that the Subscription is InValid.
EDIT 1
I have a correction/addition here. The actual errors I am getting in the Replication Monitor are such -->
Error messages:
The schema script 'tblCaseNotes_4.sch' could not be propagated to the subscriber. (Source: MSSQL_REPL, Error number: MSSQL_REPL-2147201001)
Get help: http://help/MSSQL_REPL-2147201001
Could not drop object 'dbo.tblCaseNotes' because it is referenced by a FOREIGN KEY constraint. (Source: MSSQLServer, Error number: 3726)
Get help: http://help/3726
This seems important because it means that my MergeRepl sync process is trying to ReInitialize but cannot because of the below issue.
The way I was able to fix it on my machine was to use MSSSMS to DELETE the DB and then run my program which creates a db and syncs it. Unfortunately I do not have MSSSMS access to all the remote user SQL Express installs as for security reasons remote connections are off.
My Idea:
Create a small program that runs a .sql script to DELETE the DB on local machine. A la; DROP DATABASE MyDB This is only the test stage so no data preservation is needed.
Unfortunately I haven't the faintest idea how to have a program do that.
The Code:
This is the code that runs as my program loads. It takes care of creating the local db's and subscription if they aren't already there. It then checks to see if they need to be syncronized and kicks off a Pull Sync if needed. I include it because of the possibility that my solution is a change to this code.
I call this code like this -->
MergeRepl matrixMergeRepl = new MergeRepl(SystemInformation.ComputerName + "\\SQLEXPRESS","WWCSTAGE","MATRIX","MATRIX","MATRIX");
matrixMergeRepl.RunDataSync();
MergeRepl is below -->
// Manages a SQL Server merge replication pull subscription: creates the local
// subscription database if needed, creates/registers the pull subscription,
// and runs the merge agent to synchronize.
public class MergeRepl
{
    // Declare necessary variables
    private string subscriberName; // subscriber SQL Server instance
    private string publisherName; // publisher SQL Server instance
    private string publicationName; // merge publication name
    private string subscriptionDbName; // local (subscriber-side) database
    private string publicationDbName; // publisher-side database
    private MergePullSubscription mergeSubscription; // pull subscription definition
    private MergePublication mergePublication; // publication the subscription pulls from
    private ServerConnection subscriberConn; // SMO connection to the subscriber
    private ServerConnection publisherConn; // SMO connection to the publisher
    private Server theLocalSQLServer; // local SQL Server instance (SMO)
    private ReplicationDatabase localRepDB; // replication settings for the local DB
    /// <summary>
    /// Captures the replication topology (subscriber, publisher, publication
    /// and database names), opens SMO connections to both servers, and makes
    /// sure the local subscription database exists.
    /// </summary>
    /// <param name="subscriber">Subscriber SQL Server instance name.</param>
    /// <param name="publisher">Publisher SQL Server instance name.</param>
    /// <param name="publication">Merge publication name.</param>
    /// <param name="subscriptionDB">Local subscription database name.</param>
    /// <param name="publicationDB">Publisher-side database name.</param>
    public MergeRepl(string subscriber, string publisher, string publication, string subscriptionDB, string publicationDB)
    {
        subscriberName = subscriber;
        publisherName = publisher;
        publicationName = publication;
        subscriptionDbName = subscriptionDB;
        publicationDbName = publicationDB;
        //Create connections to the Publisher and Subscriber.
        subscriberConn = new ServerConnection(subscriberName);
        publisherConn = new ServerConnection(publisherName);
        // Define the pull mergeSubscription
        mergeSubscription = new MergePullSubscription
        {
            ConnectionContext = subscriberConn,
            DatabaseName = subscriptionDbName,
            PublisherName = publisherName,
            PublicationDBName = publicationDbName,
            PublicationName = publicationName
        };
        // Ensure that the publication exists and that it supports pull subscriptions.
        mergePublication = new MergePublication
        {
            Name = publicationName,
            DatabaseName = publicationDbName,
            ConnectionContext = publisherConn
        };
        // Create the local SQL Server instance
        theLocalSQLServer = new Server(subscriberConn);
        // Create a Replication DB Object to initiate Replication settings on local DB
        localRepDB = new ReplicationDatabase(subscriptionDbName, subscriberConn);
        // Check that the database exists locally
        CreateDatabase(subscriptionDbName);
    }
    /// <summary>
    /// Synchronizes the pull subscription: if it exists and carries enough
    /// agent metadata, runs the merge agent synchronously; if it does not
    /// exist yet, creates it via CreateMergeSubscription.
    /// </summary>
    /// <exception cref="ApplicationException">There is insufficient metadata to synchronize the subscription.Recreate the subscription with the agent job or supply the required agent properties at run time.</exception>
    public void RunDataSync()
    {
        // Keep program from appearing 'Not Responding'
        ///// Application.DoEvents();
        // Does the needed Databases exist on local SQLExpress Install
        /////CreateDatabase("ContactDB");
        try
        {
            // Connect to the Subscriber
            subscriberConn.Connect();
            // if the Subscription exists, then start the sync
            if (mergeSubscription.LoadProperties())
            {
                // Check that we have enough metadata to start the agent
                if (mergeSubscription.PublisherSecurity != null || mergeSubscription.DistributorSecurity != null)
                {
                    // Synchronously start the merge Agent for the mergeSubscription
                    // lblStatus.Text = "Data Sync Started - Please Be Patient!";
                    mergeSubscription.SynchronizationAgent.Synchronize();
                }
                else
                {
                    throw new ApplicationException("There is insufficient metadata to synchronize the subscription." +
                    "Recreate the subscription with the agent job or supply the required agent properties at run time.");
                }
            }
            else
            {
                // do something here if the pull mergeSubscription does not exist
                // throw new ApplicationException(String.Format("A mergeSubscription to '{0}' does not exist on {1}", publicationName, subscriberName));
                CreateMergeSubscription();
            }
        }
        catch (Exception ex)
        {
            // NOTE(review): every failure (including the reinitialize/schema
            // errors seen in Replication Monitor) is re-wrapped here, so
            // callers only see this generic message; the original error
            // survives as InnerException.
            throw new ApplicationException("The subscription could not be synchronized. Verify that the subscription has been defined correctly.", ex);
            //CreateMergeSubscription();
        }
        finally
        {
            subscriberConn.Disconnect();
        }
    }
    /// <summary>
    /// Creates the pull subscription at the subscriber (enabling AllowPull on
    /// the publication if needed) and registers it with the publisher when it
    /// is not already known there.
    /// </summary>
    /// <exception cref="ApplicationException"><c>ApplicationException</c>.</exception>
    public void CreateMergeSubscription()
    {
        // Keep program from appearing 'Not Responding'
        // Application.DoEvents();
        try
        {
            if (mergePublication.LoadProperties())
            {
                // Enable pull subscriptions on the publication if not already allowed.
                if ((mergePublication.Attributes & PublicationAttributes.AllowPull) == 0)
                {
                    mergePublication.Attributes |= PublicationAttributes.AllowPull;
                }
                // Make sure that the agent job for the mergeSubscription is created.
                mergeSubscription.CreateSyncAgentByDefault = true;
                // Create the pull mergeSubscription at the Subscriber.
                mergeSubscription.Create();
                Boolean registered = false;
                // Verify that the mergeSubscription is not already registered.
                foreach (MergeSubscription existing in mergePublication.EnumSubscriptions())
                {
                    if (existing.SubscriberName == subscriberName
                        && existing.SubscriptionDBName == subscriptionDbName
                        && existing.SubscriptionType == SubscriptionOption.Pull)
                    {
                        registered = true;
                    }
                }
                if (!registered)
                {
                    // Register the local mergeSubscription with the Publisher.
                    mergePublication.MakePullSubscriptionWellKnown(
                        subscriberName, subscriptionDbName,
                        SubscriptionSyncType.Automatic,
                        MergeSubscriberType.Local, 0);
                }
            }
            else
            {
                // Do something here if the publication does not exist.
                throw new ApplicationException(String.Format(
                    "The publication '{0}' does not exist on {1}.",
                    publicationName, publisherName));
            }
        }
        catch (Exception ex)
        {
            // Implement the appropriate error handling here.
            throw new ApplicationException(String.Format("The subscription to {0} could not be created.", publicationName), ex);
        }
        finally
        {
            // NOTE(review): publisherConn is disconnected here even though
            // this method never explicitly called Connect on it -
            // LoadProperties presumably connects implicitly.
            publisherConn.Disconnect();
        }
    }
/// <summary>
/// Makes sure the named database exists on the local SQL Server instance,
/// creating it if necessary, then disables merge publishing on the local
/// replication database and creates the merge subscription if it is missing.
/// </summary>
/// <param name="whichDataBase">The name of the database to check for and create if absent.</param>
public void CreateDatabase(string whichDataBase)
{
    // Create the database on the local instance only if it does not exist yet.
    // (The previous version fetched a Database object here first, but that
    // value was either unused or immediately overwritten - dead code removed.)
    if (!theLocalSQLServer.Databases.Contains(whichDataBase))
    {
        //Application.DoEvents();
        Database db = new Database(theLocalSQLServer, whichDataBase);
        db.Create();
    }
    // Refresh the replication database and switch off merge publishing locally.
    localRepDB.Load();
    localRepDB.EnabledMergePublishing = false;
    localRepDB.CommitPropertyChanges();
    // If no merge subscription exists yet, set one up now.
    if (!mergeSubscription.LoadProperties())
    {
        CreateMergeSubscription();
    }
}
// Returns the SMO Database object for the replication database on the given server.
// NOTE(review): the databaseName parameter is ignored - the lookup uses
// replicationDatabase.Name instead. If callers ever pass a databaseName that
// differs from the replication database's name, this returns the wrong
// database. Confirm whether databaseName should be used for the lookup.
private Database LocalDBConn(string databaseName, Server server, ReplicationDatabase replicationDatabase)
{
    return server.Databases[replicationDatabase.Name];
}
/// <summary>
/// Checks for the existence of the Publication. If there is one it verifies Allow Pull is set.
/// </summary>
/// <returns>True if Publication is present. False if not.</returns>
public bool CheckForPublication()
{
    // If LoadProperties() returns FALSE the publication is missing/unreachable;
    // there is nothing meaningful to do with its attributes in that case.
    if (!mergePublication.LoadProperties())
    {
        return false;
    }
    // The publication exists - make sure pull subscriptions are allowed, as the
    // summary promises. (Previously this ran only AFTER an early 'return true',
    // so it was unreachable for an existing publication.)
    if ((mergePublication.Attributes & PublicationAttributes.AllowPull) == 0)
    {
        mergePublication.Attributes |= PublicationAttributes.AllowPull;
    }
    return true;
} // end CheckForPublication()
/// <summary>
/// Reports whether the merge pull subscription already exists on the server.
/// </summary>
/// <returns>True if a Subscription is present. False if not.</returns>
public bool CheckForSubscription()
{
    // IsExistingObject queries the server for the subscription's existence.
    bool exists = mergeSubscription.IsExistingObject;
    return exists;
} // end CheckForSubscription()
}
The Guerdon (Reward):
This is extremely important to me, so even if I am a flaming idiot and there is a super simple solution, I will be adding a bounty to the correct answer.
EDIT 2
I created this to try to remove the subscription first — which it does — but it still errors out on the DROP DATABASE portion, saying the database is in use...
class Program
{
    static void Main(string[] args)
    {
        // Order matters: the subscription must be removed before the database can go.
        DropSubscription();
        DropDB();
    }

    /// <summary>
    /// Removes the merge pull subscription from the local SQLEXPRESS instance.
    /// </summary>
    private static void DropSubscription()
    {
        ServerConnection subscriberConn = new ServerConnection(".\\SQLEXPRESS");
        MergePullSubscription mergePullSubscription = new MergePullSubscription("MATRIX", "WWCSTAGE", "MATRIX", "MATRIX", subscriberConn);
        mergePullSubscription.Remove();
    }

    /// <summary>
    /// Drops the local subscriber database.
    /// </summary>
    private static void DropDB()
    {
        string dbName = "MATRIX";
        // Connect to master, NOT to the database being dropped. The original code
        // used "Initial Catalog=" + dbName, so its own connection held the target
        // database open - the exact cause of the "database is in use" error.
        using (SqlConnection sqlConnection = new SqlConnection(
            "Server=.\\SQLEXPRESS;Initial Catalog=master;Integrated Security=True;User Instance=False"))
        {
            sqlConnection.Open();
            // Force off any other connections before dropping. dbName is a local
            // constant (not user input), so building the DDL string is safe here.
            string sql = "ALTER DATABASE [" + dbName + "] SET SINGLE_USER WITH ROLLBACK IMMEDIATE; " +
                         "DROP DATABASE [" + dbName + "]";
            using (SqlCommand cmd = new SqlCommand(sql, sqlConnection))
            {
                cmd.ExecuteNonQuery();
            }
        } // using disposes/closes the connection even if ExecuteNonQuery throws
    }
}
If you're in testing phase (and I certainly don't recommend significant schema changes on a production system), then just drop the subscription and database on the subscriber machines and start over. If you can connect to them through SSMS then you can do it from there; or if you have physical access to them you can do it with SQLCMD.
I have code for dropping subscriptions and databases using SMO but it has to be run on the subscriber. Let me know if you think it would be helpful and I'll post it.
Edited to add: OK, the code is below. I don't have time right now to clean it up so it's raw. RaiseSyncManagerStatus is a method to display the status back to the UI because these methods are invoked asynchronously. Hope this helps -- bring on the guerdon. :-)
/// <summary>
/// Drops the merge pull subscription on the Subscriber and then removes its
/// registration from the Publisher. Progress is reported via RaiseSyncManagerStatus
/// because this runs asynchronously. Any failure is logged and rethrown.
/// </summary>
public void DropSubscription()
{
    try
    {
        RaiseSyncManagerStatus(string.Format("Dropping subscription '{0}'.", _publicationName));

        Server subscriberServer = new Server(_subscriberName);
        MergePullSubscription pullSubscription = GetSubscription(subscriberServer.ConnectionContext);

        // Guard: if the subscription's properties cannot be loaded it does not
        // exist (or is unreachable), so there is nothing to remove.
        if (!pullSubscription.LoadProperties())
        {
            RaiseSyncManagerStatus("Failed to drop subscription; LoadProperties failed.");
            return;
        }

        // Remove at the Subscriber first - sysadmin/dbo roles would be required
        // to remove it from the Publisher side.
        pullSubscription.Remove();
        RaiseSyncManagerStatus("Subscription dropped.");

        // Now clean up the registration the Publisher keeps for this pull subscription.
        RaiseSyncManagerStatus("Removing subscription registration from the publisher.");
        Server publisherServer = new Server(_publisherName);
        MergePublication publication = GetPublication(publisherServer.ConnectionContext);
        publication.RemovePullSubscription(subscriberServer.Name, _subscriberDbName);
    }
    catch (Exception ex)
    {
        // Surface the failure to the UI, then rethrow preserving the stack trace.
        RaiseSyncManagerStatus(ex);
        throw;
    }
}
/// <summary>
/// Drops the subscriber database, refusing to proceed while a subscription
/// still exists. Uses Server.KillDatabase so open connections cannot block the
/// drop. Progress is reported via RaiseSyncManagerStatus; failures are logged
/// and rethrown.
/// </summary>
public void DropSubscriberDb()
{
    try
    {
        RaiseSyncManagerStatus(string.Format("Dropping subscriber database '{0}'.", _subscriberDbName));

        // A live subscription pins the database - it must be dropped first.
        if (SubscriptionValid())
        {
            throw new Exception("Subscription exists; cannot drop local database.");
        }

        Server subscriberServer = new Server(_subscriberName);
        Database subscriberDb = subscriberServer.Databases[_subscriberDbName];

        // Guard: nothing to do when the database is already gone.
        if (subscriberDb == null)
        {
            RaiseSyncManagerStatus("Subscriber database not found.");
            return;
        }

        RaiseSyncManagerStatus(string.Format("Subscriber database state: '{0}'.", subscriberDb.State));
        // KillDatabase terminates existing connections before dropping.
        subscriberServer.KillDatabase(_subscriberDbName);
        RaiseSyncManagerStatus("Subscriber database dropped.");
    }
    catch (Exception ex)
    {
        // Surface the failure to the UI, then rethrow preserving the stack trace.
        RaiseSyncManagerStatus(ex);
        throw;
    }
}
If I have understood your 'original' issue correctly, then you need to create a new Snapshot of the publication before it can be reinitialized. This is so that any structural changes you have made are applied to the subscribers.
See Adding Articles to and Dropping Articles from Existing Publications for more information and follow the specific steps for Merge replication.