I have added a Quartz scheduler in the Startup of my service like this:
services.AddQuartz(q =>
{
q.SchedulerId = "S1";
q.SchedulerName = "S1";
q.UseMicrosoftDependencyInjectionJobFactory();
q.UsePersistentStore(s =>
{
s.UseProperties = true;
s.UsePostgres("ConnectionString");
s.UseJsonSerializer();
});
});
Now I am trying to use the created scheduler via DI like this:
public SchedulerStartup(ISchedulerFactory schedulerFactory)
{
this.schedulerFactory = schedulerFactory;
}
public async Task StartAsync(CancellationToken cancellationToken)
{
Scheduler = await schedulerFactory.GetScheduler("S1", cancellationToken);
await Scheduler.Start(cancellationToken);
}
But somehow the scheduler is null; I am not able to access the scheduler ("S1") that was created in the startup configuration.
Link: https://www.quartz-scheduler.net/documentation/quartz-3.x/packages/microsoft-di-integration.html#di-aware-job-factories
It turned out I had missed services.AddQuartzHostedService(), which starts the scheduler via a hosted service. An additional startup class is not required.
This should be like this:
services.AddQuartz(q =>
{
q.SchedulerName = "S1";
q.UseMicrosoftDependencyInjectionJobFactory();
q.UsePersistentStore(s =>
{
s.UseProperties = true;
s.UsePostgres(DbConnectionString);
s.UseJsonSerializer();
});
});
services.AddQuartzHostedService(options =>
{
options.WaitForJobsToComplete = true;
});
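For completeness, jobs and triggers can also be registered inside the same AddQuartz call; a rough sketch (MyJob is a hypothetical IJob implementation, not part of the original post):
services.AddQuartz(q =>
{
    // ... scheduler name / persistent store configuration as above ...

    var jobKey = new JobKey("my-job");              // hypothetical job key
    q.AddJob<MyJob>(j => j.WithIdentity(jobKey));   // MyJob : IJob is assumed
    q.AddTrigger(t => t
        .ForJob(jobKey)
        .WithIdentity("my-job-trigger")
        .WithSimpleSchedule(s => s.WithIntervalInHours(24).RepeatForever()));
});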
Later, the created scheduler instance ("S1") can be retrieved like this:
public MyRuntimeScheduler(ISchedulerFactory schedulerFactory)
{
Scheduler = schedulerFactory.GetScheduler("S1").GetAwaiter().GetResult();
}
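If blocking with GetAwaiter().GetResult() in the constructor is undesirable, the scheduler can also be resolved lazily in an async method; a minimal sketch assuming the registration above (the class and method names here are illustrative, not from the original post):
public class RuntimeSchedulerAccessor
{
    private readonly ISchedulerFactory schedulerFactory;

    public RuntimeSchedulerAccessor(ISchedulerFactory schedulerFactory)
    {
        this.schedulerFactory = schedulerFactory;
    }

    public async Task ScheduleAsync(IJobDetail job, ITrigger trigger, CancellationToken ct = default)
    {
        // "S1" is the scheduler name configured in AddQuartz; it is already started by the
        // Quartz hosted service, so it only needs to be resolved here (null if the name is unknown).
        var scheduler = await schedulerFactory.GetScheduler("S1", ct);
        await scheduler.ScheduleJob(job, trigger, ct);
    }
}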
I have an ASP.NET Core Web API which uses queued background tasks as described here.
I've used the code sample provided and added the IBackgroundTaskQueue, BackgroundTaskQueue and QueuedHostedService exactly as described in the article.
In my Startup.cs, I'm registering only one QueuedHostedService instance as follows: services.AddHostedService<QueuedHostedService>();
Tasks coming from the WebApi's controller are enqueued and then dequeued and executed one by one by the QueuedHostedService.
I would like to allow more than one background processing thread to dequeue and execute the incoming tasks.
The most straightforward solution I can come up with is to register more than one instance of QueuedHostedService in my Startup.cs, i.e. something like this:
int maxNumOfParallelOperations;
var isValid = int.TryParse(Configuration["App:MaxNumOfParallelOperations"], out maxNumOfParallelOperations);
maxNumOfParallelOperations = isValid && maxNumOfParallelOperations > 0 ? maxNumOfParallelOperations : 2;
for (int index = 0; index < maxNumOfParallelOperations; index++)
{
services.AddHostedService<QueuedHostedService>();
}
I've also noticed that, thanks to the signal semaphore in BackgroundTaskQueue, the QueuedHostedService instances are not busy all the time, but only wake up when a new task is available in the queue.
This solution seems to work just fine in my tests.
But in this particular use case, is it really a valid, recommended solution for parallel processing?
You can use an IHostedService with a number of threads to consume the IBackgroundTaskQueue.
Here is a basic implementation. I assume you're using the same IBackgroundTaskQueue and BackgroundTaskQueue described here.
public class QueuedHostedService : IHostedService
{
private readonly ILogger _logger;
private readonly Task[] _executors;
private readonly int _executorsCount = 2; //--default value: 2
private CancellationTokenSource _tokenSource;
public IBackgroundTaskQueue TaskQueue { get; }
public QueuedHostedService(IBackgroundTaskQueue taskQueue,
ILoggerFactory loggerFactory,
IConfiguration configuration)
{
TaskQueue = taskQueue;
_logger = loggerFactory.CreateLogger<QueuedHostedService>();
if (ushort.TryParse(configuration["App:MaxNumOfParallelOperations"], out var ct))
{
_executorsCount = ct;
}
_executors = new Task[_executorsCount];
}
public Task StartAsync(CancellationToken cancellationToken)
{
_logger.LogInformation("Queued Hosted Service is starting.");
// link the host token with our own so StopAsync can also signal cancellation
_tokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
for (var i = 0; i < _executorsCount; i++)
{
// Task.Run (instead of the Task constructor + Start) is used so the stored Task
// represents the whole async loop and can be waited on in StopAsync
_executors[i] = Task.Run(async () =>
{
while (!_tokenSource.Token.IsCancellationRequested)
{
try
{
#if DEBUG
_logger.LogInformation("Waiting background task...");
#endif
var workItem = await TaskQueue.DequeueAsync(_tokenSource.Token);
#if DEBUG
_logger.LogInformation("Got background task, executing...");
#endif
await workItem(_tokenSource.Token);
}
catch (OperationCanceledException)
{
// shutdown was requested; leave the loop
}
catch (Exception ex)
{
_logger.LogError(ex, "Error occurred executing background work item.");
}
}
}, _tokenSource.Token);
}
return Task.CompletedTask;
}
public Task StopAsync(CancellationToken cancellationToken)
{
_logger.LogInformation("Queued Hosted Service is stopping.");
_tokenSource.Cancel(); // send the cancellation signal
if (_executors != null)
{
// wait for _executors completion
Task.WaitAll(_executors, cancellationToken);
}
return Task.CompletedTask;
}
}
You need to register the services in ConfigureServices in the Startup class:
...
services.AddSingleton<IBackgroundTaskQueue, BackgroundTaskQueue>();
services.AddHostedService<QueuedHostedService>();
...
Additionally, you can set the number of parallel executors in configuration (appsettings.json):
...
"App": {
"MaxNumOfParallelOperations": 4
}
...
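For reference, the queue implementation assumed above follows the ConcurrentQueue + SemaphoreSlim pattern from the Microsoft docs article mentioned in the question; a minimal sketch (requires System.Collections.Concurrent and System.Threading):
public interface IBackgroundTaskQueue
{
    void QueueBackgroundWorkItem(Func<CancellationToken, Task> workItem);
    Task<Func<CancellationToken, Task>> DequeueAsync(CancellationToken cancellationToken);
}

public class BackgroundTaskQueue : IBackgroundTaskQueue
{
    private readonly ConcurrentQueue<Func<CancellationToken, Task>> _workItems =
        new ConcurrentQueue<Func<CancellationToken, Task>>();
    private readonly SemaphoreSlim _signal = new SemaphoreSlim(0);

    public void QueueBackgroundWorkItem(Func<CancellationToken, Task> workItem)
    {
        if (workItem == null) throw new ArgumentNullException(nameof(workItem));
        _workItems.Enqueue(workItem);
        _signal.Release(); // wake exactly one waiting executor
    }

    public async Task<Func<CancellationToken, Task>> DequeueAsync(CancellationToken cancellationToken)
    {
        await _signal.WaitAsync(cancellationToken); // waits (asynchronously) until an item is queued
        _workItems.TryDequeue(out var workItem);
        return workItem;
    }
}
Because each executor awaits the semaphore, idle executors consume no CPU, and each enqueued item releases the semaphore exactly once, so exactly one executor picks it up.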
I am playing with Orleans, but instead of relying on the network (and hence on endpoint configuration) I would like to be able to host the grains in-process, as in the code below:
public interface IGreeter : IActorGrain
{
}
public class Greeter : DispatchActorGrain, IGreeter
{
void On(Greet msg) => WriteLine($"Hello, {msg.Who}");
}
[SerializableAttribute]
public class Greet
{
public string Who { get; set; }
}
public static class Program
{
public static async Task Main()
{
WriteLine("Running example. Booting cluster might take some time ...\n");
var host = new SiloHostBuilder()
.Configure<ClusterOptions>(options =>
{
options.ClusterId = "localhost-demo";
options.ServiceId = "localhost-demo-service";
})
.Configure<SchedulingOptions>(options =>
{
options.AllowCallChainReentrancy = false;
})
.Configure<SiloMessagingOptions>(options =>
{
options.ResponseTimeout = TimeSpan.FromSeconds(5);
options.ResponseTimeoutWithDebugger = TimeSpan.FromSeconds(5);
})
.ConfigureLogging(logging =>
{
logging.SetMinimumLevel(LogLevel.Information);
logging.AddConsole();
})
.UseDevelopmentClustering(options => options.PrimarySiloEndpoint = new IPEndPoint(IPAddress.Loopback, 30000))
.ConfigureEndpoints(IPAddress.Loopback, 11111, 30000)
.ConfigureApplicationParts(x => x
.AddApplicationPart(Assembly.GetExecutingAssembly())
.WithCodeGeneration())
.UseOrleankka()
.Build();
await host.StartAsync();
var client = new ClientBuilder()
.Configure<ClusterOptions>(options => {
options.ClusterId = "localhost-demo";
options.ServiceId = "localhost-demo-service";
})
.UseStaticClustering(options => options.Gateways.Add(new IPEndPoint(IPAddress.Loopback, 30000).ToGatewayUri()))
.ConfigureApplicationParts(x => x
.AddApplicationPart(Assembly.GetExecutingAssembly())
.WithCodeGeneration())
.UseOrleankka()
.Build();
await client.Connect();
var greeter = client.ActorSystem().ActorOf<IGreeter>("id");
await greeter.Tell(new Greet {Who = "world"});
Write("\n\nPress any key to terminate ...");
ReadKey(true);
}
}
Is it possible?
It is totally possible to use Orleans as a single process without clustering (which I did during testing and pre-production stages), but you will lose availability.
Silos are meant to run as long-lived processes in a cluster, so the ~30 second startup time of a single node should not be an issue in cloud environments. If you need a single-host actor system, Akka.NET might be a better fit.
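That said, if you do want a single in-process silo, Orleans offers localhost clustering, which avoids configuring external endpoints and membership; a rough sketch (assuming Orleans 3.x, without the Orleankka parts):
var host = new SiloHostBuilder()
    // single silo on loopback with in-memory membership; no external endpoint configuration
    .UseLocalhostClustering()
    .Configure<ClusterOptions>(options =>
    {
        options.ClusterId = "localhost-demo";
        options.ServiceId = "localhost-demo-service";
    })
    .ConfigureApplicationParts(parts => parts
        .AddApplicationPart(Assembly.GetExecutingAssembly())
        .WithCodeGeneration())
    .Build();

await host.StartAsync();

// A co-hosted client can then be obtained from the silo's own container,
// e.g. host.Services.GetRequiredService<IGrainFactory>(), instead of a separate ClientBuilder.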
I'm using the newest version of Quartz.NET, 3.0.6.
I want to schedule a process to run each day (currently, in the example, it runs every 40 seconds, but I will change that).
I have a method that I call to execute the job; however, it does not seem to start the process that I set up in the Job class. I'm not sure why it's not triggering.
public static void Run()
{
var isRunning = false;
IServiceCollection services = new ServiceCollection();
Startup startup = new Startup();
var serviceProvider = startup.ConfigureServices(services);
IWindsorContainer _container = serviceProvider.GetService<IWindsorContainer>();
while (true)
{
if (!isRunning)
{
isRunning = true;
var configuration = _container.Resolve<IConfigurationRoot>();
var _process = _container.Resolve<Process.Process>();
int secondsForSleep = Convert.ToUInt16(configuration[Enums.SleepTime]);
{
try
{
execute();
}
catch (Exception ex)
{
}
finally
{
isRunning = false;
Thread.Sleep(secondsForSleep);
_container.Release(_process);
}
}
}
}
}
public static async void execute()
{
// construct a scheduler factory
NameValueCollection props = new NameValueCollection
{
{ "quartz.serializer.type", "binary" }
};
StdSchedulerFactory factory = new StdSchedulerFactory(props);
// get a scheduler
IScheduler sched = await factory.GetScheduler();
await sched.Start();
IJobDetail job = JobBuilder.Create<Job>()
.WithIdentity("myJob", "group1")
.Build();
ITrigger trigger = TriggerBuilder.Create()
.WithIdentity("myTrigger", "group1")
.StartNow()
.WithSimpleSchedule(x => x
.WithIntervalInSeconds(1)
.RepeatForever())
.Build();
await sched.ScheduleJob(job, trigger);
}
}
public class Job : IJob
{
public async Task Execute(IJobExecutionContext context)
{
JobKey key = context.JobDetail.Key;
IServiceCollection services = new ServiceCollection();
Startup startup = new Startup();
var serviceProvider = startup.ConfigureServices(services);
IWindsorContainer _container = serviceProvider.GetService<IWindsorContainer>();
var configuration = _container.Resolve<IConfigurationRoot>();
var _process = _container.Resolve<Process.Process>();
Task t = new Task(() =>
{
_process.MainProcess();
});
t.Start();
await t;
}
}
Am I setting up the IJobDetail object incorrectly, or something else? Has anyone had a similar problem? It seems like I did everything according to the documentation, but I still can't get it working.
This is not a direct answer to your question, but it may help if the problem is related to the following.
You have defined the execute method as async void. As a result, you cannot call it with await execute(), and so:
your try-catch block will not handle any exceptions raised in the execute method, and
the finally block runs immediately after execute() is called, without waiting for the method to finish.
In other words, change the signature to async Task execute(), call it with await execute(), and test it again.
For more background, see this SO question: async/await - when to return a Task vs void? and MSDN: Async/Await - Best Practices.
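A minimal sketch of the suggested change, keeping the original names (Run would also need to become async Task for the await to compile):
// the method now returns Task instead of void
public static async Task execute()
{
    // ... scheduler setup exactly as in the original execute() ...
}

// at the call site, the method is awaited, so try/catch/finally behave as expected
try
{
    await execute();
}
catch (Exception)
{
    // exceptions thrown inside execute are now observed here
}
finally
{
    // runs only after execute() has actually finished
    isRunning = false;
}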
I've tried using MassTransit to publish a message to a topic named events in an Azure Service Bus. I have problems configuring MassTransit to use my predefined topic events; instead, it creates a new topic named after the namespace/class name of the message type. So I wonder how to specify which topic to use instead of having a new one created.
This is the code I've tested with:
using System;
using System.Threading.Tasks;
using MassTransit;
using MassTransit.AzureServiceBusTransport;
using Microsoft.ServiceBus;
namespace PublisherNameSpace
{
public class Publisher
{
public static async Task PublishMessage()
{
var topic = "events";
var bus = Bus.Factory.CreateUsingAzureServiceBus(
cfg =>
{
var azureServiceBusHost = cfg.Host(new Uri("sb://<busname>.servicebus.windows.net"), host =>
{
host.OperationTimeout = TimeSpan.FromSeconds(5);
host.TokenProvider =
TokenProvider.CreateSharedAccessSignatureTokenProvider(
"RootManageSharedAccessKey",
"<key>"
);
});
cfg.ReceiveEndpoint(azureServiceBusHost, topic, e =>
{
e.Consumer<TestConsumer>();
});
});
await bus.Publish<TestMessage>(new TestMessage { TestString = "testing" });
}
}
public class TestConsumer : IConsumer<TestMessage>
{
public Task Consume(ConsumeContext<TestMessage> context)
{
return Console.Out.WriteAsync("Consuming message");
}
}
public class TestMessage
{
public string TestString { get; set; }
}
}
The accepted answer clears up the subscription side:
cfg.SubscriptionEndpoint(
host,
"sub-1",
"my-topic-1",
e =>
{
e.ConfigureConsumer<TestConsumer>(provider);
});
For those wondering how to get the bus configuration right on the publish side, it should look like:
cfg.Message<TestMessage>(x =>
{
x.SetEntityName("my-topic-1");
});
You can then call publish on the bus:
await bus.Publish<TestMessage>(message);
Thanks to @ChrisPatterson for pointing this out to me!
If you want to consume from a specific topic, create a subscription endpoint instead of a receive endpoint, and specify the topic and subscription name in the configuration.
The simplest form is shown in the unit tests:
https://github.com/MassTransit/MassTransit/blob/develop/tests/MassTransit.Azure.ServiceBus.Core.Tests/Subscription_Specs.cs
I was able to send to an Azure Service Bus topic using _sendEndpointProvider.GetSendEndpoint(new Uri("topic:shape")), where "shape" is the topic name.
public class MassTransitController : ControllerBase
{
private readonly ILogger<MassTransitController> _logger;
private readonly ISendEndpointProvider _sendEndpointProvider;
public MassTransitController(ILogger<MassTransitController> logger, ISendEndpointProvider sendEndpointProvider)
{
_logger = logger;
_sendEndpointProvider = sendEndpointProvider;
}
[HttpGet]
public async Task<IActionResult> Get()
{
try
{
var randomType = new Random();
var randomColor = new Random();
var shape = new Shape();
shape.ShapeId = Guid.NewGuid();
shape.Color = ShapeType.ShapeColors[randomColor.Next(ShapeType.ShapeColors.Count)];
shape.Type = ShapeType.ShapeTypes[randomType.Next(ShapeType.ShapeTypes.Count)];
var endpoint = await _sendEndpointProvider.GetSendEndpoint(new Uri("topic:shape"));
await endpoint.Send(shape);
return Ok(shape);
}
catch (Exception ex)
{
throw; // rethrow without resetting the stack trace
}
}
}
I also was able to get a .NET 5 worker consumer working with code like this, where the subscription "sub-all" catches all shapes. I'm going to make a blog post / git repo of this.
public static IHostBuilder CreateHostBuilder(string[] args) =>
Host.CreateDefaultBuilder(args)
.ConfigureServices((hostContext, services) =>
{
services.AddMassTransit(x =>
{
x.UsingAzureServiceBus((context, cfg) =>
{
cfg.Host("Endpoint=sb://******");
cfg.SubscriptionEndpoint(
"sub-all",
"shape",
e =>
{
e.Handler<Shape>(async context =>
{
await Console.Out.WriteLineAsync($"Shape Received: {context.Message.Type}");
});
e.MaxDeliveryCount = 15;
});
});
});
services.AddMassTransitHostedService();
});
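If you prefer Publish over Send with this DI-style configuration, the message type can be mapped to the existing topic via the message topology, mirroring the SetEntityName call shown earlier; a rough sketch (not verified against this exact setup):
x.UsingAzureServiceBus((context, cfg) =>
{
    cfg.Host("Endpoint=sb://******");

    // map the Shape message type to the existing "shape" topic
    cfg.Message<Shape>(m => m.SetEntityName("shape"));
});

// later, anywhere IPublishEndpoint (or IBus) is injected:
// await publishEndpoint.Publish(shape);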
I have a project where I use TopShelf and TopShelf.Quartz.
Following this example, I am building my jobs with:
s.ScheduleQuartzJob(q =>
q.WithJob(() => JobBuilder.Create<MyJob>().Build())
.AddTrigger(() => TriggerBuilder.Create()
.WithSimpleSchedule(builder => builder
.WithIntervalInSeconds(5)
.RepeatForever())
.Build())
);
which fires my job every five seconds, even if the previous run is still in progress. What I really want to achieve is to start the job and, after it completes, wait five seconds and start it again. Is this possible, or do I have to implement my own logic (for example via a static variable)?
A job listener, as proposed by @NateKerkhofs, will work, like this:
public class RepeatAfterCompletionJobListener : IJobListener
{
private readonly TimeSpan interval;
public RepeatAfterCompletionJobListener(TimeSpan interval)
{
this.interval = interval;
}
public void JobExecutionVetoed(IJobExecutionContext context)
{
}
public void JobToBeExecuted(IJobExecutionContext context)
{
}
public void JobWasExecuted(IJobExecutionContext context, JobExecutionException jobException)
{
string triggerKey = context.JobDetail.Key.Name + ".trigger";
var trigger = TriggerBuilder.Create()
.WithIdentity(triggerKey)
.StartAt(new DateTimeOffset(DateTime.UtcNow.Add(interval)))
.Build();
context.Scheduler.RescheduleJob(new TriggerKey(triggerKey), trigger);
}
public string Name
{
get
{
return "RepeatAfterCompletionJobListener";
}
}
}
Then add the listener to the scheduler:
var jobKey = "myJobKey";
var schedule = new StdSchedulerFactory().GetScheduler();
var listener = new RepeatAfterCompletionJobListener(TimeSpan.FromSeconds(5));
schedule.ListenerManager.AddJobListener(listener, KeyMatcher<JobKey>.KeyEquals(new JobKey(jobKey)));
var job = JobBuilder.Create<MyJob>()
.WithIdentity(jobKey)
.Build();
// Schedule the job to start in 5 seconds to give the service time to initialise
var trigger = TriggerBuilder.Create()
.WithIdentity(CreateTriggerKey(jobKey))
.StartAt(DateTimeOffset.Now.AddSeconds(5))
.Build();
schedule.ScheduleJob(job, trigger);
Unfortunately I don't know how to do this (or whether it can be done) with the fluent syntax used by the TopShelf.Quartz library; I use this with TopShelf and regular Quartz.NET.
You can use a TriggerListener (http://www.quartz-scheduler.net/documentation/quartz-2.x/tutorial/trigger-and-job-listeners.html) to listen for when the trigger finishes and then reschedule 5 seconds later.
Another option is to schedule the next job as the final action in the Execute of the job itself; a sketch of this follows below.
http://www.quartz-scheduler.net/documentation/faq.html has a question about two-thirds of the way down that explains this in more detail.
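A rough sketch of that second option, a job that reschedules itself at the end of Execute (using the synchronous Quartz 2.x-era API used elsewhere in this thread; SelfReschedulingJob is an illustrative name):
public class SelfReschedulingJob : IJob
{
    public void Execute(IJobExecutionContext context)
    {
        // ... do the actual work ...

        // schedule the next run of this same job, 5 seconds from now
        var nextTrigger = TriggerBuilder.Create()
            .ForJob(context.JobDetail)
            .StartAt(DateTimeOffset.UtcNow.AddSeconds(5))
            .Build();

        context.Scheduler.ScheduleJob(nextTrigger);
    }
}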
The JobListener solution is a very powerful and flexible way to reschedule your job after completion. Thanks to Nate Kerkhofs and stuartd for the input.
In my case it was sufficient to decorate my Job class with the DisallowConcurrentExecution attribute, since I don't have different instances of my job:
[DisallowConcurrentExecution]
public class MyJob : IJob
{
}
FYI: using a JobListener with TopShelf.Quartz, the code could look like this:
var jobName = "MyJob";
var jobKey = new JobKey(jobName);
s.ScheduleQuartzJob(q =>
    q.WithJob(() => JobBuilder.Create<MyJob>()
        .WithIdentity(jobKey).Build())
    .AddTrigger(() => TriggerBuilder.Create()
        // the trigger identity must match the key the listener rebuilds (jobName + ".trigger")
        .WithIdentity(jobName + ".trigger")
        .WithSimpleSchedule(builder => builder
            .WithIntervalInSeconds(5))
        .Build()));

var listener = new RepeatAfterCompletionJobListener(TimeSpan.FromSeconds(5));
var listenerManager = ScheduleJobServiceConfiguratorExtensions.SchedulerFactory().ListenerManager;
listenerManager.AddJobListener(listener, KeyMatcher<JobKey>.KeyEquals(jobKey));
If you are using TopShelf.Quartz.Ninject (as I do), don't forget to call UseQuartzNinject() before calling ScheduleJobServiceConfiguratorExtensions.SchedulerFactory().
The best way I found is to add a simple job listener.
In my example it reschedules the job immediately after a failure.
Of course, you can add a delay in .StartAt(DateTime.UtcNow).
public class QuartzRetryJobListner : IJobListener
{
public string Name => GetType().Name;
public async Task JobExecutionVetoed(IJobExecutionContext context, CancellationToken cancellationToken = default) => await Task.CompletedTask;
public async Task JobToBeExecuted(IJobExecutionContext context, CancellationToken cancellationToken = default) => await Task.CompletedTask;
public async Task JobWasExecuted(
IJobExecutionContext context,
JobExecutionException jobException,
CancellationToken cancellationToken = default)
{
if (jobException == null) return;
// Create and schedule new trigger
ITrigger retryTrigger = TriggerBuilder.Create()
.StartAt(DateTime.UtcNow)
.Build();
await context.Scheduler.ScheduleJob(context.JobDetail, new[] { retryTrigger }, true);
}
}
Also, I think it's useful to add an extension method:
public static class QuartzExtensions
{
public static void RepeatJobAfterFall(this IScheduler scheduler, IJobDetail job)
{
scheduler.ListenerManager.AddJobListener(
new QuartzRetryJobListner(),
KeyMatcher<JobKey>.KeyEquals(job.Key));
}
}
Just to simplify usage:
_scheduler.ScheduleJob(job, trigger);
// In case of failure, repeat the job immediately
_scheduler.RepeatJobAfterFall(job);