Diffstat (limited to 'plugins/jobs/drivers/ephemeral')
-rw-r--r--  plugins/jobs/drivers/ephemeral/consumer.go  244
-rw-r--r--  plugins/jobs/drivers/ephemeral/item.go      115
-rw-r--r--  plugins/jobs/drivers/ephemeral/plugin.go     41
3 files changed, 400 insertions, 0 deletions
diff --git a/plugins/jobs/drivers/ephemeral/consumer.go b/plugins/jobs/drivers/ephemeral/consumer.go
new file mode 100644
index 00000000..95ad6ecd
--- /dev/null
+++ b/plugins/jobs/drivers/ephemeral/consumer.go
@@ -0,0 +1,244 @@
+package ephemeral
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/pkg/events"
+ priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/job"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const (
+ prefetch string = "prefetch"
+ goroutinesMax uint64 = 1000
+)
+
+// Config is the configuration of the ephemeral (in-memory) pipeline.
+type Config struct {
+ // Prefetch limits how many items may sit in the local prefetch queue.
+ Prefetch uint64 `mapstructure:"prefetch"`
+}
+
+type JobConsumer struct {
+ // cfg may be nil when the consumer is built via FromPipeline
+ cfg *Config
+ log logger.Logger
+ eh events.Handler
+ // pipeline name -> active flag (bool)
+ pipeline sync.Map
+ pq priorityqueue.Queue
+ localPrefetch chan *Item
+
+ // number of delayed-item goroutines currently sleeping (bounded by goroutinesMax)
+ goroutines uint64
+
+ stopCh chan struct{}
+}
+
+func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) {
+ const op = errors.Op("new_ephemeral_pipeline")
+
+ jb := &JobConsumer{
+ log: log,
+ pq: pq,
+ eh: eh,
+ goroutines: 0,
+ stopCh: make(chan struct{}, 1),
+ }
+
+ err := cfg.UnmarshalKey(configKey, &jb.cfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ if jb.cfg.Prefetch == 0 {
+ jb.cfg.Prefetch = 100_000
+ }
+
+ // initialize a local queue
+ jb.localPrefetch = make(chan *Item, jb.cfg.Prefetch)
+
+ // consume from the queue
+ go jb.consume()
+
+ return jb, nil
+}
+
+func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) {
+ jb := &JobConsumer{
+ log: log,
+ pq: pq,
+ eh: eh,
+ goroutines: 0,
+ stopCh: make(chan struct{}, 1),
+ }
+
+ // initialize a local queue
+ jb.localPrefetch = make(chan *Item, pipeline.Int(prefetch, 100_000))
+
+ // consume from the queue
+ go jb.consume()
+
+ return jb, nil
+}
+
+func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error {
+ const op = errors.Op("ephemeral_push")
+
+ // check if the pipeline is registered
+ b, ok := j.pipeline.Load(jb.Options.Pipeline)
+ if !ok {
+ return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline))
+ }
+
+ if !b.(bool) {
+ return errors.E(op, errors.Errorf("pipeline disabled: %s", jb.Options.Pipeline))
+ }
+
+ err := j.handleItem(ctx, fromJob(jb))
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error {
+ const op = errors.Op("ephemeral_handle_request")
+ // handle delayed jobs
+ // a misbehaving user could push millions of delayed jobs and spawn an unbounded number
+ // of sleeping goroutines, so the number of such goroutines is capped at goroutinesMax
+ if msg.Options.Delay > 0 {
+ // if goroutinesMax (1000) goroutines are already waiting on a delay - reject the next one
+ if atomic.LoadUint64(&j.goroutines) >= goroutinesMax {
+ return errors.E(op, errors.Str("max concurrency number reached"))
+ }
+
+ // reserve the slot before spawning the goroutine so concurrent pushes cannot race past the limit
+ atomic.AddUint64(&j.goroutines, 1)
+ go func(jj *Item) {
+ time.Sleep(jj.Options.DelayDuration())
+
+ // send the item after the delay has expired
+ j.localPrefetch <- jj
+
+ atomic.AddUint64(&j.goroutines, ^uint64(0))
+ }(msg)
+
+ return nil
+ }
+
+ // insert into the local, bounded prefetch queue
+ select {
+ case j.localPrefetch <- msg:
+ return nil
+ case <-ctx.Done():
+ return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", j.cfg.Prefetch, ctx.Err()))
+ }
+}
+
+func (j *JobConsumer) consume() {
+ // redirect items from the local prefetch queue into the shared priority queue
+ for {
+ select {
+ case item, ok := <-j.localPrefetch:
+ if !ok {
+ j.log.Warn("ephemeral local prefetch queue was closed")
+ return
+ }
+
+ // set the requeue callback
+ item.Options.requeueFn = j.handleItem
+
+ j.pq.Insert(item)
+ case <-j.stopCh:
+ return
+ }
+ }
+}
+
+func (j *JobConsumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error {
+ const op = errors.Op("ephemeral_register")
+ if _, ok := j.pipeline.Load(pipeline.Name()); ok {
+ return errors.E(op, errors.Errorf("queue %s has already been registered", pipeline))
+ }
+
+ j.pipeline.Store(pipeline.Name(), true)
+
+ return nil
+}
+
+func (j *JobConsumer) Pause(_ context.Context, pipeline string) {
+ q, ok := j.pipeline.Load(pipeline)
+ if !ok {
+ // no such pipeline registered, nothing to pause
+ return
+ }
+
+ if !q.(bool) {
+ // the pipeline is already paused, do not send EventPipePaused again
+ return
+ }
+
+ // mark the pipeline as paused
+ j.pipeline.Store(pipeline, false)
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipePaused,
+ Pipeline: pipeline,
+ Start: time.Now(),
+ Elapsed: 0,
+ })
+}
+
+func (j *JobConsumer) Resume(_ context.Context, pipeline string) {
+ q, ok := j.pipeline.Load(pipeline)
+ if !ok {
+ // no such pipeline registered, nothing to resume
+ return
+ }
+
+ if q.(bool) {
+ // the pipeline is already active, do not send EventPipeActive again
+ return
+ }
+
+ // mark the pipeline as active
+ j.pipeline.Store(pipeline, true)
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeActive,
+ Pipeline: pipeline,
+ Start: time.Now(),
+ Elapsed: 0,
+ })
+}
+
+// Run is effectively a no-op for the ephemeral driver: it only reports the pipeline as active
+func (j *JobConsumer) Run(_ context.Context, pipe *pipeline.Pipeline) error {
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeActive,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+ return nil
+}
+
+func (j *JobConsumer) Stop(ctx context.Context) error {
+ const op = errors.Op("ephemeral_plugin_stop")
+ var pipe string
+ j.pipeline.Range(func(key, _ interface{}) bool {
+ pipe = key.(string)
+ j.pipeline.Delete(key)
+ return true
+ })
+
+ select {
+ // signal the consume() goroutine to return
+ case j.stopCh <- struct{}{}:
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeStopped,
+ Pipeline: pipe,
+ Start: time.Now(),
+ Elapsed: 0,
+ })
+ return nil
+
+ case <-ctx.Done():
+ return errors.E(op, ctx.Err())
+ }
+}
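
For orientation, a caller-side sketch of the delay path in handleItem above. The import paths are taken from this diff; the package name and the pushDelayed helper are hypothetical, and it is assumed that j.Options is non-nil and that the pipeline named in j.Options.Pipeline has already been Register()-ed.

    // Hypothetical sketch, not part of this commit.
    package sketch

    import (
        "context"

        "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/ephemeral"
        "github.com/spiral/roadrunner/v2/plugins/jobs/job"
    )

    // pushDelayed exercises the delay branch of handleItem: the item is parked in a
    // sleeping goroutine (at most goroutinesMax = 1000 at once) and re-inserted into
    // the local prefetch queue once the delay expires.
    func pushDelayed(ctx context.Context, c *ephemeral.JobConsumer, j *job.Job) error {
        j.Options.Delay = 5 // seconds; converted by DelayDuration() into a time.Sleep
        return c.Push(ctx, j)
    }
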
diff --git a/plugins/jobs/drivers/ephemeral/item.go b/plugins/jobs/drivers/ephemeral/item.go
new file mode 100644
index 00000000..1a61d7e9
--- /dev/null
+++ b/plugins/jobs/drivers/ephemeral/item.go
@@ -0,0 +1,115 @@
+package ephemeral
+
+import (
+ "context"
+ "time"
+
+ json "github.com/json-iterator/go"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/job"
+ "github.com/spiral/roadrunner/v2/utils"
+)
+
+type Item struct {
+ // Job contains the name of the job (usually the PHP class that handles it).
+ Job string `json:"job"`
+
+ // Ident is a unique identifier of the job; it should be provided from outside.
+ Ident string `json:"id"`
+
+ // Payload is string data (usually JSON) passed to the job handler.
+ Payload string `json:"payload"`
+
+ // Headers contains key-value pairs attached to the job.
+ Headers map[string][]string `json:"headers"`
+
+ // Options contains a set of pipeline options specific to job execution. Can be empty.
+ Options *Options `json:"options,omitempty"`
+}
+
+// Options carry information about how to handle a given job.
+type Options struct {
+ // Priority is the job priority (default 10).
+ Priority int64 `json:"priority"`
+
+ // Pipeline is the manually specified pipeline name.
+ Pipeline string `json:"pipeline,omitempty"`
+
+ // Delay defines the execution delay, in seconds. Defaults to none.
+ Delay int64 `json:"delay,omitempty"`
+
+ // requeueFn is set by the consumer and re-inserts the item into the local prefetch queue.
+ requeueFn func(context.Context, *Item) error
+}
+
+// DelayDuration returns delay duration in a form of time.Duration.
+func (o *Options) DelayDuration() time.Duration {
+ return time.Second * time.Duration(o.Delay)
+}
+
+func (i *Item) ID() string {
+ return i.Ident
+}
+
+func (i *Item) Priority() int64 {
+ return i.Options.Priority
+}
+
+// Body packs job payload into binary payload.
+func (i *Item) Body() []byte {
+ return utils.AsBytes(i.Payload)
+}
+
+// Context packs the job context (id, job, headers, pipeline) into a binary payload.
+func (i *Item) Context() ([]byte, error) {
+ ctx, err := json.Marshal(
+ struct {
+ ID string `json:"id"`
+ Job string `json:"job"`
+ Headers map[string][]string `json:"headers"`
+ Pipeline string `json:"pipeline"`
+ }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
+ )
+
+ if err != nil {
+ return nil, err
+ }
+
+ return ctx, nil
+}
+
+func (i *Item) Ack() error {
+ // noop for the in-memory
+ return nil
+}
+
+func (i *Item) Nack() error {
+ // noop for the in-memory
+ return nil
+}
+
+func (i *Item) Requeue(headers map[string][]string, delay int64) error {
+ // overwrite the delay
+ i.Options.Delay = delay
+ i.Headers = headers
+
+ err := i.Options.requeueFn(context.Background(), i)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func fromJob(job *job.Job) *Item {
+ return &Item{
+ Job: job.Job,
+ Ident: job.Ident,
+ Payload: job.Payload,
+ Options: &Options{
+ Priority: job.Options.Priority,
+ Pipeline: job.Options.Pipeline,
+ Delay: job.Options.Delay,
+ },
+ }
+}
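
For illustration, the wire format produced by Context() above, derived from the struct tags (a sketch, not output captured from the driver; it would live inside the ephemeral package):

    // Hypothetical sketch, not part of this commit.
    func exampleContext() {
        i := &Item{Ident: "123", Job: "ping", Payload: `{"n":1}`, Options: &Options{Pipeline: "local"}}
        ctx, _ := i.Context()
        // ctx is roughly: {"id":"123","job":"ping","headers":null,"pipeline":"local"}
        // the payload itself travels separately via Body(), the priority via Priority()
        _ = ctx
    }
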
diff --git a/plugins/jobs/drivers/ephemeral/plugin.go b/plugins/jobs/drivers/ephemeral/plugin.go
new file mode 100644
index 00000000..28495abb
--- /dev/null
+++ b/plugins/jobs/drivers/ephemeral/plugin.go
@@ -0,0 +1,41 @@
+package ephemeral
+
+import (
+ "github.com/spiral/roadrunner/v2/common/jobs"
+ "github.com/spiral/roadrunner/v2/pkg/events"
+ priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const (
+ PluginName string = "ephemeral"
+)
+
+type Plugin struct {
+ log logger.Logger
+ cfg config.Configurer
+}
+
+func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ p.log = log
+ p.cfg = cfg
+ return nil
+}
+
+func (p *Plugin) Name() string {
+ return PluginName
+}
+
+func (p *Plugin) Available() {}
+
+// JobsConstruct creates a new ephemeral consumer from the configuration.
+func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
+ return NewJobBroker(configKey, p.log, p.cfg, e, pq)
+}
+
+// FromPipeline creates a new ephemeral consumer from the provided pipeline.
+func (p *Plugin) FromPipeline(pipeline *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
+ return FromPipeline(pipeline, p.log, e, pq)
+}
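
To show how the pieces fit together, a hedged wiring sketch. The jobs plugin is normally the caller of JobsConstruct/FromPipeline, and the dependencies below (logger, configurer, pipeline, events handler, priority queue) are assumed to be supplied by the RoadRunner container; the package name and the wireEphemeral helper are hypothetical, and the concrete ephemeral.FromPipeline constructor is used so the sketch only relies on types defined in this diff.

    // Hypothetical sketch, not part of this commit.
    package sketch

    import (
        "context"

        "github.com/spiral/roadrunner/v2/pkg/events"
        priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
        "github.com/spiral/roadrunner/v2/plugins/config"
        "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/ephemeral"
        "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
        "github.com/spiral/roadrunner/v2/plugins/logger"
    )

    // wireEphemeral mirrors what the jobs plugin is expected to do with this driver:
    // Init the plugin, build a consumer from a pipeline, then Run to mark the pipe active.
    func wireEphemeral(log logger.Logger, cfg config.Configurer, pipe *pipeline.Pipeline,
        eh events.Handler, pq priorityqueue.Queue) error {
        p := &ephemeral.Plugin{}
        if err := p.Init(log, cfg); err != nil {
            return err
        }

        // the jobs plugin would normally go through p.FromPipeline / p.JobsConstruct;
        // the exported constructor is called directly here to keep the sketch self-contained
        c, err := ephemeral.FromPipeline(pipe, log, eh, pq)
        if err != nil {
            return err
        }

        // Run only emits EventPipeActive for this driver; jobs are then delivered via c.Push
        return c.Run(context.Background(), pipe)
    }
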