From 3212a5b59b6dcd8aa6edac137e945d42f6f9e0ce Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Wed, 25 Aug 2021 18:03:30 +0300 Subject: BoltDB local queue initial commit Signed-off-by: Valery Piashchynski --- plugins/boltdb/boltjobs/config.go | 16 + plugins/boltdb/boltjobs/consumer.go | 128 ++++++ plugins/boltdb/boltjobs/item.go | 77 ++++ plugins/boltdb/boltjobs/listener.go | 22 + plugins/boltdb/boltkv/config.go | 30 ++ plugins/boltdb/boltkv/driver.go | 468 ++++++++++++++++++++ plugins/boltdb/plugin.go | 82 ++++ plugins/jobs/drivers/amqp/amqpjobs/config.go | 67 +++ plugins/jobs/drivers/amqp/amqpjobs/consumer.go | 512 ++++++++++++++++++++++ plugins/jobs/drivers/amqp/amqpjobs/item.go | 239 ++++++++++ plugins/jobs/drivers/amqp/amqpjobs/listener.go | 25 ++ plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go | 57 +++ plugins/jobs/drivers/amqp/amqpjobs/redial.go | 141 ++++++ plugins/jobs/drivers/amqp/config.go | 67 --- plugins/jobs/drivers/amqp/consumer.go | 508 --------------------- plugins/jobs/drivers/amqp/item.go | 239 ---------- plugins/jobs/drivers/amqp/listener.go | 25 -- plugins/jobs/drivers/amqp/plugin.go | 5 +- plugins/jobs/drivers/amqp/rabbit_init.go | 57 --- plugins/jobs/drivers/amqp/redial.go | 141 ------ plugins/jobs/drivers/beanstalk/consumer.go | 26 +- plugins/jobs/drivers/beanstalk/item.go | 2 +- plugins/jobs/drivers/beanstalk/listen.go | 2 +- plugins/jobs/drivers/ephemeral/consumer.go | 28 +- plugins/jobs/drivers/sqs/consumer.go | 26 +- plugins/jobs/drivers/sqs/item.go | 2 +- plugins/jobs/drivers/sqs/listener.go | 2 +- plugins/kv/drivers/boltdb/config.go | 30 -- plugins/kv/drivers/boltdb/driver.go | 459 ------------------- plugins/kv/drivers/boltdb/plugin.go | 71 --- plugins/kv/plugin.go | 4 +- 31 files changed, 1913 insertions(+), 1645 deletions(-) create mode 100644 plugins/boltdb/boltjobs/config.go create mode 100644 plugins/boltdb/boltjobs/consumer.go create mode 100644 plugins/boltdb/boltjobs/item.go create mode 100644 plugins/boltdb/boltjobs/listener.go create mode 100644 plugins/boltdb/boltkv/config.go create mode 100644 plugins/boltdb/boltkv/driver.go create mode 100644 plugins/boltdb/plugin.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/config.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/consumer.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/item.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/listener.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go create mode 100644 plugins/jobs/drivers/amqp/amqpjobs/redial.go delete mode 100644 plugins/jobs/drivers/amqp/config.go delete mode 100644 plugins/jobs/drivers/amqp/consumer.go delete mode 100644 plugins/jobs/drivers/amqp/item.go delete mode 100644 plugins/jobs/drivers/amqp/listener.go delete mode 100644 plugins/jobs/drivers/amqp/rabbit_init.go delete mode 100644 plugins/jobs/drivers/amqp/redial.go delete mode 100644 plugins/kv/drivers/boltdb/config.go delete mode 100644 plugins/kv/drivers/boltdb/driver.go delete mode 100644 plugins/kv/drivers/boltdb/plugin.go (limited to 'plugins') diff --git a/plugins/boltdb/boltjobs/config.go b/plugins/boltdb/boltjobs/config.go new file mode 100644 index 00000000..013e30bf --- /dev/null +++ b/plugins/boltdb/boltjobs/config.go @@ -0,0 +1,16 @@ +package boltjobs + +type Config struct { + // File is boltDB file. 
No need to create it on your own;
+ // the boltdb driver is able to create the file or read an existing one
+ File string
+ // Bucket to store data in boltDB
+ bucket string
+ // db file permissions
+ Permissions int
+ // consume timeout (not used yet)
+}
+
+func (c *Config) InitDefaults() {
+
+}
diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go
new file mode 100644
index 00000000..a8db2f30
--- /dev/null
+++ b/plugins/boltdb/boltjobs/consumer.go
@@ -0,0 +1,128 @@
+package boltjobs
+
+import (
+ "context"
+ "os"
+ "sync/atomic"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/pkg/events"
+ priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
+ jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/job"
+ "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/utils"
+ bolt "go.etcd.io/bbolt"
+)
+
+const (
+ PluginName = "boltdb"
+)
+
+type consumer struct {
+ // bbolt configuration
+ file string
+ permissions int
+ bucket string
+ db *bolt.DB
+
+ log logger.Logger
+ eh events.Handler
+ pq priorityqueue.Queue
+ pipe atomic.Value
+}
+
+func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
+ const op = errors.Op("init_boltdb_jobs")
+
+ if !cfg.Has(configKey) {
+ return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
+ }
+
+ // if no global section
+ if !cfg.Has(PluginName) {
+ return nil, errors.E(op, errors.Str("no global boltdb configuration"))
+ }
+
+ conf := &Config{}
+
+ err := cfg.UnmarshalKey(configKey, conf)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // add default values
+ conf.InitDefaults()
+ c := &consumer{
+ file: conf.File,
+ permissions: conf.Permissions,
+ bucket: conf.bucket,
+
+ log: log,
+ eh: e,
+ pq: pq,
+ }
+
+ db, err := bolt.Open(c.file, os.FileMode(c.permissions), &bolt.Options{
+ Timeout: time.Second * 20,
+ NoGrowSync: false,
+ NoFreelistSync: false,
+ ReadOnly: false,
+ NoSync: false,
+ })
+
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ c.db = db
+
+ // create bucket if it does not exist
+ // tx.Commit is invoked via db.Update
+ err = db.Update(func(tx *bolt.Tx) error {
+ const upOp = errors.Op("boltdb_plugin_update")
+ _, err = tx.CreateBucketIfNotExists(utils.AsBytes(c.bucket))
+ if err != nil {
+ return errors.E(op, upOp, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return c, nil
+}
+
+func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
+ return &consumer{}, nil
+}
+
+func (c *consumer) Push(ctx context.Context, job *job.Job) error {
+ panic("implement me")
+}
+
+func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error {
+ c.pipe.Store(pipeline)
+ return nil
+}
+
+func (c *consumer) Run(_ context.Context, pipeline *pipeline.Pipeline) error {
+ panic("implement me")
+}
+
+func (c *consumer) Stop(ctx context.Context) error {
+ panic("implement me")
+}
+
+func (c *consumer) Pause(ctx context.Context, pipeline string) {
+ panic("implement me")
+}
+
+func (c *consumer) Resume(ctx context.Context, pipeline string) {
+ panic("implement me")
+}
+
+func (c *consumer) State(ctx context.Context) (*jobState.State, error) {
+ panic("implement me")
+}
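The jobs consumer above is still a skeleton: Push, Run, Stop, Pause, Resume and State are stubbed out. For orientation only, a minimal sketch of what a bbolt-backed Push could look like — this is not part of the commit; `fromJob` does not exist in boltjobs yet (it is assumed to mirror the amqp driver's helper of the same name), and the json-iterator import would need to be added:

```go
// A minimal sketch, assuming items are stored as JSON under their ID.
func (c *consumer) push(_ context.Context, j *job.Job) error {
	const op = errors.Op("boltdb_jobs_push")
	item := fromJob(j) // hypothetical helper, mirroring amqpjobs.fromJob
	data, err := json.Marshal(item)
	if err != nil {
		return errors.E(op, err)
	}
	// one writable transaction per push; bbolt serializes writers internally
	return c.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(utils.AsBytes(c.bucket))
		if b == nil {
			return errors.E(op, errors.NoSuchBucket)
		}
		return b.Put(utils.AsBytes(item.ID()), data)
	})
}
```

diff --git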
a/plugins/boltdb/boltjobs/item.go b/plugins/boltdb/boltjobs/item.go new file mode 100644 index 00000000..8a4aefa3 --- /dev/null +++ b/plugins/boltdb/boltjobs/item.go @@ -0,0 +1,77 @@ +package boltjobs + +import ( + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + panic("implement me") +} + +func (i *Item) Nack() error { + panic("implement me") +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + panic("implement me") +} diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go new file mode 100644 index 00000000..1f8e6ff1 --- /dev/null +++ b/plugins/boltdb/boltjobs/listener.go @@ -0,0 +1,22 @@ +package boltjobs + +import "time" + +func (c *consumer) listener() { + tt := time.NewTicker(time.Second) + for { + select { + case <-tt.C: + tx, err := c.db.Begin(false) + if err != nil { + panic(err) + } + //cursor := tx.Cursor() + + err = tx.Commit() + if err != nil { + panic(err) + } + } + } +} diff --git a/plugins/boltdb/boltkv/config.go b/plugins/boltdb/boltkv/config.go new file mode 100644 index 00000000..56d00674 --- /dev/null +++ b/plugins/boltdb/boltkv/config.go @@ -0,0 +1,30 @@ +package boltkv + +type Config struct { + // File is boltDB file. 
No need to create it on your own;
+ // the boltdb driver is able to create the file or read an existing one
+ File string
+ // Bucket to store data in boltDB
+ bucket string
+ // db file permissions
+ Permissions int
+ // GC sweep interval in seconds
+ Interval int `mapstructure:"interval"`
+}
+
+// InitDefaults initializes default values for the boltdb
+func (s *Config) InitDefaults() {
+ s.bucket = "default"
+
+ if s.File == "" {
+ s.File = "rr.db" // default file name
+ }
+
+ if s.Permissions == 0 {
+ s.Permissions = 0777 // free for all
+ }
+
+ if s.Interval == 0 {
+ s.Interval = 60 // default is a 60-second interval
+ }
+}
diff --git a/plugins/boltdb/boltkv/driver.go b/plugins/boltdb/boltkv/driver.go
new file mode 100644
index 00000000..ba1450cd
--- /dev/null
+++ b/plugins/boltdb/boltkv/driver.go
@@ -0,0 +1,468 @@
+package boltkv
+
+import (
+ "bytes"
+ "encoding/gob"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
+ "github.com/spiral/roadrunner/v2/utils"
+ bolt "go.etcd.io/bbolt"
+)
+
+const (
+ RootPluginName string = "kv"
+)
+
+type Driver struct {
+ clearMu sync.RWMutex
+ // db instance
+ DB *bolt.DB
+ // name should be UTF-8
+ bucket []byte
+ log logger.Logger
+ cfg *Config
+
+ // gc contains keys with timeouts
+ gc sync.Map
+ // default timeout for cache cleanup is 1 minute
+ timeout time.Duration
+
+ // stop is used to stop keys GC and close boltdb connection
+ stop chan struct{}
+}
+
+func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (*Driver, error) {
+ const op = errors.Op("new_boltdb_driver")
+
+ if !cfgPlugin.Has(RootPluginName) {
+ return nil, errors.E(op, errors.Str("no kv section in the configuration"))
+ }
+
+ d := &Driver{
+ log: log,
+ stop: stop,
+ }
+
+ err := cfgPlugin.UnmarshalKey(key, &d.cfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // add default values
+ d.cfg.InitDefaults()
+
+ d.bucket = []byte(d.cfg.bucket)
+ d.timeout = time.Duration(d.cfg.Interval) * time.Second
+ d.gc = sync.Map{}
+
+ db, err := bolt.Open(d.cfg.File, os.FileMode(d.cfg.Permissions), &bolt.Options{
+ Timeout: time.Second * 20,
+ NoGrowSync: false,
+ NoFreelistSync: false,
+ ReadOnly: false,
+ NoSync: false,
+ })
+
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ d.DB = db
+
+ // create bucket if it does not exist
+ // tx.Commit is invoked via db.Update
+ err = db.Update(func(tx *bolt.Tx) error {
+ const upOp = errors.Op("boltdb_plugin_update")
+ _, err = tx.CreateBucketIfNotExists([]byte(d.cfg.bucket))
+ if err != nil {
+ return errors.E(op, upOp, err)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ go d.startGCLoop()
+
+ return d, nil
+}
+
+func (d *Driver) Has(keys ...string) (map[string]bool, error) {
+ const op = errors.Op("boltdb_driver_has")
+ d.log.Debug("boltdb HAS method called", "args", keys)
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ m := make(map[string]bool, len(keys))
+
+ // this is a read-only transaction
+ err := d.DB.View(func(tx *bolt.Tx) error {
+ // Get retrieves the value for a key in the bucket.
+ // Returns a nil value if the key does not exist or if the key is a nested bucket.
+ // The returned value is only valid for the life of the transaction.
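+ // note: only key existence is checked below, so the slice returned by
+ // b.Get never needs to be copied out of the transaction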
+ for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return errors.E(op, errors.EmptyKey) + } + b := tx.Bucket(d.bucket) + if b == nil { + return errors.E(op, errors.NoSuchBucket) + } + exist := b.Get([]byte(keys[i])) + if exist != nil { + m[keys[i]] = true + } + } + return nil + }) + if err != nil { + return nil, errors.E(op, err) + } + + d.log.Debug("boltdb HAS method finished") + return m, nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (d *Driver) Get(key string) ([]byte, error) { + const op = errors.Op("boltdb_driver_get") + // to get cases like " " + keyTrimmed := strings.TrimSpace(key) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + + var val []byte + err := d.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket(d.bucket) + if b == nil { + return errors.E(op, errors.NoSuchBucket) + } + val = b.Get([]byte(key)) + + // try to decode values + if val != nil { + buf := bytes.NewReader(val) + decoder := gob.NewDecoder(buf) + + var i string + err := decoder.Decode(&i) + if err != nil { + // unsafe (w/o runes) convert + return errors.E(op, err) + } + + // set the value + val = utils.AsBytes(i) + } + return nil + }) + if err != nil { + return nil, errors.E(op, err) + } + + return val, nil +} + +func (d *Driver) MGet(keys ...string) (map[string][]byte, error) { + const op = errors.Op("boltdb_driver_mget") + // defense + if keys == nil { + return nil, errors.E(op, errors.NoKeys) + } + + // should not be empty keys + for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + } + + m := make(map[string][]byte, len(keys)) + + err := d.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket(d.bucket) + if b == nil { + return errors.E(op, errors.NoSuchBucket) + } + + buf := new(bytes.Buffer) + var out []byte + buf.Grow(100) + for i := range keys { + value := b.Get([]byte(keys[i])) + buf.Write(value) + // allocate enough space + dec := gob.NewDecoder(buf) + if value != nil { + err := dec.Decode(&out) + if err != nil { + return errors.E(op, err) + } + m[keys[i]] = out + buf.Reset() + out = nil + } + } + + return nil + }) + if err != nil { + return nil, errors.E(op, err) + } + + return m, nil +} + +// Set puts the K/V to the bolt +func (d *Driver) Set(items ...*kvv1.Item) error { + const op = errors.Op("boltdb_driver_set") + if items == nil { + return errors.E(op, errors.NoKeys) + } + + // start writable transaction + tx, err := d.DB.Begin(true) + if err != nil { + return errors.E(op, err) + } + defer func() { + err = tx.Commit() + if err != nil { + errRb := tx.Rollback() + if errRb != nil { + d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) + } + } + }() + + b := tx.Bucket(d.bucket) + // use access by index to avoid copying + for i := range items { + // performance note: pass a prepared bytes slice with initial cap + // we can't move buf and gob out of loop, because we need to clear both from data + // but gob will contain (w/o re-init) the past data + buf := new(bytes.Buffer) + encoder := gob.NewEncoder(buf) + if errors.Is(errors.EmptyItem, err) { + return errors.E(op, errors.EmptyItem) + } + + // Encode value + err = encoder.Encode(&items[i].Value) + if err != nil { + return errors.E(op, err) + } + // buf.Bytes will copy the underlying 
slice. Take a look in case of performance problems + err = b.Put([]byte(items[i].Key), buf.Bytes()) + if err != nil { + return errors.E(op, err) + } + + // if there are no errors, and TTL > 0, we put the key with timeout to the hashmap, for future check + // we do not need mutex here, since we use sync.Map + if items[i].Timeout != "" { + // check correctness of provided TTL + _, err := time.Parse(time.RFC3339, items[i].Timeout) + if err != nil { + return errors.E(op, err) + } + // Store key TTL in the separate map + d.gc.Store(items[i].Key, items[i].Timeout) + } + + buf.Reset() + } + + return nil +} + +// Delete all keys from DB +func (d *Driver) Delete(keys ...string) error { + const op = errors.Op("boltdb_driver_delete") + if keys == nil { + return errors.E(op, errors.NoKeys) + } + + // should not be empty keys + for _, key := range keys { + keyTrimmed := strings.TrimSpace(key) + if keyTrimmed == "" { + return errors.E(op, errors.EmptyKey) + } + } + + // start writable transaction + tx, err := d.DB.Begin(true) + if err != nil { + return errors.E(op, err) + } + + defer func() { + err = tx.Commit() + if err != nil { + errRb := tx.Rollback() + if errRb != nil { + d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) + } + } + }() + + b := tx.Bucket(d.bucket) + if b == nil { + return errors.E(op, errors.NoSuchBucket) + } + + for _, key := range keys { + err = b.Delete([]byte(key)) + if err != nil { + return errors.E(op, err) + } + } + + return nil +} + +// MExpire sets the expiration time to the key +// If key already has the expiration time, it will be overwritten +func (d *Driver) MExpire(items ...*kvv1.Item) error { + const op = errors.Op("boltdb_driver_mexpire") + for i := range items { + if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" { + return errors.E(op, errors.Str("should set timeout and at least one key")) + } + + // verify provided TTL + _, err := time.Parse(time.RFC3339, items[i].Timeout) + if err != nil { + return errors.E(op, err) + } + + d.gc.Store(items[i].Key, items[i].Timeout) + } + return nil +} + +func (d *Driver) TTL(keys ...string) (map[string]string, error) { + const op = errors.Op("boltdb_driver_ttl") + if keys == nil { + return nil, errors.E(op, errors.NoKeys) + } + + // should not be empty keys + for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + } + + m := make(map[string]string, len(keys)) + + for i := range keys { + if item, ok := d.gc.Load(keys[i]); ok { + // a little bit dangerous operation, but user can't store value other that kv.Item.TTL --> int64 + m[keys[i]] = item.(string) + } + } + return m, nil +} + +func (d *Driver) Clear() error { + err := d.DB.Update(func(tx *bolt.Tx) error { + err := tx.DeleteBucket(d.bucket) + if err != nil { + d.log.Error("boltdb delete bucket", "error", err) + return err + } + + _, err = tx.CreateBucket(d.bucket) + if err != nil { + d.log.Error("boltdb create bucket", "error", err) + return err + } + + return nil + }) + + if err != nil { + d.log.Error("clear transaction failed", "error", err) + return err + } + + d.clearMu.Lock() + d.gc = sync.Map{} + d.clearMu.Unlock() + + return nil +} + +// ========================= PRIVATE ================================= + +func (d *Driver) startGCLoop() { //nolint:gocognit + go func() { + t := time.NewTicker(d.timeout) + defer t.Stop() + for { + select { + case <-t.C: + d.clearMu.RLock() + + // calculate current time before loop 
started to be fair + now := time.Now() + d.gc.Range(func(key, value interface{}) bool { + const op = errors.Op("boltdb_plugin_gc") + k := key.(string) + v, err := time.Parse(time.RFC3339, value.(string)) + if err != nil { + return false + } + + if now.After(v) { + // time expired + d.gc.Delete(k) + d.log.Debug("key deleted", "key", k) + err := d.DB.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(d.bucket) + if b == nil { + return errors.E(op, errors.NoSuchBucket) + } + err := b.Delete(utils.AsBytes(k)) + if err != nil { + return errors.E(op, err) + } + return nil + }) + if err != nil { + d.log.Error("error during the gc phase of update", "error", err) + return false + } + } + return true + }) + + d.clearMu.RUnlock() + case <-d.stop: + err := d.DB.Close() + if err != nil { + d.log.Error("error") + } + return + } + } + }() +} diff --git a/plugins/boltdb/plugin.go b/plugins/boltdb/plugin.go new file mode 100644 index 00000000..683b26f1 --- /dev/null +++ b/plugins/boltdb/plugin.go @@ -0,0 +1,82 @@ +package boltdb + +import ( + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/common/kv" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/boltdb/boltjobs" + "github.com/spiral/roadrunner/v2/plugins/boltdb/boltkv" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + PluginName string = "boltdb" +) + +// Plugin BoltDB K/V storage. +type Plugin struct { + cfgPlugin config.Configurer + // logger + log logger.Logger + // stop is used to stop keys GC and close boltdb connection + stop chan struct{} + + drivers uint +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.stop = make(chan struct{}) + p.log = log + p.cfgPlugin = cfg + return nil +} + +// Serve is noop here +func (p *Plugin) Serve() chan error { + return make(chan error, 1) +} + +func (p *Plugin) Stop() error { + if p.drivers > 0 { + for i := uint(0); i < p.drivers; i++ { + // send close signal to every driver + p.stop <- struct{}{} + } + } + return nil +} + +// Name returns plugin name +func (p *Plugin) Name() string { + return PluginName +} + +// Available interface implementation +func (p *Plugin) Available() {} + +func (p *Plugin) KVConstruct(key string) (kv.Storage, error) { + const op = errors.Op("boltdb_plugin_provide") + st, err := boltkv.NewBoltDBDriver(p.log, key, p.cfgPlugin, p.stop) + if err != nil { + return nil, errors.E(op, err) + } + + // save driver number to release resources after Stop + p.drivers++ + + return st, nil +} + +// JOBS bbolt implementation + +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) { + return boltjobs.NewBoltDBJobs(configKey, p.log, p.cfgPlugin, e, queue) +} + +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) { + return boltjobs.FromPipeline(pipe, p.log, p.cfgPlugin, e, queue) +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/config.go b/plugins/jobs/drivers/amqp/amqpjobs/config.go new file mode 100644 index 00000000..ac2f6e53 --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/config.go @@ -0,0 +1,67 @@ +package amqpjobs + +// pipeline rabbitmq info +const ( + exchangeKey string = "exchange" + exchangeType string = "exchange_type" + queue 
string = "queue" + routingKey string = "routing_key" + prefetch string = "prefetch" + exclusive string = "exclusive" + priority string = "priority" + multipleAsk string = "multiple_ask" + requeueOnFail string = "requeue_on_fail" + + dlx string = "x-dead-letter-exchange" + dlxRoutingKey string = "x-dead-letter-routing-key" + dlxTTL string = "x-message-ttl" + dlxExpires string = "x-expires" + + contentType string = "application/octet-stream" +) + +type GlobalCfg struct { + Addr string `mapstructure:"addr"` +} + +// Config is used to parse pipeline configuration +type Config struct { + Prefetch int `mapstructure:"prefetch"` + Queue string `mapstructure:"queue"` + Priority int64 `mapstructure:"priority"` + Exchange string `mapstructure:"exchange"` + ExchangeType string `mapstructure:"exchange_type"` + RoutingKey string `mapstructure:"routing_key"` + Exclusive bool `mapstructure:"exclusive"` + MultipleAck bool `mapstructure:"multiple_ask"` + RequeueOnFail bool `mapstructure:"requeue_on_fail"` +} + +func (c *Config) InitDefault() { + // all options should be in sync with the pipeline defaults in the FromPipeline method + if c.ExchangeType == "" { + c.ExchangeType = "direct" + } + + if c.Exchange == "" { + c.Exchange = "amqp.default" + } + + if c.Queue == "" { + c.Queue = "default" + } + + if c.Prefetch == 0 { + c.Prefetch = 10 + } + + if c.Priority == 0 { + c.Priority = 10 + } +} + +func (c *GlobalCfg) InitDefault() { + if c.Addr == "" { + c.Addr = "amqp://guest:guest@127.0.0.1:5672/" + } +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/consumer.go b/plugins/jobs/drivers/amqp/amqpjobs/consumer.go new file mode 100644 index 00000000..1931ceaa --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/consumer.go @@ -0,0 +1,512 @@ +package amqpjobs + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + jobState "github.com/spiral/roadrunner/v2/pkg/state/job" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + pluginName string = "amqp" +) + +type consumer struct { + sync.Mutex + log logger.Logger + pq priorityqueue.Queue + eh events.Handler + + pipeline atomic.Value + + // amqp connection + conn *amqp.Connection + consumeChan *amqp.Channel + publishChan chan *amqp.Channel + consumeID string + connStr string + + retryTimeout time.Duration + // + // prefetch QoS AMQP + // + prefetch int + // + // pipeline's priority + // + priority int64 + exchangeName string + queue string + exclusive bool + exchangeType string + routingKey string + multipleAck bool + requeueOnFail bool + + listeners uint32 + delayed *int64 + stopCh chan struct{} +} + +// NewAMQPConsumer initializes rabbitmq pipeline +func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_amqp_consumer") + // we need to obtain two parts of the amqp information here. 
+ // first part - the address to connect to; it is located in the global section under the amqp pluginName
+ // second part - queues and other pipeline information
+ // if no such key - error
+ if !cfg.Has(configKey) {
+ return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
+ }
+
+ // if no global section
+ if !cfg.Has(pluginName) {
+ return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
+ }
+
+ // PARSE CONFIGURATION START -------
+ var pipeCfg Config
+ var globalCfg GlobalCfg
+
+ err := cfg.UnmarshalKey(configKey, &pipeCfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ pipeCfg.InitDefault()
+
+ err = cfg.UnmarshalKey(pluginName, &globalCfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ globalCfg.InitDefault()
+ // PARSE CONFIGURATION END -------
+
+ jb := &consumer{
+ log: log,
+ pq: pq,
+ eh: e,
+ consumeID: uuid.NewString(),
+ stopCh: make(chan struct{}),
+ // TODO: move to config
+ retryTimeout: time.Minute * 5,
+ priority: pipeCfg.Priority,
+ delayed: utils.Int64(0),
+
+ publishChan: make(chan *amqp.Channel, 1),
+ routingKey: pipeCfg.RoutingKey,
+ queue: pipeCfg.Queue,
+ exchangeType: pipeCfg.ExchangeType,
+ exchangeName: pipeCfg.Exchange,
+ prefetch: pipeCfg.Prefetch,
+ exclusive: pipeCfg.Exclusive,
+ multipleAck: pipeCfg.MultipleAck,
+ requeueOnFail: pipeCfg.RequeueOnFail,
+ }
+
+ jb.conn, err = amqp.Dial(globalCfg.Addr)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // save address
+ jb.connStr = globalCfg.Addr
+
+ err = jb.initRabbitMQ()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ pch, err := jb.conn.Channel()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ jb.publishChan <- pch
+
+ // run redialer and requeue listener for the connection
+ jb.redialer()
+
+ return jb, nil
+}
+
+func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
+ const op = errors.Op("new_amqp_consumer_from_pipeline")
+ // we need to obtain two parts of the amqp information here.
+ // first part - the address to connect to; it is located in the global section under the amqp pluginName
+ // second part - queues and other pipeline information
+
+ // only global section
+ if !cfg.Has(pluginName) {
+ return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
+ }
+
+ // PARSE CONFIGURATION -------
+ var globalCfg GlobalCfg
+
+ err := cfg.UnmarshalKey(pluginName, &globalCfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ globalCfg.InitDefault()
+
+ // PARSE CONFIGURATION -------
+
+ jb := &consumer{
+ log: log,
+ eh: e,
+ pq: pq,
+ consumeID: uuid.NewString(),
+ stopCh: make(chan struct{}),
+ retryTimeout: time.Minute * 5,
+ delayed: utils.Int64(0),
+
+ publishChan: make(chan *amqp.Channel, 1),
+ routingKey: pipeline.String(routingKey, ""),
+ queue: pipeline.String(queue, "default"),
+ exchangeType: pipeline.String(exchangeType, "direct"),
+ exchangeName: pipeline.String(exchangeKey, "amqp.default"),
+ prefetch: pipeline.Int(prefetch, 10),
+ priority: int64(pipeline.Int(priority, 10)),
+ exclusive: pipeline.Bool(exclusive, false),
+ multipleAck: pipeline.Bool(multipleAsk, false),
+ requeueOnFail: pipeline.Bool(requeueOnFail, false),
+ }
+
+ jb.conn, err = amqp.Dial(globalCfg.Addr)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // save address
+ jb.connStr = globalCfg.Addr
+
+ err = jb.initRabbitMQ()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ pch, err := jb.conn.Channel()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ jb.publishChan <- pch
+
+ // register the pipeline
+ // error here is always nil
+ _ = jb.Register(context.Background(), pipeline)
+
+ // run redialer for the connection
+ jb.redialer()
+
+ return jb, nil
+}
+
+func (j *consumer) Push(ctx context.Context, job *job.Job) error {
+ const op = errors.Op("rabbitmq_push")
+ // check if the pipeline is registered
+
+ // load atomic value
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != job.Options.Pipeline {
+ return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name()))
+ }
+
+ err := j.handleItem(ctx, fromJob(job))
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
+ j.pipeline.Store(p)
+ return nil
+}
+
+func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
+ const op = errors.Op("rabbit_consume")
+
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != p.Name() {
+ return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
+ }
+
+ // protect connection (redial)
+ j.Lock()
+ defer j.Unlock()
+
+ var err error
+ j.consumeChan, err = j.conn.Channel()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ err = j.consumeChan.Qos(j.prefetch, 0, false)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // start reading messages from the channel
+ deliv, err := j.consumeChan.Consume(
+ j.queue,
+ j.consumeID,
+ false,
+ false,
+ false,
+ false,
+ nil,
+ )
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // run listener
+ j.listener(deliv)
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeActive,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+
+ return nil
+}
+
+func (j *consumer) State(ctx context.Context) (*jobState.State, error) {
+ const op = errors.Op("amqp_driver_state")
+ select {
+ case pch := <-j.publishChan:
+ defer func() {
+ j.publishChan <- pch
+ }()
+
+ q, err
:= pch.QueueInspect(j.queue) + if err != nil { + return nil, errors.E(op, err) + } + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + return &jobState.State{ + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Queue: q.Name, + Active: int64(q.Messages), + Delayed: atomic.LoadInt64(j.delayed), + Ready: ready(atomic.LoadUint32(&j.listeners)), + }, nil + + case <-ctx.Done(): + return nil, errors.E(op, errors.TimeOut, ctx.Err()) + } +} + +func (j *consumer) Pause(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested pause on: ", p) + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 0 { + j.log.Warn("no active listeners, nothing to pause") + return + } + + atomic.AddUint32(&j.listeners, ^uint32(0)) + + // protect connection (redial) + j.Lock() + defer j.Unlock() + + err := j.consumeChan.Cancel(j.consumeID, true) + if err != nil { + j.log.Error("cancel publish channel, forcing close", "error", err) + errCl := j.consumeChan.Close() + if errCl != nil { + j.log.Error("force close failed", "error", err) + return + } + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) Resume(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested resume on: ", p) + } + + // protect connection (redial) + j.Lock() + defer j.Unlock() + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 1 { + j.log.Warn("amqp listener already in the active state") + return + } + + var err error + j.consumeChan, err = j.conn.Channel() + if err != nil { + j.log.Error("create channel on rabbitmq connection", "error", err) + return + } + + err = j.consumeChan.Qos(j.prefetch, 0, false) + if err != nil { + j.log.Error("qos set failed", "error", err) + return + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + j.log.Error("consume operation failed", "error", err) + return + } + + // run listener + j.listener(deliv) + + // increase number of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) Stop(context.Context) error { + j.stopCh <- struct{}{} + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +// handleItem +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { + const op = errors.Op("rabbitmq_handle_item") + select { + case pch := <-j.publishChan: + // return the channel back + defer func() { + j.publishChan <- pch + }() + + // convert + table, err := pack(msg.ID(), msg) + if err != nil { + return errors.E(op, err) + } + + const op = errors.Op("rabbitmq_handle_item") + // handle timeouts + if msg.Options.DelayDuration() > 0 { + atomic.AddInt64(j.delayed, 1) + // TODO declare separate method for this if condition + // TODO dlx cache channel?? 
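+ // the delay is emulated with a temporary per-delay queue: the message sits
+ // there until x-message-ttl expires and is then dead-lettered back to the
+ // main exchange/routing key declared via the x-dead-letter-* arguments below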
+ delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) + tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue) + _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ + dlx: j.exchangeName, + dlxRoutingKey: j.routingKey, + dlxTTL: delayMs, + dlxExpires: delayMs * 2, + }) + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil) + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + // insert to the local, limited pipeline + err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{ + Headers: table, + ContentType: contentType, + Timestamp: time.Now().UTC(), + DeliveryMode: amqp.Persistent, + Body: msg.Body(), + }) + + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + return nil + } + + // insert to the local, limited pipeline + err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{ + Headers: table, + ContentType: contentType, + Timestamp: time.Now(), + DeliveryMode: amqp.Persistent, + Body: msg.Body(), + }) + + if err != nil { + return errors.E(op, err) + } + + return nil + case <-ctx.Done(): + return errors.E(op, errors.TimeOut, ctx.Err()) + } +} + +func ready(r uint32) bool { + return r > 0 +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/item.go b/plugins/jobs/drivers/amqp/amqpjobs/item.go new file mode 100644 index 00000000..a8e305ea --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/item.go @@ -0,0 +1,239 @@ +package amqpjobs + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + json "github.com/json-iterator/go" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // private + // Ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery + ack func(multiply bool) error + + // Nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. + // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. + // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. 
+ // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time + nack func(multiply bool, requeue bool) error + + // requeueFn used as a pointer to the push function + requeueFn func(context.Context, *Item) error + delayed *int64 + multipleAsk bool + requeue bool +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +// Not used in the amqp, amqp.Table used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + return i.Options.ack(i.Options.multipleAsk) +} + +func (i *Item) Nack() error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + return i.Options.nack(false, i.Options.requeue) +} + +// Requeue with the provided delay, handled by the Nack +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + errNack := i.Options.nack(false, true) + if errNack != nil { + return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) + } + + return err + } + + // ack the job + err = i.Options.ack(false) + if err != nil { + return err + } + + return nil +} + +// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ +func (j *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { + const op = errors.Op("from_delivery_convert") + item, err := j.unpack(d) + if err != nil { + return nil, errors.E(op, err) + } + + i := &Item{ + Job: item.Job, + Ident: item.Ident, + Payload: item.Payload, + Headers: item.Headers, + Options: item.Options, + } + + item.Options.ack = d.Ack + item.Options.nack = d.Nack + item.Options.delayed = j.delayed + + // requeue func + item.Options.requeueFn = j.handleItem + return i, nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +// pack job metadata into headers +func pack(id string, j *Item) (amqp.Table, error) { + headers, err := json.Marshal(j.Headers) + if err != nil { + return nil, err + } + return amqp.Table{ + job.RRID: id, + job.RRJob: j.Job, + job.RRPipeline: j.Options.Pipeline, + job.RRHeaders: headers, + job.RRDelay: j.Options.Delay, + job.RRPriority: j.Options.Priority, + }, nil +} + +// unpack restores jobs.Options +func (j *consumer) unpack(d amqp.Delivery) (*Item, error) 
{ + item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ + multipleAsk: j.multipleAck, + requeue: j.requeueOnFail, + requeueFn: j.handleItem, + }} + + if _, ok := d.Headers[job.RRID].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID)) + } + + item.Ident = d.Headers[job.RRID].(string) + + if _, ok := d.Headers[job.RRJob].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) + } + + item.Job = d.Headers[job.RRJob].(string) + + if _, ok := d.Headers[job.RRPipeline].(string); ok { + item.Options.Pipeline = d.Headers[job.RRPipeline].(string) + } + + if h, ok := d.Headers[job.RRHeaders].([]byte); ok { + err := json.Unmarshal(h, &item.Headers) + if err != nil { + return nil, err + } + } + + if _, ok := d.Headers[job.RRDelay].(int64); ok { + item.Options.Delay = d.Headers[job.RRDelay].(int64) + } + + if _, ok := d.Headers[job.RRPriority]; !ok { + // set pipe's priority + item.Options.Priority = j.priority + } else { + item.Options.Priority = d.Headers[job.RRPriority].(int64) + } + + return item, nil +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/listener.go b/plugins/jobs/drivers/amqp/amqpjobs/listener.go new file mode 100644 index 00000000..0156d55c --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/listener.go @@ -0,0 +1,25 @@ +package amqpjobs + +import amqp "github.com/rabbitmq/amqp091-go" + +func (j *consumer) listener(deliv <-chan amqp.Delivery) { + go func() { + for { //nolint:gosimple + select { + case msg, ok := <-deliv: + if !ok { + j.log.Info("delivery channel closed, leaving the rabbit listener") + return + } + + d, err := j.fromDelivery(msg) + if err != nil { + j.log.Error("amqp delivery convert", "error", err) + continue + } + // insert job into the main priority queue + j.pq.Insert(d) + } + } + }() +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go b/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go new file mode 100644 index 00000000..e260fabe --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go @@ -0,0 +1,57 @@ +package amqpjobs + +import ( + "github.com/spiral/errors" +) + +func (j *consumer) initRabbitMQ() error { + const op = errors.Op("jobs_plugin_rmq_init") + // Channel opens a unique, concurrent server channel to process the bulk of AMQP + // messages. Any error from methods on this receiver will render the receiver + // invalid and a new Channel should be opened. 
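+ // the channel opened here is only used to declare the exchange/queue
+ // topology; it is closed at the end of this function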
+ channel, err := j.conn.Channel() + if err != nil { + return errors.E(op, err) + } + + // declare an exchange (idempotent operation) + err = channel.ExchangeDeclare( + j.exchangeName, + j.exchangeType, + true, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // verify or declare a queue + q, err := channel.QueueDeclare( + j.queue, + false, + false, + j.exclusive, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // bind queue to the exchange + err = channel.QueueBind( + q.Name, + j.routingKey, + j.exchangeName, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + return channel.Close() +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/redial.go b/plugins/jobs/drivers/amqp/amqpjobs/redial.go new file mode 100644 index 00000000..0835e3ea --- /dev/null +++ b/plugins/jobs/drivers/amqp/amqpjobs/redial.go @@ -0,0 +1,141 @@ +package amqpjobs + +import ( + "time" + + "github.com/cenkalti/backoff/v4" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" +) + +// redialer used to redial to the rabbitmq in case of the connection interrupts +func (j *consumer) redialer() { //nolint:gocognit + go func() { + const op = errors.Op("rabbitmq_redial") + + for { + select { + case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): + if err == nil { + return + } + + j.Lock() + + // trash the broken publishing channel + <-j.publishChan + + t := time.Now() + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeError, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Error: err, + Start: time.Now(), + }) + + expb := backoff.NewExponentialBackOff() + // set the retry timeout (minutes) + expb.MaxElapsedTime = j.retryTimeout + operation := func() error { + j.log.Warn("rabbitmq reconnecting, caused by", "error", err) + var dialErr error + j.conn, dialErr = amqp.Dial(j.connStr) + if dialErr != nil { + return errors.E(op, dialErr) + } + + j.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") + + // re-init connection + errInit := j.initRabbitMQ() + if errInit != nil { + j.log.Error("rabbitmq dial", "error", errInit) + return errInit + } + + // redeclare consume channel + var errConnCh error + j.consumeChan, errConnCh = j.conn.Channel() + if errConnCh != nil { + return errors.E(op, errConnCh) + } + + // redeclare publish channel + pch, errPubCh := j.conn.Channel() + if errPubCh != nil { + return errors.E(op, errPubCh) + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // put the fresh publishing channel + j.publishChan <- pch + // restart listener + j.listener(deliv) + + j.log.Info("queues and subscribers redeclared successfully") + + return nil + } + + retryErr := backoff.Retry(operation, expb) + if retryErr != nil { + j.Unlock() + j.log.Error("backoff failed", "error", retryErr) + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Start: t, + Elapsed: time.Since(t), + }) + + j.Unlock() + + case <-j.stopCh: + if j.publishChan != nil { + pch := <-j.publishChan + err := pch.Close() + if err != nil { + j.log.Error("publish channel close", "error", err) + } + } + + if j.consumeChan != nil { + err := j.consumeChan.Close() + if err != nil { + j.log.Error("consume channel close", "error", err) + } + } + if j.conn != nil { + err := j.conn.Close() + if err != nil { + j.log.Error("amqp connection close", "error", err) + } + } + + return + } + } + }() +} diff --git a/plugins/jobs/drivers/amqp/config.go b/plugins/jobs/drivers/amqp/config.go deleted file mode 100644 index 1ec089f1..00000000 --- a/plugins/jobs/drivers/amqp/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package amqp - -// pipeline rabbitmq info -const ( - exchangeKey string = "exchange" - exchangeType string = "exchange_type" - queue string = "queue" - routingKey string = "routing_key" - prefetch string = "prefetch" - exclusive string = "exclusive" - priority string = "priority" - multipleAsk string = "multiple_ask" - requeueOnFail string = "requeue_on_fail" - - dlx string = "x-dead-letter-exchange" - dlxRoutingKey string = "x-dead-letter-routing-key" - dlxTTL string = "x-message-ttl" - dlxExpires string = "x-expires" - - contentType string = "application/octet-stream" -) - -type GlobalCfg struct { - Addr string `mapstructure:"addr"` -} - -// Config is used to parse pipeline configuration -type Config struct { - Prefetch int `mapstructure:"prefetch"` - Queue string `mapstructure:"queue"` - Priority int64 `mapstructure:"priority"` - Exchange string `mapstructure:"exchange"` - ExchangeType string `mapstructure:"exchange_type"` - RoutingKey string `mapstructure:"routing_key"` - Exclusive bool `mapstructure:"exclusive"` - MultipleAck bool `mapstructure:"multiple_ask"` - RequeueOnFail bool `mapstructure:"requeue_on_fail"` -} - -func (c *Config) InitDefault() { - // all options should be in sync with the pipeline defaults in the FromPipeline method - if c.ExchangeType == "" { - c.ExchangeType = "direct" - } - - if c.Exchange == "" { - c.Exchange = "amqp.default" - } - - if c.Queue == "" { - c.Queue = "default" - } - - if c.Prefetch == 0 { - c.Prefetch = 10 - } - - if c.Priority == 0 { - c.Priority = 10 - } -} - -func (c *GlobalCfg) InitDefault() { - if c.Addr == "" { - c.Addr = "amqp://guest:guest@127.0.0.1:5672/" - } -} diff --git 
a/plugins/jobs/drivers/amqp/consumer.go b/plugins/jobs/drivers/amqp/consumer.go deleted file mode 100644 index 95df02ec..00000000 --- a/plugins/jobs/drivers/amqp/consumer.go +++ /dev/null @@ -1,508 +0,0 @@ -package amqp - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/google/uuid" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -type JobConsumer struct { - sync.Mutex - log logger.Logger - pq priorityqueue.Queue - eh events.Handler - - pipeline atomic.Value - - // amqp connection - conn *amqp.Connection - consumeChan *amqp.Channel - publishChan chan *amqp.Channel - consumeID string - connStr string - - retryTimeout time.Duration - // - // prefetch QoS AMQP - // - prefetch int - // - // pipeline's priority - // - priority int64 - exchangeName string - queue string - exclusive bool - exchangeType string - routingKey string - multipleAck bool - requeueOnFail bool - - listeners uint32 - delayed *int64 - stopCh chan struct{} -} - -// NewAMQPConsumer initializes rabbitmq pipeline -func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { - const op = errors.Op("new_amqp_consumer") - // we need to obtain two parts of the amqp information here. - // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - // if no such key - error - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION START ------- - var pipeCfg Config - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - // PARSE CONFIGURATION END ------- - - jb := &JobConsumer{ - log: log, - pq: pq, - eh: e, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - // TODO to config - retryTimeout: time.Minute * 5, - priority: pipeCfg.Priority, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeCfg.RoutingKey, - queue: pipeCfg.Queue, - exchangeType: pipeCfg.ExchangeType, - exchangeName: pipeCfg.Exchange, - prefetch: pipeCfg.Prefetch, - exclusive: pipeCfg.Exclusive, - multipleAck: pipeCfg.MultipleAck, - requeueOnFail: pipeCfg.RequeueOnFail, - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // run redialer and requeue listener for the 
connection - jb.redialer() - - return jb, nil -} - -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { - const op = errors.Op("new_amqp_consumer_from_pipeline") - // we need to obtain two parts of the amqp information here. - // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - - // only global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - jb := &JobConsumer{ - log: log, - eh: e, - pq: pq, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - retryTimeout: time.Minute * 5, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeline.String(routingKey, ""), - queue: pipeline.String(queue, "default"), - exchangeType: pipeline.String(exchangeType, "direct"), - exchangeName: pipeline.String(exchangeKey, "amqp.default"), - prefetch: pipeline.Int(prefetch, 10), - priority: int64(pipeline.Int(priority, 10)), - exclusive: pipeline.Bool(exclusive, false), - multipleAck: pipeline.Bool(multipleAsk, false), - requeueOnFail: pipeline.Bool(requeueOnFail, false), - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // register the pipeline - // error here is always nil - _ = jb.Register(context.Background(), pipeline) - - // run redialer for the connection - jb.redialer() - - return jb, nil -} - -func (j *JobConsumer) Push(ctx context.Context, job *job.Job) error { - const op = errors.Op("rabbitmq_push") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != job.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name())) - } - - err := j.handleItem(ctx, fromJob(job)) - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (j *JobConsumer) Register(_ context.Context, p *pipeline.Pipeline) error { - j.pipeline.Store(p) - return nil -} - -func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("rabbit_consume") - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) - } - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - var err error - j.consumeChan, err = j.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - err = j.consumeChan.Qos(j.prefetch, 0, false) - if err != nil { - return errors.E(op, err) - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // run listener - j.listener(deliv) - - j.eh.Push(events.JobEvent{ - 
Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - - return nil -} - -func (j *JobConsumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("amqp_driver_state") - select { - case pch := <-j.publishChan: - defer func() { - j.publishChan <- pch - }() - - q, err := pch.QueueInspect(j.queue) - if err != nil { - return nil, errors.E(op, err) - } - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - return &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: q.Name, - Active: int64(q.Messages), - Delayed: atomic.LoadInt64(j.delayed), - Ready: ready(atomic.LoadUint32(&j.listeners)), - }, nil - - case <-ctx.Done(): - return nil, errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func (j *JobConsumer) Pause(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested pause on: ", p) - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - err := j.consumeChan.Cancel(j.consumeID, true) - if err != nil { - j.log.Error("cancel publish channel, forcing close", "error", err) - errCl := j.consumeChan.Close() - if errCl != nil { - j.log.Error("force close failed", "error", err) - return - } - return - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *JobConsumer) Resume(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested resume on: ", p) - } - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 1 { - j.log.Warn("amqp listener already in the active state") - return - } - - var err error - j.consumeChan, err = j.conn.Channel() - if err != nil { - j.log.Error("create channel on rabbitmq connection", "error", err) - return - } - - err = j.consumeChan.Qos(j.prefetch, 0, false) - if err != nil { - j.log.Error("qos set failed", "error", err) - return - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - j.log.Error("consume operation failed", "error", err) - return - } - - // run listener - j.listener(deliv) - - // increase number of listeners - atomic.AddUint32(&j.listeners, 1) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *JobConsumer) Stop(context.Context) error { - j.stopCh <- struct{}{} - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - return nil -} - -// handleItem -func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { - const op = errors.Op("rabbitmq_handle_item") - select { - case pch := <-j.publishChan: - // return the channel back - defer func() { - j.publishChan <- pch - }() - - // convert - table, err := pack(msg.ID(), msg) - if err != nil { - return errors.E(op, err) - } - - const op = errors.Op("rabbitmq_handle_item") - // 
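Pause above decrements the unsigned listener counter with atomic.AddUint32(&j.listeners, ^uint32(0)): adding the all-ones value wraps around and subtracts one, which is the idiom the sync/atomic documentation gives for decrementing unsigned counters. A tiny self-contained illustration:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var listeners uint32 = 1

	// increment on Resume
	atomic.AddUint32(&listeners, 1) // now 2

	// decrement on Pause: adding ^uint32(0) (i.e. MaxUint32) wraps
	// around and subtracts one -- the idiom documented in sync/atomic
	atomic.AddUint32(&listeners, ^uint32(0)) // back to 1

	fmt.Println(atomic.LoadUint32(&listeners)) // 1
}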
handle timeouts - if msg.Options.DelayDuration() > 0 { - atomic.AddInt64(j.delayed, 1) - // TODO declare separate method for this if condition - // TODO dlx cache channel?? - delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) - tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue) - _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ - dlx: j.exchangeName, - dlxRoutingKey: j.routingKey, - dlxTTL: delayMs, - dlxExpires: delayMs * 2, - }) - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil) - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now().UTC(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - return nil - } - - // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - return errors.E(op, err) - } - - return nil - case <-ctx.Done(): - return errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/jobs/drivers/amqp/item.go b/plugins/jobs/drivers/amqp/item.go deleted file mode 100644 index 623dcca7..00000000 --- a/plugins/jobs/drivers/amqp/item.go +++ /dev/null @@ -1,239 +0,0 @@ -package amqp - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - json "github.com/json-iterator/go" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // private - // Ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery - ack func(multiply bool) error - - // Nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. - // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. - // When requeue is true, request the server to deliver this message to a different consumer. 
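The delayed branch above leans on RabbitMQ's dead-letter mechanics instead of a scheduler: the message sits in a throwaway queue until x-message-ttl fires, is dead-lettered back into the real exchange and routing key, and x-expires eventually drops the temporary queue itself. A standalone sketch of the same trick (names and the broker address are illustrative; assumes the exchange already exists):

package main

import (
	"fmt"
	"log"
	"time"

	amqp "github.com/rabbitmq/amqp091-go"
)

// publishDelayed parks the message in a temporary queue whose TTL
// dead-letters it back into the real exchange after the delay.
func publishDelayed(ch *amqp.Channel, exchange, rk string, delay time.Duration, body []byte) error {
	delayMs := delay.Milliseconds()
	tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, exchange, rk)

	_, err := ch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{
		"x-dead-letter-exchange":    exchange, // where expired messages go
		"x-dead-letter-routing-key": rk,
		"x-message-ttl":             delayMs,     // the delay itself
		"x-expires":                 delayMs * 2, // drop the temp queue later
	})
	if err != nil {
		return err
	}
	if err = ch.QueueBind(tmpQ, tmpQ, exchange, false, nil); err != nil {
		return err
	}
	// publish into the temp queue; consumers on the real queue see the
	// message only after the TTL expires and RabbitMQ re-routes it
	return ch.Publish(exchange, tmpQ, false, false, amqp.Publishing{
		ContentType:  "application/octet-stream",
		DeliveryMode: amqp.Persistent,
		Timestamp:    time.Now().UTC(),
		Body:         body,
	})
}

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@127.0.0.1:5672/") // assumes a local broker
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ch, err := conn.Channel()
	if err != nil {
		log.Fatal(err)
	}

	if err := publishDelayed(ch, "amqp.default", "default", 5*time.Second, []byte("hi")); err != nil {
		log.Fatal(err)
	}
}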
If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. - // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time - nack func(multiply bool, requeue bool) error - - // requeueFn used as a pointer to the push function - requeueFn func(context.Context, *Item) error - delayed *int64 - multipleAsk bool - requeue bool -} - -// DelayDuration returns delay duration in a form of time.Duration. -func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the amqp, amqp.Table used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.ack(i.Options.multipleAsk) -} - -func (i *Item) Nack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.nack(false, i.Options.requeue) -} - -// Requeue with the provided delay, handled by the Nack -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - errNack := i.Options.nack(false, true) - if errNack != nil { - return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) - } - - return err - } - - // ack the job - err = i.Options.ack(false) - if err != nil { - return err - } - - return nil -} - -// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ -func (j *JobConsumer) fromDelivery(d amqp.Delivery) (*Item, error) { - const op = errors.Op("from_delivery_convert") - item, err := j.unpack(d) - if err != nil { - return nil, errors.E(op, err) - } - - i := &Item{ - Job: item.Job, - Ident: item.Ident, - Payload: item.Payload, - Headers: item.Headers, - Options: item.Options, - } - - item.Options.ack = d.Ack - item.Options.nack = d.Nack - item.Options.delayed = j.delayed - - // requeue func - item.Options.requeueFn = j.handleItem - return i, nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -// pack job metadata into headers -func pack(id string, j *Item) (amqp.Table, error) { - headers, err := json.Marshal(j.Headers) - if err != nil { - return nil, err - } - return amqp.Table{ - job.RRID: id, - job.RRJob: j.Job, - job.RRPipeline: j.Options.Pipeline, - job.RRHeaders: headers, - job.RRDelay: j.Options.Delay, - 
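fromDelivery below hands the broker's acknowledgement hooks to the item as method values: item.Options.ack = d.Ack binds the concrete delivery as the receiver, so the priority queue can ack or nack long after the amqp.Delivery itself has gone out of scope. A minimal illustration of that wiring with stand-in types:

package main

import "fmt"

// delivery mimics the subset of amqp.Delivery the item layer needs.
type delivery struct{ tag uint64 }

func (d *delivery) Ack(multiple bool) error { fmt.Println("ack", d.tag); return nil }

func (d *delivery) Nack(multiple, requeue bool) error {
	fmt.Println("nack", d.tag, "requeue:", requeue)
	return nil
}

// options carries the callbacks, like Options.ack / Options.nack above.
type options struct {
	ack  func(multiple bool) error
	nack func(multiple, requeue bool) error
}

func main() {
	d := &delivery{tag: 42}
	// method values bind d as the receiver, mirroring
	// item.Options.ack = d.Ack in fromDelivery
	opts := &options{ack: d.Ack, nack: d.Nack}

	_ = opts.ack(false)        // worker finished OK
	_ = opts.nack(false, true) // failed; ask the broker to redeliver
}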
job.RRPriority: j.Options.Priority, - }, nil -} - -// unpack restores jobs.Options -func (j *JobConsumer) unpack(d amqp.Delivery) (*Item, error) { - item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ - multipleAsk: j.multipleAck, - requeue: j.requeueOnFail, - requeueFn: j.handleItem, - }} - - if _, ok := d.Headers[job.RRID].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID)) - } - - item.Ident = d.Headers[job.RRID].(string) - - if _, ok := d.Headers[job.RRJob].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) - } - - item.Job = d.Headers[job.RRJob].(string) - - if _, ok := d.Headers[job.RRPipeline].(string); ok { - item.Options.Pipeline = d.Headers[job.RRPipeline].(string) - } - - if h, ok := d.Headers[job.RRHeaders].([]byte); ok { - err := json.Unmarshal(h, &item.Headers) - if err != nil { - return nil, err - } - } - - if _, ok := d.Headers[job.RRDelay].(int64); ok { - item.Options.Delay = d.Headers[job.RRDelay].(int64) - } - - if _, ok := d.Headers[job.RRPriority]; !ok { - // set pipe's priority - item.Options.Priority = j.priority - } else { - item.Options.Priority = d.Headers[job.RRPriority].(int64) - } - - return item, nil -} diff --git a/plugins/jobs/drivers/amqp/listener.go b/plugins/jobs/drivers/amqp/listener.go deleted file mode 100644 index 0b1cd2dc..00000000 --- a/plugins/jobs/drivers/amqp/listener.go +++ /dev/null @@ -1,25 +0,0 @@ -package amqp - -import amqp "github.com/rabbitmq/amqp091-go" - -func (j *JobConsumer) listener(deliv <-chan amqp.Delivery) { - go func() { - for { //nolint:gosimple - select { - case msg, ok := <-deliv: - if !ok { - j.log.Info("delivery channel closed, leaving the rabbit listener") - return - } - - d, err := j.fromDelivery(msg) - if err != nil { - j.log.Error("amqp delivery convert", "error", err) - continue - } - // insert job into the main priority queue - j.pq.Insert(d) - } - } - }() -} diff --git a/plugins/jobs/drivers/amqp/plugin.go b/plugins/jobs/drivers/amqp/plugin.go index 624f4405..8797d20b 100644 --- a/plugins/jobs/drivers/amqp/plugin.go +++ b/plugins/jobs/drivers/amqp/plugin.go @@ -5,6 +5,7 @@ import ( "github.com/spiral/roadrunner/v2/pkg/events" priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/amqp/amqpjobs" "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -31,10 +32,10 @@ func (p *Plugin) Name() string { func (p *Plugin) Available() {} func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) + return amqpjobs.NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) } // FromPipeline constructs AMQP driver from pipeline func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return FromPipeline(pipe, p.log, p.cfg, e, pq) + return amqpjobs.FromPipeline(pipe, p.log, p.cfg, e, pq) } diff --git a/plugins/jobs/drivers/amqp/rabbit_init.go b/plugins/jobs/drivers/amqp/rabbit_init.go deleted file mode 100644 index 56ef10c8..00000000 --- a/plugins/jobs/drivers/amqp/rabbit_init.go +++ /dev/null @@ -1,57 +0,0 @@ -package amqp - -import ( - "github.com/spiral/errors" -) - -func (j *JobConsumer) initRabbitMQ() error { - const op = errors.Op("jobs_plugin_rmq_init") - // Channel opens a unique, concurrent server 
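unpack below guards every header read with a checked type assertion and inherits the pipeline's priority when the header is absent. The shape of that round-trip, sketched with illustrative rr_* keys standing in for the real job.RR* constants:

package main

import (
	"encoding/json"
	"fmt"
)

// headers plays the role of amqp.Table: a stringly-keyed bag whose
// values must be type-asserted on the way out.
type headers map[string]interface{}

func pack(id string, hdrs map[string][]string) (headers, error) {
	raw, err := json.Marshal(hdrs) // nested maps travel as JSON bytes
	if err != nil {
		return nil, err
	}
	// note: no priority header set -- the consumer must fall back
	return headers{"rr_id": id, "rr_headers": raw}, nil
}

func unpack(h headers, pipePriority int64) (id string, prio int64, err error) {
	id, ok := h["rr_id"].(string)
	if !ok {
		return "", 0, fmt.Errorf("missing header %q", "rr_id")
	}
	prio, ok = h["rr_priority"].(int64)
	if !ok {
		prio = pipePriority // absent priority inherits the pipeline's
	}
	return id, prio, nil
}

func main() {
	h, _ := pack("job-1", map[string][]string{"attempt": {"1"}})
	id, prio, _ := unpack(h, 10)
	fmt.Println(id, prio) // job-1 10
}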
channel to process the bulk of AMQP - // messages. Any error from methods on this receiver will render the receiver - // invalid and a new Channel should be opened. - channel, err := j.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - // declare an exchange (idempotent operation) - err = channel.ExchangeDeclare( - j.exchangeName, - j.exchangeType, - true, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // verify or declare a queue - q, err := channel.QueueDeclare( - j.queue, - false, - false, - j.exclusive, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // bind queue to the exchange - err = channel.QueueBind( - q.Name, - j.routingKey, - j.exchangeName, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - return channel.Close() -} diff --git a/plugins/jobs/drivers/amqp/redial.go b/plugins/jobs/drivers/amqp/redial.go deleted file mode 100644 index 8dc18b8f..00000000 --- a/plugins/jobs/drivers/amqp/redial.go +++ /dev/null @@ -1,141 +0,0 @@ -package amqp - -import ( - "time" - - "github.com/cenkalti/backoff/v4" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" -) - -// redialer used to redial to the rabbitmq in case of the connection interrupts -func (j *JobConsumer) redialer() { //nolint:gocognit - go func() { - const op = errors.Op("rabbitmq_redial") - - for { - select { - case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): - if err == nil { - return - } - - j.Lock() - - // trash the broken publishing channel - <-j.publishChan - - t := time.Now() - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeError, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Error: err, - Start: time.Now(), - }) - - expb := backoff.NewExponentialBackOff() - // set the retry timeout (minutes) - expb.MaxElapsedTime = j.retryTimeout - operation := func() error { - j.log.Warn("rabbitmq reconnecting, caused by", "error", err) - var dialErr error - j.conn, dialErr = amqp.Dial(j.connStr) - if dialErr != nil { - return errors.E(op, dialErr) - } - - j.log.Info("rabbitmq dial succeed. 
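The redialer below keys off Connection.NotifyClose: the registered channel receives an *amqp.Error when the connection fails, and is simply closed on a graceful shutdown, so the receive yields nil -- which is why the loop returns early on err == nil. A compact sketch (assumes a broker listening on the default local address):

package main

import (
	"log"

	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	conn, err := amqp.Dial("amqp://guest:guest@127.0.0.1:5672/")
	if err != nil {
		log.Fatal(err)
	}

	closeCh := conn.NotifyClose(make(chan *amqp.Error, 1))

	_ = conn.Close() // graceful close: closeCh is closed, receive yields nil

	if amqpErr := <-closeCh; amqpErr != nil {
		log.Println("connection lost:", amqpErr, "-- redial with backoff")
	} else {
		log.Println("clean shutdown, no redial needed")
	}
}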
trying to redeclare queues and subscribers") - - // re-init connection - errInit := j.initRabbitMQ() - if errInit != nil { - j.log.Error("rabbitmq dial", "error", errInit) - return errInit - } - - // redeclare consume channel - var errConnCh error - j.consumeChan, errConnCh = j.conn.Channel() - if errConnCh != nil { - return errors.E(op, errConnCh) - } - - // redeclare publish channel - pch, errPubCh := j.conn.Channel() - if errPubCh != nil { - return errors.E(op, errPubCh) - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // put the fresh publishing channel - j.publishChan <- pch - // restart listener - j.listener(deliv) - - j.log.Info("queues and subscribers redeclared successfully") - - return nil - } - - retryErr := backoff.Retry(operation, expb) - if retryErr != nil { - j.Unlock() - j.log.Error("backoff failed", "error", retryErr) - return - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Start: t, - Elapsed: time.Since(t), - }) - - j.Unlock() - - case <-j.stopCh: - if j.publishChan != nil { - pch := <-j.publishChan - err := pch.Close() - if err != nil { - j.log.Error("publish channel close", "error", err) - } - } - - if j.consumeChan != nil { - err := j.consumeChan.Close() - if err != nil { - j.log.Error("consume channel close", "error", err) - } - } - if j.conn != nil { - err := j.conn.Close() - if err != nil { - j.log.Error("amqp connection close", "error", err) - } - } - - return - } - } - }() -} diff --git a/plugins/jobs/drivers/beanstalk/consumer.go b/plugins/jobs/drivers/beanstalk/consumer.go index 6323148b..5ef89983 100644 --- a/plugins/jobs/drivers/beanstalk/consumer.go +++ b/plugins/jobs/drivers/beanstalk/consumer.go @@ -19,7 +19,7 @@ import ( "github.com/spiral/roadrunner/v2/utils" ) -type JobConsumer struct { +type consumer struct { log logger.Logger eh events.Handler pq priorityqueue.Queue @@ -43,7 +43,7 @@ type JobConsumer struct { requeueCh chan *Item } -func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { +func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { const op = errors.Op("new_beanstalk_consumer") // PARSE CONFIGURATION ------- @@ -86,7 +86,7 @@ func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Config } // initialize job consumer - jc := &JobConsumer{ + jc := &consumer{ pq: pq, log: log, eh: e, @@ -108,7 +108,7 @@ func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Config return jc, nil } -func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { +func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { const op = errors.Op("new_beanstalk_consumer") // PARSE CONFIGURATION ------- @@ -139,7 +139,7 @@ func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configu } // initialize job consumer - jc := &JobConsumer{ + jc := &consumer{ pq: pq, log: log, eh: e, @@ -160,7 +160,7 @@ func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configu return jc, nil } -func (j *JobConsumer) 
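Reconnection above is wrapped in backoff.Retry from github.com/cenkalti/backoff/v4, with MaxElapsedTime bounding the whole attempt at the driver's five-minute retryTimeout. A self-contained sketch of the same retry shape, using a fake dial that fails twice before succeeding:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0

	// operation stands in for amqp.Dial plus topology re-declaration
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("broker not ready")
		}
		return nil
	}

	expb := backoff.NewExponentialBackOff()
	expb.InitialInterval = 10 * time.Millisecond
	expb.MaxElapsedTime = 5 * time.Minute // analogous to the driver's retryTimeout

	if err := backoff.Retry(operation, expb); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Println("reconnected after", attempts, "attempts")
}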
Push(ctx context.Context, jb *job.Job) error { +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { const op = errors.Op("beanstalk_push") // check if the pipeline registered @@ -178,7 +178,7 @@ func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { return nil } -func (j *JobConsumer) handleItem(ctx context.Context, item *Item) error { +func (j *consumer) handleItem(ctx context.Context, item *Item) error { const op = errors.Op("beanstalk_handle_item") bb := new(bytes.Buffer) @@ -215,14 +215,14 @@ func (j *JobConsumer) handleItem(ctx context.Context, item *Item) error { return nil } -func (j *JobConsumer) Register(_ context.Context, p *pipeline.Pipeline) error { +func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { // register the pipeline j.pipeline.Store(p) return nil } // State https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L514 -func (j *JobConsumer) State(ctx context.Context) (*jobState.State, error) { +func (j *consumer) State(ctx context.Context) (*jobState.State, error) { const op = errors.Op("beanstalk_state") stat, err := j.pool.Stats(ctx) if err != nil { @@ -258,7 +258,7 @@ func (j *JobConsumer) State(ctx context.Context) (*jobState.State, error) { return out, nil } -func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { +func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { const op = errors.Op("beanstalk_run") // check if the pipeline registered @@ -282,7 +282,7 @@ func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { return nil } -func (j *JobConsumer) Stop(context.Context) error { +func (j *consumer) Stop(context.Context) error { pipe := j.pipeline.Load().(*pipeline.Pipeline) if atomic.LoadUint32(&j.listeners) == 1 { @@ -299,7 +299,7 @@ func (j *JobConsumer) Stop(context.Context) error { return nil } -func (j *JobConsumer) Pause(_ context.Context, p string) { +func (j *consumer) Pause(_ context.Context, p string) { // load atomic value pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { @@ -326,7 +326,7 @@ func (j *JobConsumer) Pause(_ context.Context, p string) { }) } -func (j *JobConsumer) Resume(_ context.Context, p string) { +func (j *consumer) Resume(_ context.Context, p string) { // load atomic value pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { diff --git a/plugins/jobs/drivers/beanstalk/item.go b/plugins/jobs/drivers/beanstalk/item.go index f1d7ac76..0a6cd560 100644 --- a/plugins/jobs/drivers/beanstalk/item.go +++ b/plugins/jobs/drivers/beanstalk/item.go @@ -134,7 +134,7 @@ func (i *Item) pack(b *bytes.Buffer) error { return nil } -func (j *JobConsumer) unpack(id uint64, data []byte, out *Item) error { +func (j *consumer) unpack(id uint64, data []byte, out *Item) error { err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out) if err != nil { return err diff --git a/plugins/jobs/drivers/beanstalk/listen.go b/plugins/jobs/drivers/beanstalk/listen.go index f1385e70..6bb159ea 100644 --- a/plugins/jobs/drivers/beanstalk/listen.go +++ b/plugins/jobs/drivers/beanstalk/listen.go @@ -4,7 +4,7 @@ import ( "github.com/beanstalkd/go-beanstalk" ) -func (j *JobConsumer) listen() { +func (j *consumer) listen() { for { select { case <-j.stopCh: diff --git a/plugins/jobs/drivers/ephemeral/consumer.go b/plugins/jobs/drivers/ephemeral/consumer.go index f0992cd6..91b8eda9 100644 --- a/plugins/jobs/drivers/ephemeral/consumer.go +++ b/plugins/jobs/drivers/ephemeral/consumer.go @@ -25,7 +25,7 @@ type Config struct { 
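The JobConsumer -> consumer rename running through this and the following drivers unexports the concrete type while keeping the constructors exported: callers only ever hold the driver through the jobs.Consumer interface, so the concrete name was never meaningful API. A compressed sketch of the resulting shape (illustrative names, not the actual plugin types):

package main

import "fmt"

// Consumer is the only surface callers program against.
type Consumer interface {
	Push(job string) error
}

// consumer is unexported: the concrete type is an implementation
// detail, mirroring the rename in this patch.
type consumer struct{ queue string }

func (c *consumer) Push(job string) error {
	fmt.Println("pushed", job, "to", c.queue)
	return nil
}

// NewConsumer stays exported but returns the unexported concrete type;
// the plugin layer immediately stores the result as a Consumer.
func NewConsumer(queue string) (*consumer, error) {
	return &consumer{queue: queue}, nil
}

func main() {
	var c Consumer
	c, _ = NewConsumer("emails")
	_ = c.Push("welcome-email")
}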
Prefetch uint64 `mapstructure:"prefetch"` } -type JobConsumer struct { +type consumer struct { cfg *Config log logger.Logger eh events.Handler @@ -43,10 +43,10 @@ type JobConsumer struct { stopCh chan struct{} } -func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { +func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { const op = errors.Op("new_ephemeral_pipeline") - jb := &JobConsumer{ + jb := &consumer{ log: log, pq: pq, eh: eh, @@ -71,8 +71,8 @@ func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh return jb, nil } -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { - jb := &JobConsumer{ +func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { + jb := &consumer{ log: log, pq: pq, eh: eh, @@ -88,7 +88,7 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Hand return jb, nil } -func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { const op = errors.Op("ephemeral_push") // check if the pipeline registered @@ -105,7 +105,7 @@ func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { return nil } -func (j *JobConsumer) State(_ context.Context) (*jobState.State, error) { +func (j *consumer) State(_ context.Context) (*jobState.State, error) { pipe := j.pipeline.Load().(*pipeline.Pipeline) return &jobState.State{ Pipeline: pipe.Name(), @@ -117,12 +117,12 @@ func (j *JobConsumer) State(_ context.Context) (*jobState.State, error) { }, nil } -func (j *JobConsumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { +func (j *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { j.pipeline.Store(pipeline) return nil } -func (j *JobConsumer) Pause(_ context.Context, p string) { +func (j *consumer) Pause(_ context.Context, p string) { pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { j.log.Error("no such pipeline", "requested pause on: ", p) @@ -149,7 +149,7 @@ func (j *JobConsumer) Pause(_ context.Context, p string) { }) } -func (j *JobConsumer) Resume(_ context.Context, p string) { +func (j *consumer) Resume(_ context.Context, p string) { pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { j.log.Error("no such pipeline", "requested resume on: ", p) @@ -175,7 +175,7 @@ func (j *JobConsumer) Resume(_ context.Context, p string) { } // Run is no-op for the ephemeral -func (j *JobConsumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { +func (j *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { j.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), @@ -185,7 +185,7 @@ func (j *JobConsumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { return nil } -func (j *JobConsumer) Stop(ctx context.Context) error { +func (j *consumer) Stop(ctx context.Context) error { const op = errors.Op("ephemeral_plugin_stop") pipe := j.pipeline.Load().(*pipeline.Pipeline) @@ -207,7 +207,7 @@ func (j *JobConsumer) Stop(ctx context.Context) error { } } -func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { const op = 
errors.Op("ephemeral_handle_request") // handle timeouts // theoretically, some bad user may send millions requests with a delay and produce a billion (for example) @@ -245,7 +245,7 @@ func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { } } -func (j *JobConsumer) consume() { +func (j *consumer) consume() { go func() { // redirect for { diff --git a/plugins/jobs/drivers/sqs/consumer.go b/plugins/jobs/drivers/sqs/consumer.go index 17af1caa..23203190 100644 --- a/plugins/jobs/drivers/sqs/consumer.go +++ b/plugins/jobs/drivers/sqs/consumer.go @@ -24,7 +24,7 @@ import ( "github.com/spiral/roadrunner/v2/plugins/logger" ) -type JobConsumer struct { +type consumer struct { sync.Mutex pq priorityqueue.Queue log logger.Logger @@ -56,7 +56,7 @@ type JobConsumer struct { pauseCh chan struct{} } -func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { +func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { const op = errors.Op("new_sqs_consumer") // if no such key - error @@ -88,7 +88,7 @@ func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configure globalCfg.InitDefault() // initialize job consumer - jb := &JobConsumer{ + jb := &consumer{ pq: pq, log: log, eh: e, @@ -142,7 +142,7 @@ func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configure return jb, nil } -func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { +func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { const op = errors.Op("new_sqs_consumer") // if no global section @@ -173,7 +173,7 @@ func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Conf } // initialize job consumer - jb := &JobConsumer{ + jb := &consumer{ pq: pq, log: log, eh: e, @@ -227,7 +227,7 @@ func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Conf return jb, nil } -func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { const op = errors.Op("sqs_push") // check if the pipeline registered @@ -250,7 +250,7 @@ func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { return nil } -func (j *JobConsumer) State(ctx context.Context) (*jobState.State, error) { +func (j *consumer) State(ctx context.Context) (*jobState.State, error) { const op = errors.Op("sqs_state") attr, err := j.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{ QueueUrl: j.queueURL, @@ -292,12 +292,12 @@ func (j *JobConsumer) State(ctx context.Context) (*jobState.State, error) { return out, nil } -func (j *JobConsumer) Register(_ context.Context, p *pipeline.Pipeline) error { +func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { j.pipeline.Store(p) return nil } -func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { +func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { const op = errors.Op("sqs_run") j.Lock() @@ -323,7 +323,7 @@ func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { return nil } -func (j *JobConsumer) Stop(context.Context) error { +func (j *consumer) Stop(context.Context) error { j.pauseCh <- struct{}{} pipe := 
j.pipeline.Load().(*pipeline.Pipeline) @@ -336,7 +336,7 @@ func (j *JobConsumer) Stop(context.Context) error { return nil } -func (j *JobConsumer) Pause(_ context.Context, p string) { +func (j *consumer) Pause(_ context.Context, p string) { // load atomic value pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { @@ -364,7 +364,7 @@ func (j *JobConsumer) Pause(_ context.Context, p string) { }) } -func (j *JobConsumer) Resume(_ context.Context, p string) { +func (j *consumer) Resume(_ context.Context, p string) { // load atomic value pipe := j.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { @@ -393,7 +393,7 @@ func (j *JobConsumer) Resume(_ context.Context, p string) { }) } -func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { d, err := msg.pack(j.queueURL) if err != nil { return err diff --git a/plugins/jobs/drivers/sqs/item.go b/plugins/jobs/drivers/sqs/item.go index df72b2e5..996adf6c 100644 --- a/plugins/jobs/drivers/sqs/item.go +++ b/plugins/jobs/drivers/sqs/item.go @@ -192,7 +192,7 @@ func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) { }, nil } -func (j *JobConsumer) unpack(msg *types.Message) (*Item, error) { +func (j *consumer) unpack(msg *types.Message) (*Item, error) { const op = errors.Op("sqs_unpack") // reserved if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok { diff --git a/plugins/jobs/drivers/sqs/listener.go b/plugins/jobs/drivers/sqs/listener.go index 9efef90d..a4280af2 100644 --- a/plugins/jobs/drivers/sqs/listener.go +++ b/plugins/jobs/drivers/sqs/listener.go @@ -18,7 +18,7 @@ const ( NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue" ) -func (j *JobConsumer) listen(ctx context.Context) { //nolint:gocognit +func (j *consumer) listen(ctx context.Context) { //nolint:gocognit for { select { case <-j.pauseCh: diff --git a/plugins/kv/drivers/boltdb/config.go b/plugins/kv/drivers/boltdb/config.go deleted file mode 100644 index 0beb209b..00000000 --- a/plugins/kv/drivers/boltdb/config.go +++ /dev/null @@ -1,30 +0,0 @@ -package boltdb - -type Config struct { - // File is boltDB file. 
No need to create it by your own, - // boltdb driver is able to create the file, or read existing - File string - // Bucket to store data in boltDB - bucket string - // db file permissions - Permissions int - // timeout - Interval int `mapstructure:"interval"` -} - -// InitDefaults initializes default values for the boltdb -func (s *Config) InitDefaults() { - s.bucket = "default" - - if s.File == "" { - s.File = "rr.db" // default file name - } - - if s.Permissions == 0 { - s.Permissions = 0777 // free for all - } - - if s.Interval == 0 { - s.Interval = 60 // default is 60 seconds timeout - } -} diff --git a/plugins/kv/drivers/boltdb/driver.go b/plugins/kv/drivers/boltdb/driver.go deleted file mode 100644 index 15a5674f..00000000 --- a/plugins/kv/drivers/boltdb/driver.go +++ /dev/null @@ -1,459 +0,0 @@ -package boltdb - -import ( - "bytes" - "encoding/gob" - "os" - "strings" - "sync" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" - kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" - "github.com/spiral/roadrunner/v2/utils" - bolt "go.etcd.io/bbolt" -) - -type Driver struct { - clearMu sync.RWMutex - // db instance - DB *bolt.DB - // name should be UTF-8 - bucket []byte - log logger.Logger - cfg *Config - // gc contains key which are contain timeouts - gc sync.Map - // default timeout for cache cleanup is 1 minute - timeout time.Duration - - // stop is used to stop keys GC and close boltdb connection - stop chan struct{} -} - -func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (*Driver, error) { - const op = errors.Op("new_boltdb_driver") - - d := &Driver{ - log: log, - stop: stop, - } - - err := cfgPlugin.UnmarshalKey(key, &d.cfg) - if err != nil { - return nil, errors.E(op, err) - } - - // add default values - d.cfg.InitDefaults() - - d.bucket = []byte(d.cfg.bucket) - d.timeout = time.Duration(d.cfg.Interval) * time.Second - d.gc = sync.Map{} - - db, err := bolt.Open(d.cfg.File, os.FileMode(d.cfg.Permissions), &bolt.Options{ - Timeout: time.Second * 20, - NoGrowSync: false, - NoFreelistSync: false, - ReadOnly: false, - NoSync: false, - }) - - if err != nil { - return nil, errors.E(op, err) - } - - d.DB = db - - // create bucket if it does not exist - // tx.Commit invokes via the db.Update - err = db.Update(func(tx *bolt.Tx) error { - const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists([]byte(d.cfg.bucket)) - if err != nil { - return errors.E(op, upOp) - } - return nil - }) - - if err != nil { - return nil, errors.E(op, err) - } - - go d.startGCLoop() - - return d, nil -} - -func (d *Driver) Has(keys ...string) (map[string]bool, error) { - const op = errors.Op("boltdb_driver_has") - d.log.Debug("boltdb HAS method called", "args", keys) - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - m := make(map[string]bool, len(keys)) - - // this is readable transaction - err := d.DB.View(func(tx *bolt.Tx) error { - // Get retrieves the value for a key in the bucket. - // Returns a nil value if the key does not exist or if the key is a nested bucket. - // The returned value is only valid for the life of the transaction. 
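NewBoltDBDriver below opens the database file with a 20-second file-lock timeout and asserts the bucket inside a writable db.Update transaction, which commits on a nil return and rolls back otherwise. A minimal standalone sketch of that open/Update/View flow, where rr.db and 0777 mirror the defaults above:

package main

import (
	"fmt"
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Timeout bounds how long Open waits on a file lock held elsewhere
	db, err := bolt.Open("rr.db", 0777, &bolt.Options{Timeout: 20 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	bucket := []byte("default")

	// writable transaction: Update commits on nil, rolls back on error
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// read-only transaction: the returned slice is valid only inside View
	_ = db.View(func(tx *bolt.Tx) error {
		val := tx.Bucket(bucket).Get([]byte("key"))
		fmt.Println(string(val))
		return nil
	})
}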
- for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return errors.E(op, errors.EmptyKey) - } - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - exist := b.Get([]byte(keys[i])) - if exist != nil { - m[keys[i]] = true - } - } - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - d.log.Debug("boltdb HAS method finished") - return m, nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (d *Driver) Get(key string) ([]byte, error) { - const op = errors.Op("boltdb_driver_get") - // to get cases like " " - keyTrimmed := strings.TrimSpace(key) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - - var val []byte - err := d.DB.View(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - val = b.Get([]byte(key)) - - // try to decode values - if val != nil { - buf := bytes.NewReader(val) - decoder := gob.NewDecoder(buf) - - var i string - err := decoder.Decode(&i) - if err != nil { - // unsafe (w/o runes) convert - return errors.E(op, err) - } - - // set the value - val = []byte(i) - } - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - return val, nil -} - -func (d *Driver) MGet(keys ...string) (map[string][]byte, error) { - const op = errors.Op("boltdb_driver_mget") - // defense - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - } - - m := make(map[string][]byte, len(keys)) - - err := d.DB.View(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - - buf := new(bytes.Buffer) - var out []byte - buf.Grow(100) - for i := range keys { - value := b.Get([]byte(keys[i])) - buf.Write(value) - // allocate enough space - dec := gob.NewDecoder(buf) - if value != nil { - err := dec.Decode(&out) - if err != nil { - return errors.E(op, err) - } - m[keys[i]] = out - buf.Reset() - out = nil - } - } - - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - return m, nil -} - -// Set puts the K/V to the bolt -func (d *Driver) Set(items ...*kvv1.Item) error { - const op = errors.Op("boltdb_driver_set") - if items == nil { - return errors.E(op, errors.NoKeys) - } - - // start writable transaction - tx, err := d.DB.Begin(true) - if err != nil { - return errors.E(op, err) - } - defer func() { - err = tx.Commit() - if err != nil { - errRb := tx.Rollback() - if errRb != nil { - d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) - } - } - }() - - b := tx.Bucket(d.bucket) - // use access by index to avoid copying - for i := range items { - // performance note: pass a prepared bytes slice with initial cap - // we can't move buf and gob out of loop, because we need to clear both from data - // but gob will contain (w/o re-init) the past data - buf := new(bytes.Buffer) - encoder := gob.NewEncoder(buf) - if errors.Is(errors.EmptyItem, err) { - return errors.E(op, errors.EmptyItem) - } - - // Encode value - err = encoder.Encode(&items[i].Value) - if err != nil { - return errors.E(op, err) - } - // buf.Bytes will copy the underlying slice. 
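Values come back from bbolt as gob-encoded bytes, so Get and MGet above run every hit through a gob decoder before returning it. The round-trip in isolation:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

func main() {
	// encode, as the kv Set path does before b.Put
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode("hello"); err != nil {
		log.Fatal(err)
	}
	stored := buf.Bytes() // what ends up in the bolt bucket

	// decode, as Get/MGet do after b.Get
	var out string
	if err := gob.NewDecoder(bytes.NewReader(stored)).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // hello
}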
Take a look in case of performance problems - err = b.Put([]byte(items[i].Key), buf.Bytes()) - if err != nil { - return errors.E(op, err) - } - - // if there are no errors, and TTL > 0, we put the key with timeout to the hashmap, for future check - // we do not need mutex here, since we use sync.Map - if items[i].Timeout != "" { - // check correctness of provided TTL - _, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return errors.E(op, err) - } - // Store key TTL in the separate map - d.gc.Store(items[i].Key, items[i].Timeout) - } - - buf.Reset() - } - - return nil -} - -// Delete all keys from DB -func (d *Driver) Delete(keys ...string) error { - const op = errors.Op("boltdb_driver_delete") - if keys == nil { - return errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for _, key := range keys { - keyTrimmed := strings.TrimSpace(key) - if keyTrimmed == "" { - return errors.E(op, errors.EmptyKey) - } - } - - // start writable transaction - tx, err := d.DB.Begin(true) - if err != nil { - return errors.E(op, err) - } - - defer func() { - err = tx.Commit() - if err != nil { - errRb := tx.Rollback() - if errRb != nil { - d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) - } - } - }() - - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - - for _, key := range keys { - err = b.Delete([]byte(key)) - if err != nil { - return errors.E(op, err) - } - } - - return nil -} - -// MExpire sets the expiration time to the key -// If key already has the expiration time, it will be overwritten -func (d *Driver) MExpire(items ...*kvv1.Item) error { - const op = errors.Op("boltdb_driver_mexpire") - for i := range items { - if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" { - return errors.E(op, errors.Str("should set timeout and at least one key")) - } - - // verify provided TTL - _, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return errors.E(op, err) - } - - d.gc.Store(items[i].Key, items[i].Timeout) - } - return nil -} - -func (d *Driver) TTL(keys ...string) (map[string]string, error) { - const op = errors.Op("boltdb_driver_ttl") - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - } - - m := make(map[string]string, len(keys)) - - for i := range keys { - if item, ok := d.gc.Load(keys[i]); ok { - // a little bit dangerous operation, but user can't store value other that kv.Item.TTL --> int64 - m[keys[i]] = item.(string) - } - } - return m, nil -} - -func (d *Driver) Clear() error { - err := d.DB.Update(func(tx *bolt.Tx) error { - err := tx.DeleteBucket(d.bucket) - if err != nil { - d.log.Error("boltdb delete bucket", "error", err) - return err - } - - _, err = tx.CreateBucket(d.bucket) - if err != nil { - d.log.Error("boltdb create bucket", "error", err) - return err - } - - return nil - }) - - if err != nil { - d.log.Error("clear transaction failed", "error", err) - return err - } - - d.clearMu.Lock() - d.gc = sync.Map{} - d.clearMu.Unlock() - - return nil -} - -// ========================= PRIVATE ================================= - -func (d *Driver) startGCLoop() { //nolint:gocognit - go func() { - t := time.NewTicker(d.timeout) - defer t.Stop() - for { - select { - case <-t.C: - d.clearMu.RLock() - - // calculate current time before loop started to 
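TTLs never touch the bolt file: Set and MExpire park an RFC3339 deadline per key in the gc sync.Map, and the GC loop below ranges over it, deleting keys whose deadline has passed. The sweep in miniature:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var gc sync.Map

	// keys carry their deadline as an RFC3339 string, like d.gc above
	gc.Store("session:1", time.Now().Add(-time.Minute).Format(time.RFC3339)) // expired
	gc.Store("session:2", time.Now().Add(time.Hour).Format(time.RFC3339))    // still live

	now := time.Now() // snapshot once so the whole sweep is consistent
	gc.Range(func(key, value interface{}) bool {
		deadline, err := time.Parse(time.RFC3339, value.(string))
		if err != nil {
			return false // a malformed entry aborts the sweep
		}
		if now.After(deadline) {
			gc.Delete(key)
			fmt.Println("expired:", key)
		}
		return true // keep iterating
	})
}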
be fair - now := time.Now() - d.gc.Range(func(key, value interface{}) bool { - const op = errors.Op("boltdb_plugin_gc") - k := key.(string) - v, err := time.Parse(time.RFC3339, value.(string)) - if err != nil { - return false - } - - if now.After(v) { - // time expired - d.gc.Delete(k) - d.log.Debug("key deleted", "key", k) - err := d.DB.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - err := b.Delete(utils.AsBytes(k)) - if err != nil { - return errors.E(op, err) - } - return nil - }) - if err != nil { - d.log.Error("error during the gc phase of update", "error", err) - return false - } - } - return true - }) - - d.clearMu.RUnlock() - case <-d.stop: - err := d.DB.Close() - if err != nil { - d.log.Error("error") - } - return - } - } - }() -} diff --git a/plugins/kv/drivers/boltdb/plugin.go b/plugins/kv/drivers/boltdb/plugin.go deleted file mode 100644 index c839130f..00000000 --- a/plugins/kv/drivers/boltdb/plugin.go +++ /dev/null @@ -1,71 +0,0 @@ -package boltdb - -import ( - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/common/kv" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - PluginName string = "boltdb" - RootPluginName string = "kv" -) - -// Plugin BoltDB K/V storage. -type Plugin struct { - cfgPlugin config.Configurer - // logger - log logger.Logger - // stop is used to stop keys GC and close boltdb connection - stop chan struct{} - - drivers uint -} - -func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - if !cfg.Has(RootPluginName) { - return errors.E(errors.Disabled) - } - - s.stop = make(chan struct{}) - s.log = log - s.cfgPlugin = cfg - return nil -} - -// Serve is noop here -func (s *Plugin) Serve() chan error { - return make(chan error, 1) -} - -func (s *Plugin) Stop() error { - if s.drivers > 0 { - for i := uint(0); i < s.drivers; i++ { - // send close signal to every driver - s.stop <- struct{}{} - } - } - return nil -} - -func (s *Plugin) KVConstruct(key string) (kv.Storage, error) { - const op = errors.Op("boltdb_plugin_provide") - st, err := NewBoltDBDriver(s.log, key, s.cfgPlugin, s.stop) - if err != nil { - return nil, errors.E(op, err) - } - - // save driver number to release resources after Stop - s.drivers++ - - return st, nil -} - -// Name returns plugin name -func (s *Plugin) Name() string { - return PluginName -} - -// Available interface implementation -func (s *Plugin) Available() {} diff --git a/plugins/kv/plugin.go b/plugins/kv/plugin.go index 53fade97..9a19f96c 100644 --- a/plugins/kv/plugin.go +++ b/plugins/kv/plugin.go @@ -109,6 +109,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit // config key for the particular sub-driver kv.memcached configKey := fmt.Sprintf("%s.%s", PluginName, k) // at this point we know, that driver field present in the configuration + // TODO(rustatian): refactor, made generic, with checks like in the broadcast, websockets or jobs switch v.(map[string]interface{})[driver] { case memcached: if _, ok := p.constructors[memcached]; !ok { @@ -220,5 +221,4 @@ func (p *Plugin) Name() string { } // Available interface implementation -func (p *Plugin) Available() { -} +func (p *Plugin) Available() {} -- cgit v1.2.3 From efb3efa98c8555815330274f0618bfc080f4c65c Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Thu, 26 Aug 2021 18:32:51 +0300 Subject: Move drivers to the plugin's root. Fix #771, add tests. 
Signed-off-by: Valery Piashchynski --- plugins/amqp/amqpjobs/config.go | 67 +++ plugins/amqp/amqpjobs/consumer.go | 512 ++++++++++++++++++++++ plugins/amqp/amqpjobs/item.go | 239 ++++++++++ plugins/amqp/amqpjobs/listener.go | 25 ++ plugins/amqp/amqpjobs/rabbit_init.go | 57 +++ plugins/amqp/amqpjobs/redial.go | 141 ++++++ plugins/amqp/plugin.go | 41 ++ plugins/beanstalk/config.go | 53 +++ plugins/beanstalk/connection.go | 223 ++++++++++ plugins/beanstalk/consumer.go | 360 +++++++++++++++ plugins/beanstalk/encode_test.go | 75 ++++ plugins/beanstalk/item.go | 147 +++++++ plugins/beanstalk/listen.go | 39 ++ plugins/beanstalk/plugin.go | 47 ++ plugins/boltdb/boltjobs/listener.go | 2 +- plugins/broadcast/plugin.go | 63 +-- plugins/ephemeral/consumer.go | 274 ++++++++++++ plugins/ephemeral/item.go | 133 ++++++ plugins/ephemeral/plugin.go | 41 ++ plugins/jobs/drivers/amqp/amqpjobs/config.go | 67 --- plugins/jobs/drivers/amqp/amqpjobs/consumer.go | 512 ---------------------- plugins/jobs/drivers/amqp/amqpjobs/item.go | 239 ---------- plugins/jobs/drivers/amqp/amqpjobs/listener.go | 25 -- plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go | 57 --- plugins/jobs/drivers/amqp/amqpjobs/redial.go | 141 ------ plugins/jobs/drivers/amqp/plugin.go | 41 -- plugins/jobs/drivers/beanstalk/config.go | 53 --- plugins/jobs/drivers/beanstalk/connection.go | 223 ---------- plugins/jobs/drivers/beanstalk/consumer.go | 360 --------------- plugins/jobs/drivers/beanstalk/encode_test.go | 75 ---- plugins/jobs/drivers/beanstalk/item.go | 147 ------- plugins/jobs/drivers/beanstalk/listen.go | 39 -- plugins/jobs/drivers/beanstalk/plugin.go | 47 -- plugins/jobs/drivers/ephemeral/consumer.go | 274 ------------ plugins/jobs/drivers/ephemeral/item.go | 133 ------ plugins/jobs/drivers/ephemeral/plugin.go | 41 -- plugins/jobs/drivers/sqs/config.go | 114 ----- plugins/jobs/drivers/sqs/consumer.go | 411 ----------------- plugins/jobs/drivers/sqs/item.go | 247 ----------- plugins/jobs/drivers/sqs/listener.go | 87 ---- plugins/jobs/drivers/sqs/plugin.go | 39 -- plugins/kv/drivers/memcached/config.go | 12 - plugins/kv/drivers/memcached/driver.go | 248 ----------- plugins/kv/drivers/memcached/plugin.go | 48 -- plugins/kv/plugin.go | 116 +---- plugins/memcached/config.go | 12 + plugins/memcached/driver.go | 248 +++++++++++ plugins/memcached/plugin.go | 48 ++ plugins/sqs/config.go | 114 +++++ plugins/sqs/consumer.go | 411 +++++++++++++++++ plugins/sqs/item.go | 247 +++++++++++ plugins/sqs/listener.go | 87 ++++ plugins/sqs/plugin.go | 39 ++ 53 files changed, 3713 insertions(+), 3828 deletions(-) create mode 100644 plugins/amqp/amqpjobs/config.go create mode 100644 plugins/amqp/amqpjobs/consumer.go create mode 100644 plugins/amqp/amqpjobs/item.go create mode 100644 plugins/amqp/amqpjobs/listener.go create mode 100644 plugins/amqp/amqpjobs/rabbit_init.go create mode 100644 plugins/amqp/amqpjobs/redial.go create mode 100644 plugins/amqp/plugin.go create mode 100644 plugins/beanstalk/config.go create mode 100644 plugins/beanstalk/connection.go create mode 100644 plugins/beanstalk/consumer.go create mode 100644 plugins/beanstalk/encode_test.go create mode 100644 plugins/beanstalk/item.go create mode 100644 plugins/beanstalk/listen.go create mode 100644 plugins/beanstalk/plugin.go create mode 100644 plugins/ephemeral/consumer.go create mode 100644 plugins/ephemeral/item.go create mode 100644 plugins/ephemeral/plugin.go delete mode 100644 plugins/jobs/drivers/amqp/amqpjobs/config.go delete mode 100644 
plugins/jobs/drivers/amqp/amqpjobs/consumer.go delete mode 100644 plugins/jobs/drivers/amqp/amqpjobs/item.go delete mode 100644 plugins/jobs/drivers/amqp/amqpjobs/listener.go delete mode 100644 plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go delete mode 100644 plugins/jobs/drivers/amqp/amqpjobs/redial.go delete mode 100644 plugins/jobs/drivers/amqp/plugin.go delete mode 100644 plugins/jobs/drivers/beanstalk/config.go delete mode 100644 plugins/jobs/drivers/beanstalk/connection.go delete mode 100644 plugins/jobs/drivers/beanstalk/consumer.go delete mode 100644 plugins/jobs/drivers/beanstalk/encode_test.go delete mode 100644 plugins/jobs/drivers/beanstalk/item.go delete mode 100644 plugins/jobs/drivers/beanstalk/listen.go delete mode 100644 plugins/jobs/drivers/beanstalk/plugin.go delete mode 100644 plugins/jobs/drivers/ephemeral/consumer.go delete mode 100644 plugins/jobs/drivers/ephemeral/item.go delete mode 100644 plugins/jobs/drivers/ephemeral/plugin.go delete mode 100644 plugins/jobs/drivers/sqs/config.go delete mode 100644 plugins/jobs/drivers/sqs/consumer.go delete mode 100644 plugins/jobs/drivers/sqs/item.go delete mode 100644 plugins/jobs/drivers/sqs/listener.go delete mode 100644 plugins/jobs/drivers/sqs/plugin.go delete mode 100644 plugins/kv/drivers/memcached/config.go delete mode 100644 plugins/kv/drivers/memcached/driver.go delete mode 100644 plugins/kv/drivers/memcached/plugin.go create mode 100644 plugins/memcached/config.go create mode 100644 plugins/memcached/driver.go create mode 100644 plugins/memcached/plugin.go create mode 100644 plugins/sqs/config.go create mode 100644 plugins/sqs/consumer.go create mode 100644 plugins/sqs/item.go create mode 100644 plugins/sqs/listener.go create mode 100644 plugins/sqs/plugin.go (limited to 'plugins') diff --git a/plugins/amqp/amqpjobs/config.go b/plugins/amqp/amqpjobs/config.go new file mode 100644 index 00000000..ac2f6e53 --- /dev/null +++ b/plugins/amqp/amqpjobs/config.go @@ -0,0 +1,67 @@ +package amqpjobs + +// pipeline rabbitmq info +const ( + exchangeKey string = "exchange" + exchangeType string = "exchange_type" + queue string = "queue" + routingKey string = "routing_key" + prefetch string = "prefetch" + exclusive string = "exclusive" + priority string = "priority" + multipleAsk string = "multiple_ask" + requeueOnFail string = "requeue_on_fail" + + dlx string = "x-dead-letter-exchange" + dlxRoutingKey string = "x-dead-letter-routing-key" + dlxTTL string = "x-message-ttl" + dlxExpires string = "x-expires" + + contentType string = "application/octet-stream" +) + +type GlobalCfg struct { + Addr string `mapstructure:"addr"` +} + +// Config is used to parse pipeline configuration +type Config struct { + Prefetch int `mapstructure:"prefetch"` + Queue string `mapstructure:"queue"` + Priority int64 `mapstructure:"priority"` + Exchange string `mapstructure:"exchange"` + ExchangeType string `mapstructure:"exchange_type"` + RoutingKey string `mapstructure:"routing_key"` + Exclusive bool `mapstructure:"exclusive"` + MultipleAck bool `mapstructure:"multiple_ask"` + RequeueOnFail bool `mapstructure:"requeue_on_fail"` +} + +func (c *Config) InitDefault() { + // all options should be in sync with the pipeline defaults in the FromPipeline method + if c.ExchangeType == "" { + c.ExchangeType = "direct" + } + + if c.Exchange == "" { + c.Exchange = "amqp.default" + } + + if c.Queue == "" { + c.Queue = "default" + } + + if c.Prefetch == 0 { + c.Prefetch = 10 + } + + if c.Priority == 0 { + c.Priority = 10 + } +} + +func (c *GlobalCfg) InitDefault() 
{ + if c.Addr == "" { + c.Addr = "amqp://guest:guest@127.0.0.1:5672/" + } +} diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go new file mode 100644 index 00000000..1931ceaa --- /dev/null +++ b/plugins/amqp/amqpjobs/consumer.go @@ -0,0 +1,512 @@ +package amqpjobs + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/google/uuid" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + jobState "github.com/spiral/roadrunner/v2/pkg/state/job" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + pluginName string = "amqp" +) + +type consumer struct { + sync.Mutex + log logger.Logger + pq priorityqueue.Queue + eh events.Handler + + pipeline atomic.Value + + // amqp connection + conn *amqp.Connection + consumeChan *amqp.Channel + publishChan chan *amqp.Channel + consumeID string + connStr string + + retryTimeout time.Duration + // + // prefetch QoS AMQP + // + prefetch int + // + // pipeline's priority + // + priority int64 + exchangeName string + queue string + exclusive bool + exchangeType string + routingKey string + multipleAck bool + requeueOnFail bool + + listeners uint32 + delayed *int64 + stopCh chan struct{} +} + +// NewAMQPConsumer initializes rabbitmq pipeline +func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_amqp_consumer") + // we need to obtain two parts of the amqp information here. 
+ // firs part - address to connect, it is located in the global section under the amqp pluginName + // second part - queues and other pipeline information + // if no such key - error + if !cfg.Has(configKey) { + return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) + } + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) + } + + // PARSE CONFIGURATION START ------- + var pipeCfg Config + var globalCfg GlobalCfg + + err := cfg.UnmarshalKey(configKey, &pipeCfg) + if err != nil { + return nil, errors.E(op, err) + } + + pipeCfg.InitDefault() + + err = cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + // PARSE CONFIGURATION END ------- + + jb := &consumer{ + log: log, + pq: pq, + eh: e, + consumeID: uuid.NewString(), + stopCh: make(chan struct{}), + // TODO to config + retryTimeout: time.Minute * 5, + priority: pipeCfg.Priority, + delayed: utils.Int64(0), + + publishChan: make(chan *amqp.Channel, 1), + routingKey: pipeCfg.RoutingKey, + queue: pipeCfg.Queue, + exchangeType: pipeCfg.ExchangeType, + exchangeName: pipeCfg.Exchange, + prefetch: pipeCfg.Prefetch, + exclusive: pipeCfg.Exclusive, + multipleAck: pipeCfg.MultipleAck, + requeueOnFail: pipeCfg.RequeueOnFail, + } + + jb.conn, err = amqp.Dial(globalCfg.Addr) + if err != nil { + return nil, errors.E(op, err) + } + + // save address + jb.connStr = globalCfg.Addr + + err = jb.initRabbitMQ() + if err != nil { + return nil, errors.E(op, err) + } + + pch, err := jb.conn.Channel() + if err != nil { + return nil, errors.E(op, err) + } + + jb.publishChan <- pch + + // run redialer and requeue listener for the connection + jb.redialer() + + return jb, nil +} + +func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_amqp_consumer_from_pipeline") + // we need to obtain two parts of the amqp information here. 
+ // first part - the address to connect to; it is located in the global section under the amqp pluginName
+ // second part - queues and other pipeline information
+
+ // only global section
+ if !cfg.Has(pluginName) {
+ return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
+ }
+
+ // PARSE CONFIGURATION -------
+ var globalCfg GlobalCfg
+
+ err := cfg.UnmarshalKey(pluginName, &globalCfg)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ globalCfg.InitDefault()
+
+ // PARSE CONFIGURATION -------
+
+ jb := &consumer{
+ log: log,
+ eh: e,
+ pq: pq,
+ consumeID: uuid.NewString(),
+ stopCh: make(chan struct{}),
+ retryTimeout: time.Minute * 5,
+ delayed: utils.Int64(0),
+
+ publishChan: make(chan *amqp.Channel, 1),
+ routingKey: pipeline.String(routingKey, ""),
+ queue: pipeline.String(queue, "default"),
+ exchangeType: pipeline.String(exchangeType, "direct"),
+ exchangeName: pipeline.String(exchangeKey, "amqp.default"),
+ prefetch: pipeline.Int(prefetch, 10),
+ priority: int64(pipeline.Int(priority, 10)),
+ exclusive: pipeline.Bool(exclusive, false),
+ multipleAck: pipeline.Bool(multipleAsk, false),
+ requeueOnFail: pipeline.Bool(requeueOnFail, false),
+ }
+
+ jb.conn, err = amqp.Dial(globalCfg.Addr)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // save address
+ jb.connStr = globalCfg.Addr
+
+ err = jb.initRabbitMQ()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ pch, err := jb.conn.Channel()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ jb.publishChan <- pch
+
+ // register the pipeline
+ // error here is always nil
+ _ = jb.Register(context.Background(), pipeline)
+
+ // run redialer for the connection
+ jb.redialer()
+
+ return jb, nil
+}
+
+func (j *consumer) Push(ctx context.Context, job *job.Job) error {
+ const op = errors.Op("rabbitmq_push")
+ // check if the pipeline registered
+
+ // load atomic value
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != job.Options.Pipeline {
+ return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name()))
+ }
+
+ err := j.handleItem(ctx, fromJob(job))
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
+ j.pipeline.Store(p)
+ return nil
+}
+
+func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
+ const op = errors.Op("rabbit_consume")
+
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != p.Name() {
+ return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
+ }
+
+ // protect connection (redial)
+ j.Lock()
+ defer j.Unlock()
+
+ var err error
+ j.consumeChan, err = j.conn.Channel()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ err = j.consumeChan.Qos(j.prefetch, 0, false)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // start reading messages from the channel
+ deliv, err := j.consumeChan.Consume(
+ j.queue,
+ j.consumeID,
+ false,
+ false,
+ false,
+ false,
+ nil,
+ )
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // run listener
+ j.listener(deliv)
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeActive,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+
+ return nil
+}
+
+func (j *consumer) State(ctx context.Context) (*jobState.State, error) {
+ const op = errors.Op("amqp_driver_state")
+ select {
+ case pch := <-j.publishChan:
+ defer func() {
+ j.publishChan <- pch
+ }()
+
+ q, err
:= pch.QueueInspect(j.queue) + if err != nil { + return nil, errors.E(op, err) + } + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + return &jobState.State{ + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Queue: q.Name, + Active: int64(q.Messages), + Delayed: atomic.LoadInt64(j.delayed), + Ready: ready(atomic.LoadUint32(&j.listeners)), + }, nil + + case <-ctx.Done(): + return nil, errors.E(op, errors.TimeOut, ctx.Err()) + } +} + +func (j *consumer) Pause(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested pause on: ", p) + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 0 { + j.log.Warn("no active listeners, nothing to pause") + return + } + + atomic.AddUint32(&j.listeners, ^uint32(0)) + + // protect connection (redial) + j.Lock() + defer j.Unlock() + + err := j.consumeChan.Cancel(j.consumeID, true) + if err != nil { + j.log.Error("cancel publish channel, forcing close", "error", err) + errCl := j.consumeChan.Close() + if errCl != nil { + j.log.Error("force close failed", "error", err) + return + } + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) Resume(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested resume on: ", p) + } + + // protect connection (redial) + j.Lock() + defer j.Unlock() + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 1 { + j.log.Warn("amqp listener already in the active state") + return + } + + var err error + j.consumeChan, err = j.conn.Channel() + if err != nil { + j.log.Error("create channel on rabbitmq connection", "error", err) + return + } + + err = j.consumeChan.Qos(j.prefetch, 0, false) + if err != nil { + j.log.Error("qos set failed", "error", err) + return + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + j.log.Error("consume operation failed", "error", err) + return + } + + // run listener + j.listener(deliv) + + // increase number of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) Stop(context.Context) error { + j.stopCh <- struct{}{} + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +// handleItem +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { + const op = errors.Op("rabbitmq_handle_item") + select { + case pch := <-j.publishChan: + // return the channel back + defer func() { + j.publishChan <- pch + }() + + // convert + table, err := pack(msg.ID(), msg) + if err != nil { + return errors.E(op, err) + } + + const op = errors.Op("rabbitmq_handle_item") + // handle timeouts + if msg.Options.DelayDuration() > 0 { + atomic.AddInt64(j.delayed, 1) + // TODO declare separate method for this if condition + // TODO dlx cache channel?? 
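+ // Delay sketch (as implemented below): the message is published to a
+ // temporary per-delay queue named "delayed-<ms>.<exchange>.<queue>" that
+ // has no consumers; its x-dead-letter-exchange/x-dead-letter-routing-key
+ // arguments point back to the main exchange and routing key, so once the
+ // x-message-ttl expires, RabbitMQ dead-letters the message into the real
+ // queue. x-expires (2x the TTL) garbage-collects the temporary queue.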
+ delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) + tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue) + _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ + dlx: j.exchangeName, + dlxRoutingKey: j.routingKey, + dlxTTL: delayMs, + dlxExpires: delayMs * 2, + }) + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil) + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + // insert to the local, limited pipeline + err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{ + Headers: table, + ContentType: contentType, + Timestamp: time.Now().UTC(), + DeliveryMode: amqp.Persistent, + Body: msg.Body(), + }) + + if err != nil { + atomic.AddInt64(j.delayed, ^int64(0)) + return errors.E(op, err) + } + + return nil + } + + // insert to the local, limited pipeline + err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{ + Headers: table, + ContentType: contentType, + Timestamp: time.Now(), + DeliveryMode: amqp.Persistent, + Body: msg.Body(), + }) + + if err != nil { + return errors.E(op, err) + } + + return nil + case <-ctx.Done(): + return errors.E(op, errors.TimeOut, ctx.Err()) + } +} + +func ready(r uint32) bool { + return r > 0 +} diff --git a/plugins/amqp/amqpjobs/item.go b/plugins/amqp/amqpjobs/item.go new file mode 100644 index 00000000..a8e305ea --- /dev/null +++ b/plugins/amqp/amqpjobs/item.go @@ -0,0 +1,239 @@ +package amqpjobs + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + json "github.com/json-iterator/go" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // private + // Ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery + ack func(multiply bool) error + + // Nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. + // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. + // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. 
+ // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time + nack func(multiply bool, requeue bool) error + + // requeueFn used as a pointer to the push function + requeueFn func(context.Context, *Item) error + delayed *int64 + multipleAsk bool + requeue bool +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +// Not used in the amqp, amqp.Table used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + return i.Options.ack(i.Options.multipleAsk) +} + +func (i *Item) Nack() error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + return i.Options.nack(false, i.Options.requeue) +} + +// Requeue with the provided delay, handled by the Nack +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + } + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + errNack := i.Options.nack(false, true) + if errNack != nil { + return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) + } + + return err + } + + // ack the job + err = i.Options.ack(false) + if err != nil { + return err + } + + return nil +} + +// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ +func (j *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { + const op = errors.Op("from_delivery_convert") + item, err := j.unpack(d) + if err != nil { + return nil, errors.E(op, err) + } + + i := &Item{ + Job: item.Job, + Ident: item.Ident, + Payload: item.Payload, + Headers: item.Headers, + Options: item.Options, + } + + item.Options.ack = d.Ack + item.Options.nack = d.Nack + item.Options.delayed = j.delayed + + // requeue func + item.Options.requeueFn = j.handleItem + return i, nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +// pack job metadata into headers +func pack(id string, j *Item) (amqp.Table, error) { + headers, err := json.Marshal(j.Headers) + if err != nil { + return nil, err + } + return amqp.Table{ + job.RRID: id, + job.RRJob: j.Job, + job.RRPipeline: j.Options.Pipeline, + job.RRHeaders: headers, + job.RRDelay: j.Options.Delay, + job.RRPriority: j.Options.Priority, + }, nil +} + +// unpack restores jobs.Options +func (j *consumer) unpack(d amqp.Delivery) (*Item, error) 
{ + item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ + multipleAsk: j.multipleAck, + requeue: j.requeueOnFail, + requeueFn: j.handleItem, + }} + + if _, ok := d.Headers[job.RRID].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID)) + } + + item.Ident = d.Headers[job.RRID].(string) + + if _, ok := d.Headers[job.RRJob].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) + } + + item.Job = d.Headers[job.RRJob].(string) + + if _, ok := d.Headers[job.RRPipeline].(string); ok { + item.Options.Pipeline = d.Headers[job.RRPipeline].(string) + } + + if h, ok := d.Headers[job.RRHeaders].([]byte); ok { + err := json.Unmarshal(h, &item.Headers) + if err != nil { + return nil, err + } + } + + if _, ok := d.Headers[job.RRDelay].(int64); ok { + item.Options.Delay = d.Headers[job.RRDelay].(int64) + } + + if _, ok := d.Headers[job.RRPriority]; !ok { + // set pipe's priority + item.Options.Priority = j.priority + } else { + item.Options.Priority = d.Headers[job.RRPriority].(int64) + } + + return item, nil +} diff --git a/plugins/amqp/amqpjobs/listener.go b/plugins/amqp/amqpjobs/listener.go new file mode 100644 index 00000000..0156d55c --- /dev/null +++ b/plugins/amqp/amqpjobs/listener.go @@ -0,0 +1,25 @@ +package amqpjobs + +import amqp "github.com/rabbitmq/amqp091-go" + +func (j *consumer) listener(deliv <-chan amqp.Delivery) { + go func() { + for { //nolint:gosimple + select { + case msg, ok := <-deliv: + if !ok { + j.log.Info("delivery channel closed, leaving the rabbit listener") + return + } + + d, err := j.fromDelivery(msg) + if err != nil { + j.log.Error("amqp delivery convert", "error", err) + continue + } + // insert job into the main priority queue + j.pq.Insert(d) + } + } + }() +} diff --git a/plugins/amqp/amqpjobs/rabbit_init.go b/plugins/amqp/amqpjobs/rabbit_init.go new file mode 100644 index 00000000..e260fabe --- /dev/null +++ b/plugins/amqp/amqpjobs/rabbit_init.go @@ -0,0 +1,57 @@ +package amqpjobs + +import ( + "github.com/spiral/errors" +) + +func (j *consumer) initRabbitMQ() error { + const op = errors.Op("jobs_plugin_rmq_init") + // Channel opens a unique, concurrent server channel to process the bulk of AMQP + // messages. Any error from methods on this receiver will render the receiver + // invalid and a new Channel should be opened. 
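+ // Note: the declarations below are idempotent, so the redialer can safely
+ // re-run initRabbitMQ on every reconnect. The resulting topology is:
+ //   exchange <exchangeName> (durable, <exchangeType>)
+ //     --<routingKey>--> queue <queue> (non-durable)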
+ channel, err := j.conn.Channel() + if err != nil { + return errors.E(op, err) + } + + // declare an exchange (idempotent operation) + err = channel.ExchangeDeclare( + j.exchangeName, + j.exchangeType, + true, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // verify or declare a queue + q, err := channel.QueueDeclare( + j.queue, + false, + false, + j.exclusive, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // bind queue to the exchange + err = channel.QueueBind( + q.Name, + j.routingKey, + j.exchangeName, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + return channel.Close() +} diff --git a/plugins/amqp/amqpjobs/redial.go b/plugins/amqp/amqpjobs/redial.go new file mode 100644 index 00000000..0835e3ea --- /dev/null +++ b/plugins/amqp/amqpjobs/redial.go @@ -0,0 +1,141 @@ +package amqpjobs + +import ( + "time" + + "github.com/cenkalti/backoff/v4" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" +) + +// redialer used to redial to the rabbitmq in case of the connection interrupts +func (j *consumer) redialer() { //nolint:gocognit + go func() { + const op = errors.Op("rabbitmq_redial") + + for { + select { + case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): + if err == nil { + return + } + + j.Lock() + + // trash the broken publishing channel + <-j.publishChan + + t := time.Now() + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeError, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Error: err, + Start: time.Now(), + }) + + expb := backoff.NewExponentialBackOff() + // set the retry timeout (minutes) + expb.MaxElapsedTime = j.retryTimeout + operation := func() error { + j.log.Warn("rabbitmq reconnecting, caused by", "error", err) + var dialErr error + j.conn, dialErr = amqp.Dial(j.connStr) + if dialErr != nil { + return errors.E(op, dialErr) + } + + j.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") + + // re-init connection + errInit := j.initRabbitMQ() + if errInit != nil { + j.log.Error("rabbitmq dial", "error", errInit) + return errInit + } + + // redeclare consume channel + var errConnCh error + j.consumeChan, errConnCh = j.conn.Channel() + if errConnCh != nil { + return errors.E(op, errConnCh) + } + + // redeclare publish channel + pch, errPubCh := j.conn.Channel() + if errPubCh != nil { + return errors.E(op, errPubCh) + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // put the fresh publishing channel + j.publishChan <- pch + // restart listener + j.listener(deliv) + + j.log.Info("queues and subscribers redeclared successfully") + + return nil + } + + retryErr := backoff.Retry(operation, expb) + if retryErr != nil { + j.Unlock() + j.log.Error("backoff failed", "error", retryErr) + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Start: t, + Elapsed: time.Since(t), + }) + + j.Unlock() + + case <-j.stopCh: + if j.publishChan != nil { + pch := <-j.publishChan + err := pch.Close() + if err != nil { + j.log.Error("publish channel close", "error", err) + } + } + + if j.consumeChan != nil { + err := j.consumeChan.Close() + if err != nil { + j.log.Error("consume channel close", "error", err) + } + } + if j.conn != nil { + err := j.conn.Close() + if err != nil { + j.log.Error("amqp connection close", "error", err) + } + } + + return + } + } + }() +} diff --git a/plugins/amqp/plugin.go b/plugins/amqp/plugin.go new file mode 100644 index 00000000..c4f5f1da --- /dev/null +++ b/plugins/amqp/plugin.go @@ -0,0 +1,41 @@ +package amqp + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/amqp/amqpjobs" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + pluginName string = "amqp" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) Available() {} + +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return amqpjobs.NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) +} + +// FromPipeline constructs AMQP driver from pipeline +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return amqpjobs.FromPipeline(pipe, p.log, p.cfg, e, pq) +} diff --git a/plugins/beanstalk/config.go b/plugins/beanstalk/config.go new file mode 100644 index 00000000..a8069f5d --- /dev/null +++ b/plugins/beanstalk/config.go @@ -0,0 +1,53 @@ +package beanstalk + +import ( + "time" + + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + tubePriority string = "tube_priority" + tube string = "tube" + reserveTimeout string = "reserve_timeout" +) + +type GlobalCfg struct { + Addr string `mapstructure:"addr"` + Timeout time.Duration `mapstructure:"timeout"` +} + +func (c *GlobalCfg) InitDefault() { + if c.Addr 
== "" { + c.Addr = "tcp://127.0.0.1:11300" + } + + if c.Timeout == 0 { + c.Timeout = time.Second * 30 + } +} + +type Config struct { + PipePriority int64 `mapstructure:"priority"` + TubePriority *uint32 `mapstructure:"tube_priority"` + Tube string `mapstructure:"tube"` + ReserveTimeout time.Duration `mapstructure:"reserve_timeout"` +} + +func (c *Config) InitDefault() { + if c.Tube == "" { + c.Tube = "default" + } + + if c.ReserveTimeout == 0 { + c.ReserveTimeout = time.Second * 1 + } + + if c.TubePriority == nil { + c.TubePriority = utils.Uint32(0) + } + + if c.PipePriority == 0 { + c.PipePriority = 10 + } +} diff --git a/plugins/beanstalk/connection.go b/plugins/beanstalk/connection.go new file mode 100644 index 00000000..d3241b37 --- /dev/null +++ b/plugins/beanstalk/connection.go @@ -0,0 +1,223 @@ +package beanstalk + +import ( + "context" + "net" + "sync" + "time" + + "github.com/beanstalkd/go-beanstalk" + "github.com/cenkalti/backoff/v4" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +type ConnPool struct { + sync.RWMutex + + log logger.Logger + + conn *beanstalk.Conn + connT *beanstalk.Conn + ts *beanstalk.TubeSet + t *beanstalk.Tube + + network string + address string + tName string + tout time.Duration +} + +func NewConnPool(network, address, tName string, tout time.Duration, log logger.Logger) (*ConnPool, error) { + connT, err := beanstalk.DialTimeout(network, address, tout) + if err != nil { + return nil, err + } + + connTS, err := beanstalk.DialTimeout(network, address, tout) + if err != nil { + return nil, err + } + + tube := beanstalk.NewTube(connT, tName) + ts := beanstalk.NewTubeSet(connTS, tName) + + return &ConnPool{ + log: log, + network: network, + address: address, + tName: tName, + tout: tout, + conn: connTS, + connT: connT, + ts: ts, + t: tube, + }, nil +} + +// Put the payload +// TODO use the context ?? +func (cp *ConnPool) Put(_ context.Context, body []byte, pri uint32, delay, ttr time.Duration) (uint64, error) { + cp.RLock() + defer cp.RUnlock() + + // TODO(rustatian): redial based on the token + id, err := cp.t.Put(body, pri, delay, ttr) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return 0, errors.Errorf("err: %s\nerr redial: %s", err, errN) + } else { + // retry put only when we redialed + return cp.t.Put(body, pri, delay, ttr) + } + } + + return id, nil +} + +// Reserve reserves and returns a job from one of the tubes in t. If no +// job is available before time timeout has passed, Reserve returns a +// ConnError recording ErrTimeout. +// +// Typically, a client will reserve a job, perform some work, then delete +// the job with Conn.Delete. 
+func (cp *ConnPool) Reserve(reserveTimeout time.Duration) (uint64, []byte, error) { + cp.RLock() + defer cp.RUnlock() + + id, body, err := cp.ts.Reserve(reserveTimeout) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return 0, nil, errors.Errorf("err: %s\nerr redial: %s", err, errN) + } else { + // retry Reserve only when we redialed + return cp.ts.Reserve(reserveTimeout) + } + } + + return id, body, nil +} + +func (cp *ConnPool) Delete(_ context.Context, id uint64) error { + cp.RLock() + defer cp.RUnlock() + + err := cp.conn.Delete(id) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return errors.Errorf("err: %s\nerr redial: %s", err, errN) + } else { + // retry Delete only when we redialed + return cp.conn.Delete(id) + } + } + return nil +} + +func (cp *ConnPool) Stats(_ context.Context) (map[string]string, error) { + cp.RLock() + defer cp.RUnlock() + + stat, err := cp.conn.Stats() + if err != nil { + errR := cp.checkAndRedial(err) + if errR != nil { + return nil, errors.Errorf("err: %s\nerr redial: %s", err, errR) + } else { + return cp.conn.Stats() + } + } + + return stat, nil +} + +func (cp *ConnPool) redial() error { + const op = errors.Op("connection_pool_redial") + + cp.Lock() + // backoff here + expb := backoff.NewExponentialBackOff() + // TODO(rustatian) set via config + expb.MaxElapsedTime = time.Minute + + operation := func() error { + connT, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) + if err != nil { + return err + } + if connT == nil { + return errors.E(op, errors.Str("connectionT is nil")) + } + + connTS, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) + if err != nil { + return err + } + + if connTS == nil { + return errors.E(op, errors.Str("connectionTS is nil")) + } + + cp.t = beanstalk.NewTube(connT, cp.tName) + cp.ts = beanstalk.NewTubeSet(connTS, cp.tName) + cp.conn = connTS + cp.connT = connT + + cp.log.Info("beanstalk redial was successful") + return nil + } + + retryErr := backoff.Retry(operation, expb) + if retryErr != nil { + cp.Unlock() + return retryErr + } + cp.Unlock() + + return nil +} + +var connErrors = map[string]struct{}{"EOF": {}} + +func (cp *ConnPool) checkAndRedial(err error) error { + const op = errors.Op("connection_pool_check_redial") + switch et := err.(type) { //nolint:gocritic + // check if the error + case beanstalk.ConnError: + switch bErr := et.Err.(type) { + case *net.OpError: + cp.RUnlock() + errR := cp.redial() + cp.RLock() + // if redial failed - return + if errR != nil { + return errors.E(op, errors.Errorf("%v:%v", bErr, errR)) + } + + // if redial was successful -> continue listening + return nil + default: + if _, ok := connErrors[et.Err.Error()]; ok { + // if error is related to the broken connection - redial + cp.RUnlock() + errR := cp.redial() + cp.RLock() + // if redial failed - return + if errR != nil { + return errors.E(op, errors.Errorf("%v:%v", err, errR)) + } + // if redial was successful -> continue listening + return nil + } + } + } + + // return initial error + return err +} diff --git a/plugins/beanstalk/consumer.go b/plugins/beanstalk/consumer.go new file mode 100644 index 00000000..5ef89983 --- /dev/null +++ b/plugins/beanstalk/consumer.go @@ -0,0 +1,360 @@ +package beanstalk + +import ( + "bytes" + "context" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/spiral/errors" + 
"github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + jobState "github.com/spiral/roadrunner/v2/pkg/state/job" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/utils" +) + +type consumer struct { + log logger.Logger + eh events.Handler + pq priorityqueue.Queue + + pipeline atomic.Value + listeners uint32 + + // beanstalk + pool *ConnPool + addr string + network string + reserveTimeout time.Duration + reconnectCh chan struct{} + tout time.Duration + // tube name + tName string + tubePriority *uint32 + priority int64 + + stopCh chan struct{} + requeueCh chan *Item +} + +func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_beanstalk_consumer") + + // PARSE CONFIGURATION ------- + var pipeCfg Config + var globalCfg GlobalCfg + + if !cfg.Has(configKey) { + return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) + } + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) + } + + err := cfg.UnmarshalKey(configKey, &pipeCfg) + if err != nil { + return nil, errors.E(op, err) + } + + pipeCfg.InitDefault() + + err = cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + // PARSE CONFIGURATION ------- + + dsn := strings.Split(globalCfg.Addr, "://") + if len(dsn) != 2 { + return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) + } + + cPool, err := NewConnPool(dsn[0], dsn[1], pipeCfg.Tube, globalCfg.Timeout, log) + if err != nil { + return nil, errors.E(op, err) + } + + // initialize job consumer + jc := &consumer{ + pq: pq, + log: log, + eh: e, + pool: cPool, + network: dsn[0], + addr: dsn[1], + tout: globalCfg.Timeout, + tName: pipeCfg.Tube, + reserveTimeout: pipeCfg.ReserveTimeout, + tubePriority: pipeCfg.TubePriority, + priority: pipeCfg.PipePriority, + + // buffered with two because jobs root plugin can call Stop at the same time as Pause + stopCh: make(chan struct{}, 2), + requeueCh: make(chan *Item, 1000), + reconnectCh: make(chan struct{}, 2), + } + + return jc, nil +} + +func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_beanstalk_consumer") + + // PARSE CONFIGURATION ------- + var globalCfg GlobalCfg + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) + } + + err := cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + // PARSE CONFIGURATION ------- + + dsn := strings.Split(globalCfg.Addr, "://") + if len(dsn) != 2 { + return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) + } + + cPool, err := NewConnPool(dsn[0], dsn[1], pipe.String(tube, "default"), globalCfg.Timeout, 
log) + if err != nil { + return nil, errors.E(op, err) + } + + // initialize job consumer + jc := &consumer{ + pq: pq, + log: log, + eh: e, + pool: cPool, + network: dsn[0], + addr: dsn[1], + tout: globalCfg.Timeout, + tName: pipe.String(tube, "default"), + reserveTimeout: time.Second * time.Duration(pipe.Int(reserveTimeout, 5)), + tubePriority: utils.Uint32(uint32(pipe.Int(tubePriority, 1))), + priority: pipe.Priority(), + + // buffered with two because jobs root plugin can call Stop at the same time as Pause + stopCh: make(chan struct{}, 2), + requeueCh: make(chan *Item, 1000), + reconnectCh: make(chan struct{}, 2), + } + + return jc, nil +} +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { + const op = errors.Op("beanstalk_push") + // check if the pipeline registered + + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != jb.Options.Pipeline { + return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + + return nil +} + +func (j *consumer) handleItem(ctx context.Context, item *Item) error { + const op = errors.Op("beanstalk_handle_item") + + bb := new(bytes.Buffer) + bb.Grow(64) + err := item.pack(bb) + if err != nil { + return errors.E(op, err) + } + + // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L458 + // is an integer < 2**32. Jobs with smaller priority values will be + // scheduled before jobs with larger priorities. The most urgent priority is 0; + // the least urgent priority is 4,294,967,295. + // + // is an integer number of seconds to wait before putting the job in + // the ready queue. The job will be in the "delayed" state during this time. + // Maximum delay is 2**32-1. + // + // -- time to run -- is an integer number of seconds to allow a worker + // to run this job. This time is counted from the moment a worker reserves + // this job. If the worker does not delete, release, or bury the job within + // seconds, the job will time out and the server will release the job. + // The minimum ttr is 1. If the client sends 0, the server will silently + // increase the ttr to 1. Maximum ttr is 2**32-1. 
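+ // In this driver: <pri> is the pipeline's tube_priority, <delay> comes
+ // from the job options, and <ttr> reuses the global beanstalk timeout,
+ // i.e. Put(body, *j.tubePriority, item.Options.DelayDuration(), j.tout).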
+ id, err := j.pool.Put(ctx, bb.Bytes(), *j.tubePriority, item.Options.DelayDuration(), j.tout)
+ if err != nil {
+ errD := j.pool.Delete(ctx, id)
+ if errD != nil {
+ return errors.E(op, errors.Errorf("%s:%s", err.Error(), errD.Error()))
+ }
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
+ // register the pipeline
+ j.pipeline.Store(p)
+ return nil
+}
+
+// State https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L514
+func (j *consumer) State(ctx context.Context) (*jobState.State, error) {
+ const op = errors.Op("beanstalk_state")
+ stat, err := j.pool.Stats(ctx)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+
+ out := &jobState.State{
+ Pipeline: pipe.Name(),
+ Driver: pipe.Driver(),
+ Queue: j.tName,
+ Ready: ready(atomic.LoadUint32(&j.listeners)),
+ }
+
+ // set stat, skip errors (replace with 0)
+ // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L523
+ if v, err := strconv.Atoi(stat["current-jobs-ready"]); err == nil {
+ out.Active = int64(v)
+ }
+
+ // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L525
+ if v, err := strconv.Atoi(stat["current-jobs-reserved"]); err == nil {
+ // this is not an error: reserved jobs in beanstalk behave like active jobs
+ out.Reserved = int64(v)
+ }
+
+ // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L528
+ if v, err := strconv.Atoi(stat["current-jobs-delayed"]); err == nil {
+ out.Delayed = int64(v)
+ }
+
+ return out, nil
+}
+
+func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
+ const op = errors.Op("beanstalk_run")
+ // check if the pipeline registered
+
+ // load atomic value
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != p.Name() {
+ return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", p.Name(), pipe.Name()))
+ }
+
+ atomic.AddUint32(&j.listeners, 1)
+
+ go j.listen()
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeActive,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+
+ return nil
+}
+
+func (j *consumer) Stop(context.Context) error {
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+
+ if atomic.LoadUint32(&j.listeners) == 1 {
+ j.stopCh <- struct{}{}
+ }
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipeStopped,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+
+ return nil
+}
+
+func (j *consumer) Pause(_ context.Context, p string) {
+ // load atomic value
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != p {
+ j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
+ return
+ }
+
+ l := atomic.LoadUint32(&j.listeners)
+ // no active listeners
+ if l == 0 {
+ j.log.Warn("no active listeners, nothing to pause")
+ return
+ }
+
+ atomic.AddUint32(&j.listeners, ^uint32(0))
+
+ j.stopCh <- struct{}{}
+
+ j.eh.Push(events.JobEvent{
+ Event: events.EventPipePaused,
+ Driver: pipe.Driver(),
+ Pipeline: pipe.Name(),
+ Start: time.Now(),
+ })
+}
+
+func (j *consumer) Resume(_ context.Context, p string) {
+ // load atomic value
+ pipe := j.pipeline.Load().(*pipeline.Pipeline)
+ if pipe.Name() != p {
+ j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
+ return
+ }
+
+ l := atomic.LoadUint32(&j.listeners)
+ // listener is already active
+ if l == 1 {
+ j.log.Warn("beanstalk listener already in the active state")
+ return
+ }
+
+ // start listener
+ go
j.listen() + + // increase num of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func ready(r uint32) bool { + return r > 0 +} diff --git a/plugins/beanstalk/encode_test.go b/plugins/beanstalk/encode_test.go new file mode 100644 index 00000000..e43207eb --- /dev/null +++ b/plugins/beanstalk/encode_test.go @@ -0,0 +1,75 @@ +package beanstalk + +import ( + "bytes" + "crypto/rand" + "encoding/gob" + "testing" + + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/utils" +) + +func BenchmarkEncodeGob(b *testing.B) { + tb := make([]byte, 1024*10) + _, err := rand.Read(tb) + if err != nil { + b.Fatal(err) + } + + item := &Item{ + Job: "/super/test/php/class/loooooong", + Ident: "12341234-asdfasdfa-1234234-asdfasdfas", + Payload: utils.AsString(tb), + Headers: map[string][]string{"Test": {"test1", "test2"}}, + Options: &Options{ + Priority: 10, + Pipeline: "test-local-pipe", + Delay: 10, + }, + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + bb := new(bytes.Buffer) + err := gob.NewEncoder(bb).Encode(item) + if err != nil { + b.Fatal(err) + } + _ = bb.Bytes() + bb.Reset() + } +} + +func BenchmarkEncodeJsonIter(b *testing.B) { + tb := make([]byte, 1024*10) + _, err := rand.Read(tb) + if err != nil { + b.Fatal(err) + } + + item := &Item{ + Job: "/super/test/php/class/loooooong", + Ident: "12341234-asdfasdfa-1234234-asdfasdfas", + Payload: utils.AsString(tb), + Headers: map[string][]string{"Test": {"test1", "test2"}}, + Options: &Options{ + Priority: 10, + Pipeline: "test-local-pipe", + Delay: 10, + }, + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + bb, err := json.Marshal(item) + if err != nil { + b.Fatal(err) + } + _ = bb + } +} diff --git a/plugins/beanstalk/item.go b/plugins/beanstalk/item.go new file mode 100644 index 00000000..0a6cd560 --- /dev/null +++ b/plugins/beanstalk/item.go @@ -0,0 +1,147 @@ +package beanstalk + +import ( + "bytes" + "context" + "encoding/gob" + "time" + + "github.com/beanstalkd/go-beanstalk" + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // Private ================ + id uint64 + conn *beanstalk.Conn + requeueFn func(context.Context, *Item) error +} + +// DelayDuration returns delay duration in a form of time.Duration. 
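+// For example, Options{Delay: 10} yields a 10 * time.Second delay.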
+func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +// Not used in the sqs, MessageAttributes used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + return i.Options.conn.Delete(i.Options.id) +} + +func (i *Item) Nack() error { + return i.Options.conn.Delete(i.Options.id) +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + // delete old job + err = i.Options.conn.Delete(i.Options.id) + if err != nil { + return err + } + + return nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +func (i *Item) pack(b *bytes.Buffer) error { + err := gob.NewEncoder(b).Encode(i) + if err != nil { + return err + } + + return nil +} + +func (j *consumer) unpack(id uint64, data []byte, out *Item) error { + err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out) + if err != nil { + return err + } + out.Options.conn = j.pool.conn + out.Options.id = id + out.Options.requeueFn = j.handleItem + + return nil +} diff --git a/plugins/beanstalk/listen.go b/plugins/beanstalk/listen.go new file mode 100644 index 00000000..6bb159ea --- /dev/null +++ b/plugins/beanstalk/listen.go @@ -0,0 +1,39 @@ +package beanstalk + +import ( + "github.com/beanstalkd/go-beanstalk" +) + +func (j *consumer) listen() { + for { + select { + case <-j.stopCh: + j.log.Warn("beanstalk listener stopped") + return + default: + id, body, err := j.pool.Reserve(j.reserveTimeout) + if err != nil { + if errB, ok := err.(beanstalk.ConnError); ok { + switch errB.Err { //nolint:gocritic + case beanstalk.ErrTimeout: + j.log.Info("beanstalk reserve timeout", "warn", errB.Op) + continue + } + } + // in case of other error - continue + j.log.Error("beanstalk reserve", "error", err) + continue + } + + item := &Item{} + err = j.unpack(id, body, item) + if err != nil { + j.log.Error("beanstalk unpack item", "error", err) + continue + } + + // insert job into the priority queue + j.pq.Insert(item) + } + } +} diff --git a/plugins/beanstalk/plugin.go b/plugins/beanstalk/plugin.go new file mode 100644 index 00000000..529d1474 --- /dev/null +++ b/plugins/beanstalk/plugin.go @@ -0,0 +1,47 @@ +package beanstalk + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + 
pluginName string = "beanstalk" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Serve() chan error { + return make(chan error) +} + +func (p *Plugin) Stop() error { + return nil +} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) Available() {} + +func (p *Plugin) JobsConstruct(configKey string, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewBeanstalkConsumer(configKey, p.log, p.cfg, eh, pq) +} + +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipe, p.log, p.cfg, eh, pq) +} diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go index 1f8e6ff1..4a8d6cd9 100644 --- a/plugins/boltdb/boltjobs/listener.go +++ b/plugins/boltdb/boltjobs/listener.go @@ -11,7 +11,7 @@ func (c *consumer) listener() { if err != nil { panic(err) } - //cursor := tx.Cursor() + // cursor := tx.Cursor() err = tx.Commit() if err != nil { diff --git a/plugins/broadcast/plugin.go b/plugins/broadcast/plugin.go index 889dc2fa..a2390df5 100644 --- a/plugins/broadcast/plugin.go +++ b/plugins/broadcast/plugin.go @@ -4,7 +4,6 @@ import ( "fmt" "sync" - "github.com/google/uuid" endure "github.com/spiral/endure/pkg/container" "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/common/pubsub" @@ -16,9 +15,6 @@ const ( PluginName string = "broadcast" // driver is the mandatory field which should present in every storage driver string = "driver" - - redis string = "redis" - memory string = "memory" ) type Plugin struct { @@ -97,6 +93,7 @@ func (p *Plugin) Publish(m *pubsub.Message) error { } func (p *Plugin) PublishAsync(m *pubsub.Message) { + // TODO(rustatian) channel here? 
go func() { p.Lock() defer p.Unlock() @@ -106,7 +103,7 @@ func (p *Plugin) PublishAsync(m *pubsub.Message) { err := p.publishers[j].Publish(m) if err != nil { p.log.Error("publishAsync", "error", err) - // continue publish to other registered publishers + // continue publishing to the other registered publishers continue } } @@ -116,7 +113,7 @@ func (p *Plugin) PublishAsync(m *pubsub.Message) { }() } -func (p *Plugin) GetDriver(key string) (pubsub.SubReader, error) { //nolint:gocognit +func (p *Plugin) GetDriver(key string) (pubsub.SubReader, error) { const op = errors.Op("broadcast_plugin_get_driver") // choose a driver @@ -136,57 +133,37 @@ func (p *Plugin) GetDriver(key string) (pubsub.SubReader, error) { //nolint:goco // config key for the particular sub-driver kv.memcached configKey := fmt.Sprintf("%s.%s", PluginName, key) - switch val.(map[string]interface{})[driver] { - case memory: - if _, ok := p.constructors[memory]; !ok { - return nil, errors.E(op, errors.Errorf("no memory drivers registered, registered: %s", p.publishers)) - } - ps, err := p.constructors[memory].PSConstruct(configKey) - if err != nil { - return nil, errors.E(op, err) - } - - // save the initialized publisher channel - // for the in-memory, register new publishers - p.publishers[uuid.NewString()] = ps + drName := val.(map[string]interface{})[driver] - return ps, nil - case redis: - if _, ok := p.constructors[redis]; !ok { - return nil, errors.E(op, errors.Errorf("no redis drivers registered, registered: %s", p.publishers)) + // driver name should be a string + if drStr, ok := drName.(string); ok { + if _, ok := p.constructors[drStr]; !ok { + return nil, errors.E(op, errors.Errorf("no drivers with the requested name registered, registered: %s, requested: %s", p.publishers, drStr)) } - // first - try local configuration - switch { - case p.cfgPlugin.Has(configKey): - ps, err := p.constructors[redis].PSConstruct(configKey) + // try local config first + if p.cfgPlugin.Has(configKey) { + ps, err := p.constructors[drStr].PSConstruct(configKey) if err != nil { return nil, errors.E(op, err) } - // if section already exists, return new connection - if _, ok := p.publishers[configKey]; ok { - return ps, nil - } - - // if not - initialize a connection + // save the initialized publisher channel + // for the in-memory, register new publishers p.publishers[configKey] = ps - return ps, nil - // then try global if local does not exist - case p.cfgPlugin.Has(redis): - ps, err := p.constructors[redis].PSConstruct(configKey) + return ps, nil + } else { + // try global driver section + ps, err := p.constructors[drStr].PSConstruct(drStr) if err != nil { return nil, errors.E(op, err) } - // if section already exists, return new connection - if _, ok := p.publishers[configKey]; ok { - return ps, nil - } - - // if not - initialize a connection + // save the initialized publisher channel + // for the in-memory, register new publishers p.publishers[configKey] = ps + return ps, nil } } diff --git a/plugins/ephemeral/consumer.go b/plugins/ephemeral/consumer.go new file mode 100644 index 00000000..91b8eda9 --- /dev/null +++ b/plugins/ephemeral/consumer.go @@ -0,0 +1,274 @@ +package ephemeral + +import ( + "context" + "sync/atomic" + "time" + + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + jobState "github.com/spiral/roadrunner/v2/pkg/state/job" + "github.com/spiral/roadrunner/v2/plugins/config" + 
"github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + prefetch string = "prefetch" + goroutinesMax uint64 = 1000 +) + +type Config struct { + Prefetch uint64 `mapstructure:"prefetch"` +} + +type consumer struct { + cfg *Config + log logger.Logger + eh events.Handler + pipeline atomic.Value + pq priorityqueue.Queue + localPrefetch chan *Item + + // time.sleep goroutines max number + goroutines uint64 + + delayed *int64 + active *int64 + + listeners uint32 + stopCh chan struct{} +} + +func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_ephemeral_pipeline") + + jb := &consumer{ + log: log, + pq: pq, + eh: eh, + goroutines: 0, + active: utils.Int64(0), + delayed: utils.Int64(0), + stopCh: make(chan struct{}, 1), + } + + err := cfg.UnmarshalKey(configKey, &jb.cfg) + if err != nil { + return nil, errors.E(op, err) + } + + if jb.cfg.Prefetch == 0 { + jb.cfg.Prefetch = 100_000 + } + + // initialize a local queue + jb.localPrefetch = make(chan *Item, jb.cfg.Prefetch) + + return jb, nil +} + +func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { + jb := &consumer{ + log: log, + pq: pq, + eh: eh, + goroutines: 0, + active: utils.Int64(0), + delayed: utils.Int64(0), + stopCh: make(chan struct{}, 1), + } + + // initialize a local queue + jb.localPrefetch = make(chan *Item, pipeline.Int(prefetch, 100_000)) + + return jb, nil +} + +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { + const op = errors.Op("ephemeral_push") + + // check if the pipeline registered + _, ok := j.pipeline.Load().(*pipeline.Pipeline) + if !ok { + return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline)) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + + return nil +} + +func (j *consumer) State(_ context.Context) (*jobState.State, error) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + return &jobState.State{ + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Queue: pipe.Name(), + Active: atomic.LoadInt64(j.active), + Delayed: atomic.LoadInt64(j.delayed), + Ready: ready(atomic.LoadUint32(&j.listeners)), + }, nil +} + +func (j *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { + j.pipeline.Store(pipeline) + return nil +} + +func (j *consumer) Pause(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested pause on: ", p) + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 0 { + j.log.Warn("no active listeners, nothing to pause") + return + } + + atomic.AddUint32(&j.listeners, ^uint32(0)) + + // stop the consumer + j.stopCh <- struct{}{} + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + Elapsed: 0, + }) +} + +func (j *consumer) Resume(_ context.Context, p string) { + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested resume on: ", p) + } + + l := atomic.LoadUint32(&j.listeners) + // listener already active + if l == 1 { + j.log.Warn("listener already in the active state") + return + } + + // resume 
the consumer on the same channel + j.consume() + + atomic.StoreUint32(&j.listeners, 1) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Pipeline: pipe.Name(), + Start: time.Now(), + Elapsed: 0, + }) +} + +// Run is no-op for the ephemeral +func (j *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +func (j *consumer) Stop(ctx context.Context) error { + const op = errors.Op("ephemeral_plugin_stop") + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + select { + // return from the consumer + case j.stopCh <- struct{}{}: + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Pipeline: pipe.Name(), + Start: time.Now(), + Elapsed: 0, + }) + + return nil + + case <-ctx.Done(): + return errors.E(op, ctx.Err()) + } +} + +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { + const op = errors.Op("ephemeral_handle_request") + // handle timeouts + // theoretically, some bad user may send millions requests with a delay and produce a billion (for example) + // goroutines here. We should limit goroutines here. + if msg.Options.Delay > 0 { + // if we have 1000 goroutines waiting on the delay - reject 1001 + if atomic.LoadUint64(&j.goroutines) >= goroutinesMax { + return errors.E(op, errors.Str("max concurrency number reached")) + } + + go func(jj *Item) { + atomic.AddUint64(&j.goroutines, 1) + atomic.AddInt64(j.delayed, 1) + + time.Sleep(jj.Options.DelayDuration()) + + // send the item after timeout expired + j.localPrefetch <- jj + + atomic.AddUint64(&j.goroutines, ^uint64(0)) + }(msg) + + return nil + } + + // increase number of the active jobs + atomic.AddInt64(j.active, 1) + + // insert to the local, limited pipeline + select { + case j.localPrefetch <- msg: + return nil + case <-ctx.Done(): + return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", j.cfg.Prefetch, ctx.Err())) + } +} + +func (j *consumer) consume() { + go func() { + // redirect + for { + select { + case item, ok := <-j.localPrefetch: + if !ok { + j.log.Warn("ephemeral local prefetch queue was closed") + return + } + + // set requeue channel + item.Options.requeueFn = j.handleItem + item.Options.active = j.active + item.Options.delayed = j.delayed + + j.pq.Insert(item) + case <-j.stopCh: + return + } + } + }() +} + +func ready(r uint32) bool { + return r > 0 +} diff --git a/plugins/ephemeral/item.go b/plugins/ephemeral/item.go new file mode 100644 index 00000000..3298424d --- /dev/null +++ b/plugins/ephemeral/item.go @@ -0,0 +1,133 @@ +package ephemeral + +import ( + "context" + "sync/atomic" + "time" + + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains name of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. 
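+// Note: in the ephemeral driver a delayed job simply occupies one of the
+// (at most goroutinesMax) sleeper goroutines for the whole delay before
+// being pushed into the local prefetch queue.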
+type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // private + requeueFn func(context.Context, *Item) error + active *int64 + delayed *int64 +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + i.atomicallyReduceCount() + return nil +} + +func (i *Item) Nack() error { + i.atomicallyReduceCount() + return nil +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + i.atomicallyReduceCount() + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + return nil +} + +// atomicallyReduceCount reduces counter of active or delayed jobs +func (i *Item) atomicallyReduceCount() { + // if job was delayed, reduce number of the delayed jobs + if i.Options.Delay > 0 { + atomic.AddInt64(i.Options.delayed, ^int64(0)) + return + } + + // otherwise, reduce number of the active jobs + atomic.AddInt64(i.Options.active, ^int64(0)) + // noop for the in-memory +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} diff --git a/plugins/ephemeral/plugin.go b/plugins/ephemeral/plugin.go new file mode 100644 index 00000000..28495abb --- /dev/null +++ b/plugins/ephemeral/plugin.go @@ -0,0 +1,41 @@ +package ephemeral + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + PluginName string = "ephemeral" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Name() string { + return PluginName +} + +func (p *Plugin) Available() {} + +// JobsConstruct creates new ephemeral consumer from the configuration +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewJobBroker(configKey, p.log, p.cfg, e, pq) +} + +// FromPipeline creates new ephemeral 
consumer from the provided pipeline +func (p *Plugin) FromPipeline(pipeline *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipeline, p.log, e, pq) +} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/config.go b/plugins/jobs/drivers/amqp/amqpjobs/config.go deleted file mode 100644 index ac2f6e53..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package amqpjobs - -// pipeline rabbitmq info -const ( - exchangeKey string = "exchange" - exchangeType string = "exchange_type" - queue string = "queue" - routingKey string = "routing_key" - prefetch string = "prefetch" - exclusive string = "exclusive" - priority string = "priority" - multipleAsk string = "multiple_ask" - requeueOnFail string = "requeue_on_fail" - - dlx string = "x-dead-letter-exchange" - dlxRoutingKey string = "x-dead-letter-routing-key" - dlxTTL string = "x-message-ttl" - dlxExpires string = "x-expires" - - contentType string = "application/octet-stream" -) - -type GlobalCfg struct { - Addr string `mapstructure:"addr"` -} - -// Config is used to parse pipeline configuration -type Config struct { - Prefetch int `mapstructure:"prefetch"` - Queue string `mapstructure:"queue"` - Priority int64 `mapstructure:"priority"` - Exchange string `mapstructure:"exchange"` - ExchangeType string `mapstructure:"exchange_type"` - RoutingKey string `mapstructure:"routing_key"` - Exclusive bool `mapstructure:"exclusive"` - MultipleAck bool `mapstructure:"multiple_ask"` - RequeueOnFail bool `mapstructure:"requeue_on_fail"` -} - -func (c *Config) InitDefault() { - // all options should be in sync with the pipeline defaults in the FromPipeline method - if c.ExchangeType == "" { - c.ExchangeType = "direct" - } - - if c.Exchange == "" { - c.Exchange = "amqp.default" - } - - if c.Queue == "" { - c.Queue = "default" - } - - if c.Prefetch == 0 { - c.Prefetch = 10 - } - - if c.Priority == 0 { - c.Priority = 10 - } -} - -func (c *GlobalCfg) InitDefault() { - if c.Addr == "" { - c.Addr = "amqp://guest:guest@127.0.0.1:5672/" - } -} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/consumer.go b/plugins/jobs/drivers/amqp/amqpjobs/consumer.go deleted file mode 100644 index 1931ceaa..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/consumer.go +++ /dev/null @@ -1,512 +0,0 @@ -package amqpjobs - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/google/uuid" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - pluginName string = "amqp" -) - -type consumer struct { - sync.Mutex - log logger.Logger - pq priorityqueue.Queue - eh events.Handler - - pipeline atomic.Value - - // amqp connection - conn *amqp.Connection - consumeChan *amqp.Channel - publishChan chan *amqp.Channel - consumeID string - connStr string - - retryTimeout time.Duration - // - // prefetch QoS AMQP - // - prefetch int - // - // pipeline's priority - // - priority int64 - exchangeName string - queue string - exclusive bool - exchangeType string - routingKey string - multipleAck bool - requeueOnFail bool - - listeners 
uint32 - delayed *int64 - stopCh chan struct{} -} - -// NewAMQPConsumer initializes rabbitmq pipeline -func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_amqp_consumer") - // we need to obtain two parts of the amqp information here. - // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - // if no such key - error - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION START ------- - var pipeCfg Config - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - // PARSE CONFIGURATION END ------- - - jb := &consumer{ - log: log, - pq: pq, - eh: e, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - // TODO to config - retryTimeout: time.Minute * 5, - priority: pipeCfg.Priority, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeCfg.RoutingKey, - queue: pipeCfg.Queue, - exchangeType: pipeCfg.ExchangeType, - exchangeName: pipeCfg.Exchange, - prefetch: pipeCfg.Prefetch, - exclusive: pipeCfg.Exclusive, - multipleAck: pipeCfg.MultipleAck, - requeueOnFail: pipeCfg.RequeueOnFail, - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // run redialer and requeue listener for the connection - jb.redialer() - - return jb, nil -} - -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_amqp_consumer_from_pipeline") - // we need to obtain two parts of the amqp information here. 
- // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - - // only global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - jb := &consumer{ - log: log, - eh: e, - pq: pq, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - retryTimeout: time.Minute * 5, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeline.String(routingKey, ""), - queue: pipeline.String(queue, "default"), - exchangeType: pipeline.String(exchangeType, "direct"), - exchangeName: pipeline.String(exchangeKey, "amqp.default"), - prefetch: pipeline.Int(prefetch, 10), - priority: int64(pipeline.Int(priority, 10)), - exclusive: pipeline.Bool(exclusive, false), - multipleAck: pipeline.Bool(multipleAsk, false), - requeueOnFail: pipeline.Bool(requeueOnFail, false), - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // register the pipeline - // error here is always nil - _ = jb.Register(context.Background(), pipeline) - - // run redialer for the connection - jb.redialer() - - return jb, nil -} - -func (j *consumer) Push(ctx context.Context, job *job.Job) error { - const op = errors.Op("rabbitmq_push") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != job.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name())) - } - - err := j.handleItem(ctx, fromJob(job)) - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - j.pipeline.Store(p) - return nil -} - -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("rabbit_consume") - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) - } - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - var err error - j.consumeChan, err = j.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - err = j.consumeChan.Qos(j.prefetch, 0, false) - if err != nil { - return errors.E(op, err) - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // run listener - j.listener(deliv) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - - return nil -} - -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("amqp_driver_state") - select { - case pch := <-j.publishChan: - defer func() { - j.publishChan <- pch - }() - - q, err 
:= pch.QueueInspect(j.queue) - if err != nil { - return nil, errors.E(op, err) - } - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - return &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: q.Name, - Active: int64(q.Messages), - Delayed: atomic.LoadInt64(j.delayed), - Ready: ready(atomic.LoadUint32(&j.listeners)), - }, nil - - case <-ctx.Done(): - return nil, errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func (j *consumer) Pause(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested pause on: ", p) - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - err := j.consumeChan.Cancel(j.consumeID, true) - if err != nil { - j.log.Error("cancel publish channel, forcing close", "error", err) - errCl := j.consumeChan.Close() - if errCl != nil { - j.log.Error("force close failed", "error", err) - return - } - return - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *consumer) Resume(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested resume on: ", p) - } - - // protect connection (redial) - j.Lock() - defer j.Unlock() - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 1 { - j.log.Warn("amqp listener already in the active state") - return - } - - var err error - j.consumeChan, err = j.conn.Channel() - if err != nil { - j.log.Error("create channel on rabbitmq connection", "error", err) - return - } - - err = j.consumeChan.Qos(j.prefetch, 0, false) - if err != nil { - j.log.Error("qos set failed", "error", err) - return - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - j.log.Error("consume operation failed", "error", err) - return - } - - // run listener - j.listener(deliv) - - // increase number of listeners - atomic.AddUint32(&j.listeners, 1) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *consumer) Stop(context.Context) error { - j.stopCh <- struct{}{} - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - return nil -} - -// handleItem -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { - const op = errors.Op("rabbitmq_handle_item") - select { - case pch := <-j.publishChan: - // return the channel back - defer func() { - j.publishChan <- pch - }() - - // convert - table, err := pack(msg.ID(), msg) - if err != nil { - return errors.E(op, err) - } - - const op = errors.Op("rabbitmq_handle_item") - // handle timeouts - if msg.Options.DelayDuration() > 0 { - atomic.AddInt64(j.delayed, 1) - // TODO declare separate method for this if condition - // TODO dlx cache channel?? 
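// A worked example of the scheme below (values illustrative): a message with
// a 5s delay aimed at exchange "amqp.default" and queue "default" is published
// to a temporary queue named "delayed-5000.amqp.default.default". Its
// x-dead-letter-exchange/x-dead-letter-routing-key arguments point back at the
// original exchange and routing key, so the message is re-routed for normal
// consumption once the 5000 ms per-queue TTL expires; the temporary queue
// itself expires after twice that time.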
- delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) - tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue) - _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ - dlx: j.exchangeName, - dlxRoutingKey: j.routingKey, - dlxTTL: delayMs, - dlxExpires: delayMs * 2, - }) - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil) - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now().UTC(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) - return errors.E(op, err) - } - - return nil - } - - // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - return errors.E(op, err) - } - - return nil - case <-ctx.Done(): - return errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/item.go b/plugins/jobs/drivers/amqp/amqpjobs/item.go deleted file mode 100644 index a8e305ea..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/item.go +++ /dev/null @@ -1,239 +0,0 @@ -package amqpjobs - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - json "github.com/json-iterator/go" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // private - // Ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery - ack func(multiply bool) error - - // Nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. - // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. - // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. 
- // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time - nack func(multiply bool, requeue bool) error - - // requeueFn used as a pointer to the push function - requeueFn func(context.Context, *Item) error - delayed *int64 - multipleAsk bool - requeue bool -} - -// DelayDuration returns delay duration in a form of time.Duration. -func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the amqp, amqp.Table used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.ack(i.Options.multipleAsk) -} - -func (i *Item) Nack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.nack(false, i.Options.requeue) -} - -// Requeue with the provided delay, handled by the Nack -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - errNack := i.Options.nack(false, true) - if errNack != nil { - return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) - } - - return err - } - - // ack the job - err = i.Options.ack(false) - if err != nil { - return err - } - - return nil -} - -// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ -func (j *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { - const op = errors.Op("from_delivery_convert") - item, err := j.unpack(d) - if err != nil { - return nil, errors.E(op, err) - } - - i := &Item{ - Job: item.Job, - Ident: item.Ident, - Payload: item.Payload, - Headers: item.Headers, - Options: item.Options, - } - - item.Options.ack = d.Ack - item.Options.nack = d.Nack - item.Options.delayed = j.delayed - - // requeue func - item.Options.requeueFn = j.handleItem - return i, nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -// pack job metadata into headers -func pack(id string, j *Item) (amqp.Table, error) { - headers, err := json.Marshal(j.Headers) - if err != nil { - return nil, err - } - return amqp.Table{ - job.RRID: id, - job.RRJob: j.Job, - job.RRPipeline: j.Options.Pipeline, - job.RRHeaders: headers, - job.RRDelay: j.Options.Delay, - job.RRPriority: j.Options.Priority, - }, nil -} - -// unpack restores jobs.Options -func (j *consumer) unpack(d amqp.Delivery) (*Item, error) 
{ - item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ - multipleAsk: j.multipleAck, - requeue: j.requeueOnFail, - requeueFn: j.handleItem, - }} - - if _, ok := d.Headers[job.RRID].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID)) - } - - item.Ident = d.Headers[job.RRID].(string) - - if _, ok := d.Headers[job.RRJob].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) - } - - item.Job = d.Headers[job.RRJob].(string) - - if _, ok := d.Headers[job.RRPipeline].(string); ok { - item.Options.Pipeline = d.Headers[job.RRPipeline].(string) - } - - if h, ok := d.Headers[job.RRHeaders].([]byte); ok { - err := json.Unmarshal(h, &item.Headers) - if err != nil { - return nil, err - } - } - - if _, ok := d.Headers[job.RRDelay].(int64); ok { - item.Options.Delay = d.Headers[job.RRDelay].(int64) - } - - if _, ok := d.Headers[job.RRPriority]; !ok { - // set pipe's priority - item.Options.Priority = j.priority - } else { - item.Options.Priority = d.Headers[job.RRPriority].(int64) - } - - return item, nil -} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/listener.go b/plugins/jobs/drivers/amqp/amqpjobs/listener.go deleted file mode 100644 index 0156d55c..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/listener.go +++ /dev/null @@ -1,25 +0,0 @@ -package amqpjobs - -import amqp "github.com/rabbitmq/amqp091-go" - -func (j *consumer) listener(deliv <-chan amqp.Delivery) { - go func() { - for { //nolint:gosimple - select { - case msg, ok := <-deliv: - if !ok { - j.log.Info("delivery channel closed, leaving the rabbit listener") - return - } - - d, err := j.fromDelivery(msg) - if err != nil { - j.log.Error("amqp delivery convert", "error", err) - continue - } - // insert job into the main priority queue - j.pq.Insert(d) - } - } - }() -} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go b/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go deleted file mode 100644 index e260fabe..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/rabbit_init.go +++ /dev/null @@ -1,57 +0,0 @@ -package amqpjobs - -import ( - "github.com/spiral/errors" -) - -func (j *consumer) initRabbitMQ() error { - const op = errors.Op("jobs_plugin_rmq_init") - // Channel opens a unique, concurrent server channel to process the bulk of AMQP - // messages. Any error from methods on this receiver will render the receiver - // invalid and a new Channel should be opened. 
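// The topology declared below, in order: a durable exchange of the configured
// type, a non-durable queue whose exclusivity comes from the pipeline config,
// and a binding from that queue to the exchange under the configured routing
// key. The throwaway channel is closed once the topology exists; consuming
// and publishing use channels of their own.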
- channel, err := j.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - // declare an exchange (idempotent operation) - err = channel.ExchangeDeclare( - j.exchangeName, - j.exchangeType, - true, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // verify or declare a queue - q, err := channel.QueueDeclare( - j.queue, - false, - false, - j.exclusive, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // bind queue to the exchange - err = channel.QueueBind( - q.Name, - j.routingKey, - j.exchangeName, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - return channel.Close() -} diff --git a/plugins/jobs/drivers/amqp/amqpjobs/redial.go b/plugins/jobs/drivers/amqp/amqpjobs/redial.go deleted file mode 100644 index 0835e3ea..00000000 --- a/plugins/jobs/drivers/amqp/amqpjobs/redial.go +++ /dev/null @@ -1,141 +0,0 @@ -package amqpjobs - -import ( - "time" - - "github.com/cenkalti/backoff/v4" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" -) - -// redialer used to redial to the rabbitmq in case of the connection interrupts -func (j *consumer) redialer() { //nolint:gocognit - go func() { - const op = errors.Op("rabbitmq_redial") - - for { - select { - case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): - if err == nil { - return - } - - j.Lock() - - // trash the broken publishing channel - <-j.publishChan - - t := time.Now() - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeError, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Error: err, - Start: time.Now(), - }) - - expb := backoff.NewExponentialBackOff() - // set the retry timeout (minutes) - expb.MaxElapsedTime = j.retryTimeout - operation := func() error { - j.log.Warn("rabbitmq reconnecting, caused by", "error", err) - var dialErr error - j.conn, dialErr = amqp.Dial(j.connStr) - if dialErr != nil { - return errors.E(op, dialErr) - } - - j.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") - - // re-init connection - errInit := j.initRabbitMQ() - if errInit != nil { - j.log.Error("rabbitmq dial", "error", errInit) - return errInit - } - - // redeclare consume channel - var errConnCh error - j.consumeChan, errConnCh = j.conn.Channel() - if errConnCh != nil { - return errors.E(op, errConnCh) - } - - // redeclare publish channel - pch, errPubCh := j.conn.Channel() - if errPubCh != nil { - return errors.E(op, errPubCh) - } - - // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // put the fresh publishing channel - j.publishChan <- pch - // restart listener - j.listener(deliv) - - j.log.Info("queues and subscribers redeclared successfully") - - return nil - } - - retryErr := backoff.Retry(operation, expb) - if retryErr != nil { - j.Unlock() - j.log.Error("backoff failed", "error", retryErr) - return - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Start: t, - Elapsed: time.Since(t), - }) - - j.Unlock() - - case <-j.stopCh: - if j.publishChan != nil { - pch := <-j.publishChan - err := pch.Close() - if err != nil { - j.log.Error("publish channel close", "error", err) - } - } - - if j.consumeChan != nil { - err := j.consumeChan.Close() - if err != nil { - j.log.Error("consume channel close", "error", err) - } - } - if j.conn != nil { - err := j.conn.Close() - if err != nil { - j.log.Error("amqp connection close", "error", err) - } - } - - return - } - } - }() -} diff --git a/plugins/jobs/drivers/amqp/plugin.go b/plugins/jobs/drivers/amqp/plugin.go deleted file mode 100644 index 8797d20b..00000000 --- a/plugins/jobs/drivers/amqp/plugin.go +++ /dev/null @@ -1,41 +0,0 @@ -package amqp - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/amqp/amqpjobs" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - pluginName string = "amqp" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Name() string { - return pluginName -} - -func (p *Plugin) Available() {} - -func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return amqpjobs.NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) -} - -// FromPipeline constructs AMQP driver from pipeline -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return amqpjobs.FromPipeline(pipe, p.log, p.cfg, e, pq) -} diff --git a/plugins/jobs/drivers/beanstalk/config.go b/plugins/jobs/drivers/beanstalk/config.go deleted file mode 100644 index a8069f5d..00000000 --- a/plugins/jobs/drivers/beanstalk/config.go +++ /dev/null @@ -1,53 +0,0 @@ -package beanstalk - -import ( - "time" - - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - tubePriority string = "tube_priority" - tube string = "tube" - reserveTimeout string = "reserve_timeout" -) - -type GlobalCfg struct { - Addr string `mapstructure:"addr"` - 
Timeout time.Duration `mapstructure:"timeout"` -} - -func (c *GlobalCfg) InitDefault() { - if c.Addr == "" { - c.Addr = "tcp://127.0.0.1:11300" - } - - if c.Timeout == 0 { - c.Timeout = time.Second * 30 - } -} - -type Config struct { - PipePriority int64 `mapstructure:"priority"` - TubePriority *uint32 `mapstructure:"tube_priority"` - Tube string `mapstructure:"tube"` - ReserveTimeout time.Duration `mapstructure:"reserve_timeout"` -} - -func (c *Config) InitDefault() { - if c.Tube == "" { - c.Tube = "default" - } - - if c.ReserveTimeout == 0 { - c.ReserveTimeout = time.Second * 1 - } - - if c.TubePriority == nil { - c.TubePriority = utils.Uint32(0) - } - - if c.PipePriority == 0 { - c.PipePriority = 10 - } -} diff --git a/plugins/jobs/drivers/beanstalk/connection.go b/plugins/jobs/drivers/beanstalk/connection.go deleted file mode 100644 index d3241b37..00000000 --- a/plugins/jobs/drivers/beanstalk/connection.go +++ /dev/null @@ -1,223 +0,0 @@ -package beanstalk - -import ( - "context" - "net" - "sync" - "time" - - "github.com/beanstalkd/go-beanstalk" - "github.com/cenkalti/backoff/v4" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -type ConnPool struct { - sync.RWMutex - - log logger.Logger - - conn *beanstalk.Conn - connT *beanstalk.Conn - ts *beanstalk.TubeSet - t *beanstalk.Tube - - network string - address string - tName string - tout time.Duration -} - -func NewConnPool(network, address, tName string, tout time.Duration, log logger.Logger) (*ConnPool, error) { - connT, err := beanstalk.DialTimeout(network, address, tout) - if err != nil { - return nil, err - } - - connTS, err := beanstalk.DialTimeout(network, address, tout) - if err != nil { - return nil, err - } - - tube := beanstalk.NewTube(connT, tName) - ts := beanstalk.NewTubeSet(connTS, tName) - - return &ConnPool{ - log: log, - network: network, - address: address, - tName: tName, - tout: tout, - conn: connTS, - connT: connT, - ts: ts, - t: tube, - }, nil -} - -// Put the payload -// TODO use the context ?? -func (cp *ConnPool) Put(_ context.Context, body []byte, pri uint32, delay, ttr time.Duration) (uint64, error) { - cp.RLock() - defer cp.RUnlock() - - // TODO(rustatian): redial based on the token - id, err := cp.t.Put(body, pri, delay, ttr) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return 0, errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry put only when we redialed - return cp.t.Put(body, pri, delay, ttr) - } - } - - return id, nil -} - -// Reserve reserves and returns a job from one of the tubes in t. If no -// job is available before time timeout has passed, Reserve returns a -// ConnError recording ErrTimeout. -// -// Typically, a client will reserve a job, perform some work, then delete -// the job with Conn.Delete. 
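// A minimal consume loop over this pool might look like (sketch, mirroring
// what listen() in consumer.go does via unpack and the item's Ack):
//
//	id, body, err := cp.Reserve(reserveTimeout)
//	if err == nil {
//		// decode body, process the job, then remove it
//		_ = cp.Delete(context.Background(), id)
//	}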
-func (cp *ConnPool) Reserve(reserveTimeout time.Duration) (uint64, []byte, error) { - cp.RLock() - defer cp.RUnlock() - - id, body, err := cp.ts.Reserve(reserveTimeout) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return 0, nil, errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry Reserve only when we redialed - return cp.ts.Reserve(reserveTimeout) - } - } - - return id, body, nil -} - -func (cp *ConnPool) Delete(_ context.Context, id uint64) error { - cp.RLock() - defer cp.RUnlock() - - err := cp.conn.Delete(id) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry Delete only when we redialed - return cp.conn.Delete(id) - } - } - return nil -} - -func (cp *ConnPool) Stats(_ context.Context) (map[string]string, error) { - cp.RLock() - defer cp.RUnlock() - - stat, err := cp.conn.Stats() - if err != nil { - errR := cp.checkAndRedial(err) - if errR != nil { - return nil, errors.Errorf("err: %s\nerr redial: %s", err, errR) - } else { - return cp.conn.Stats() - } - } - - return stat, nil -} - -func (cp *ConnPool) redial() error { - const op = errors.Op("connection_pool_redial") - - cp.Lock() - // backoff here - expb := backoff.NewExponentialBackOff() - // TODO(rustatian) set via config - expb.MaxElapsedTime = time.Minute - - operation := func() error { - connT, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) - if err != nil { - return err - } - if connT == nil { - return errors.E(op, errors.Str("connectionT is nil")) - } - - connTS, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) - if err != nil { - return err - } - - if connTS == nil { - return errors.E(op, errors.Str("connectionTS is nil")) - } - - cp.t = beanstalk.NewTube(connT, cp.tName) - cp.ts = beanstalk.NewTubeSet(connTS, cp.tName) - cp.conn = connTS - cp.connT = connT - - cp.log.Info("beanstalk redial was successful") - return nil - } - - retryErr := backoff.Retry(operation, expb) - if retryErr != nil { - cp.Unlock() - return retryErr - } - cp.Unlock() - - return nil -} - -var connErrors = map[string]struct{}{"EOF": {}} - -func (cp *ConnPool) checkAndRedial(err error) error { - const op = errors.Op("connection_pool_check_redial") - switch et := err.(type) { //nolint:gocritic - // check if the error - case beanstalk.ConnError: - switch bErr := et.Err.(type) { - case *net.OpError: - cp.RUnlock() - errR := cp.redial() - cp.RLock() - // if redial failed - return - if errR != nil { - return errors.E(op, errors.Errorf("%v:%v", bErr, errR)) - } - - // if redial was successful -> continue listening - return nil - default: - if _, ok := connErrors[et.Err.Error()]; ok { - // if error is related to the broken connection - redial - cp.RUnlock() - errR := cp.redial() - cp.RLock() - // if redial failed - return - if errR != nil { - return errors.E(op, errors.Errorf("%v:%v", err, errR)) - } - // if redial was successful -> continue listening - return nil - } - } - } - - // return initial error - return err -} diff --git a/plugins/jobs/drivers/beanstalk/consumer.go b/plugins/jobs/drivers/beanstalk/consumer.go deleted file mode 100644 index 5ef89983..00000000 --- a/plugins/jobs/drivers/beanstalk/consumer.go +++ /dev/null @@ -1,360 +0,0 @@ -package beanstalk - -import ( - "bytes" - "context" - "strconv" - "strings" - "sync/atomic" - "time" - - 
"github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -type consumer struct { - log logger.Logger - eh events.Handler - pq priorityqueue.Queue - - pipeline atomic.Value - listeners uint32 - - // beanstalk - pool *ConnPool - addr string - network string - reserveTimeout time.Duration - reconnectCh chan struct{} - tout time.Duration - // tube name - tName string - tubePriority *uint32 - priority int64 - - stopCh chan struct{} - requeueCh chan *Item -} - -func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_beanstalk_consumer") - - // PARSE CONFIGURATION ------- - var pipeCfg Config - var globalCfg GlobalCfg - - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) - } - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - dsn := strings.Split(globalCfg.Addr, "://") - if len(dsn) != 2 { - return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) - } - - cPool, err := NewConnPool(dsn[0], dsn[1], pipeCfg.Tube, globalCfg.Timeout, log) - if err != nil { - return nil, errors.E(op, err) - } - - // initialize job consumer - jc := &consumer{ - pq: pq, - log: log, - eh: e, - pool: cPool, - network: dsn[0], - addr: dsn[1], - tout: globalCfg.Timeout, - tName: pipeCfg.Tube, - reserveTimeout: pipeCfg.ReserveTimeout, - tubePriority: pipeCfg.TubePriority, - priority: pipeCfg.PipePriority, - - // buffered with two because jobs root plugin can call Stop at the same time as Pause - stopCh: make(chan struct{}, 2), - requeueCh: make(chan *Item, 1000), - reconnectCh: make(chan struct{}, 2), - } - - return jc, nil -} - -func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_beanstalk_consumer") - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) - } - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - dsn := strings.Split(globalCfg.Addr, "://") - if len(dsn) != 2 { - return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) - } - - cPool, err := NewConnPool(dsn[0], dsn[1], pipe.String(tube, 
"default"), globalCfg.Timeout, log) - if err != nil { - return nil, errors.E(op, err) - } - - // initialize job consumer - jc := &consumer{ - pq: pq, - log: log, - eh: e, - pool: cPool, - network: dsn[0], - addr: dsn[1], - tout: globalCfg.Timeout, - tName: pipe.String(tube, "default"), - reserveTimeout: time.Second * time.Duration(pipe.Int(reserveTimeout, 5)), - tubePriority: utils.Uint32(uint32(pipe.Int(tubePriority, 1))), - priority: pipe.Priority(), - - // buffered with two because jobs root plugin can call Stop at the same time as Pause - stopCh: make(chan struct{}, 2), - requeueCh: make(chan *Item, 1000), - reconnectCh: make(chan struct{}, 2), - } - - return jc, nil -} -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { - const op = errors.Op("beanstalk_push") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != jb.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) - } - - err := j.handleItem(ctx, fromJob(jb)) - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (j *consumer) handleItem(ctx context.Context, item *Item) error { - const op = errors.Op("beanstalk_handle_item") - - bb := new(bytes.Buffer) - bb.Grow(64) - err := item.pack(bb) - if err != nil { - return errors.E(op, err) - } - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L458 - // is an integer < 2**32. Jobs with smaller priority values will be - // scheduled before jobs with larger priorities. The most urgent priority is 0; - // the least urgent priority is 4,294,967,295. - // - // is an integer number of seconds to wait before putting the job in - // the ready queue. The job will be in the "delayed" state during this time. - // Maximum delay is 2**32-1. - // - // -- time to run -- is an integer number of seconds to allow a worker - // to run this job. This time is counted from the moment a worker reserves - // this job. If the worker does not delete, release, or bury the job within - // seconds, the job will time out and the server will release the job. - // The minimum ttr is 1. If the client sends 0, the server will silently - // increase the ttr to 1. Maximum ttr is 2**32-1. 
- id, err := j.pool.Put(ctx, bb.Bytes(), *j.tubePriority, item.Options.DelayDuration(), j.tout) - if err != nil { - errD := j.pool.Delete(ctx, id) - if errD != nil { - return errors.E(op, errors.Errorf("%s:%s", err.Error(), errD.Error())) - } - return errors.E(op, err) - } - - return nil -} - -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - // register the pipeline - j.pipeline.Store(p) - return nil -} - -// State https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L514 -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("beanstalk_state") - stat, err := j.pool.Stats(ctx) - if err != nil { - return nil, errors.E(op, err) - } - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - out := &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: j.tName, - Ready: ready(atomic.LoadUint32(&j.listeners)), - } - - // set stat, skip errors (replace with 0) - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L523 - if v, err := strconv.Atoi(stat["current-jobs-ready"]); err == nil { - out.Active = int64(v) - } - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L525 - if v, err := strconv.Atoi(stat["current-jobs-reserved"]); err == nil { - // this is not an error, reserved in beanstalk behaves like an active jobs - out.Reserved = int64(v) - } - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L528 - if v, err := strconv.Atoi(stat["current-jobs-delayed"]); err == nil { - out.Delayed = int64(v) - } - - return out, nil -} - -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("beanstalk_run") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", p.Name(), pipe.Name())) - } - - atomic.AddUint32(&j.listeners, 1) - - go j.listen() - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - - return nil -} - -func (j *consumer) Stop(context.Context) error { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - if atomic.LoadUint32(&j.listeners) == 1 { - j.stopCh <- struct{}{} - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - - return nil -} - -func (j *consumer) Pause(_ context.Context, p string) { - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - j.stopCh <- struct{}{} - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *consumer) Resume(_ context.Context, p string) { - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 1 { - j.log.Warn("sqs listener already in the active state") - return - } - - // start listener - go 
j.listen() - - // increase num of listeners - atomic.AddUint32(&j.listeners, 1) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/jobs/drivers/beanstalk/encode_test.go b/plugins/jobs/drivers/beanstalk/encode_test.go deleted file mode 100644 index e43207eb..00000000 --- a/plugins/jobs/drivers/beanstalk/encode_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package beanstalk - -import ( - "bytes" - "crypto/rand" - "encoding/gob" - "testing" - - json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/utils" -) - -func BenchmarkEncodeGob(b *testing.B) { - tb := make([]byte, 1024*10) - _, err := rand.Read(tb) - if err != nil { - b.Fatal(err) - } - - item := &Item{ - Job: "/super/test/php/class/loooooong", - Ident: "12341234-asdfasdfa-1234234-asdfasdfas", - Payload: utils.AsString(tb), - Headers: map[string][]string{"Test": {"test1", "test2"}}, - Options: &Options{ - Priority: 10, - Pipeline: "test-local-pipe", - Delay: 10, - }, - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - bb := new(bytes.Buffer) - err := gob.NewEncoder(bb).Encode(item) - if err != nil { - b.Fatal(err) - } - _ = bb.Bytes() - bb.Reset() - } -} - -func BenchmarkEncodeJsonIter(b *testing.B) { - tb := make([]byte, 1024*10) - _, err := rand.Read(tb) - if err != nil { - b.Fatal(err) - } - - item := &Item{ - Job: "/super/test/php/class/loooooong", - Ident: "12341234-asdfasdfa-1234234-asdfasdfas", - Payload: utils.AsString(tb), - Headers: map[string][]string{"Test": {"test1", "test2"}}, - Options: &Options{ - Priority: 10, - Pipeline: "test-local-pipe", - Delay: 10, - }, - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - bb, err := json.Marshal(item) - if err != nil { - b.Fatal(err) - } - _ = bb - } -} diff --git a/plugins/jobs/drivers/beanstalk/item.go b/plugins/jobs/drivers/beanstalk/item.go deleted file mode 100644 index 0a6cd560..00000000 --- a/plugins/jobs/drivers/beanstalk/item.go +++ /dev/null @@ -1,147 +0,0 @@ -package beanstalk - -import ( - "bytes" - "context" - "encoding/gob" - "time" - - "github.com/beanstalkd/go-beanstalk" - json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // Private ================ - id uint64 - conn *beanstalk.Conn - requeueFn func(context.Context, *Item) error -} - -// DelayDuration returns delay duration in a form of time.Duration. 
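// For example, Delay: 10 yields 10 * time.Second, which handleItem hands to
// beanstalk as the job's <delay> in whole seconds.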
-func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the sqs, MessageAttributes used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - return i.Options.conn.Delete(i.Options.id) -} - -func (i *Item) Nack() error { - return i.Options.conn.Delete(i.Options.id) -} - -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - return err - } - - // delete old job - err = i.Options.conn.Delete(i.Options.id) - if err != nil { - return err - } - - return nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -func (i *Item) pack(b *bytes.Buffer) error { - err := gob.NewEncoder(b).Encode(i) - if err != nil { - return err - } - - return nil -} - -func (j *consumer) unpack(id uint64, data []byte, out *Item) error { - err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out) - if err != nil { - return err - } - out.Options.conn = j.pool.conn - out.Options.id = id - out.Options.requeueFn = j.handleItem - - return nil -} diff --git a/plugins/jobs/drivers/beanstalk/listen.go b/plugins/jobs/drivers/beanstalk/listen.go deleted file mode 100644 index 6bb159ea..00000000 --- a/plugins/jobs/drivers/beanstalk/listen.go +++ /dev/null @@ -1,39 +0,0 @@ -package beanstalk - -import ( - "github.com/beanstalkd/go-beanstalk" -) - -func (j *consumer) listen() { - for { - select { - case <-j.stopCh: - j.log.Warn("beanstalk listener stopped") - return - default: - id, body, err := j.pool.Reserve(j.reserveTimeout) - if err != nil { - if errB, ok := err.(beanstalk.ConnError); ok { - switch errB.Err { //nolint:gocritic - case beanstalk.ErrTimeout: - j.log.Info("beanstalk reserve timeout", "warn", errB.Op) - continue - } - } - // in case of other error - continue - j.log.Error("beanstalk reserve", "error", err) - continue - } - - item := &Item{} - err = j.unpack(id, body, item) - if err != nil { - j.log.Error("beanstalk unpack item", "error", err) - continue - } - - // insert job into the priority queue - j.pq.Insert(item) - } - } -} diff --git a/plugins/jobs/drivers/beanstalk/plugin.go b/plugins/jobs/drivers/beanstalk/plugin.go deleted file mode 100644 index 529d1474..00000000 --- a/plugins/jobs/drivers/beanstalk/plugin.go +++ /dev/null @@ -1,47 +0,0 @@ -package beanstalk - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/config" - 
"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - pluginName string = "beanstalk" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Serve() chan error { - return make(chan error) -} - -func (p *Plugin) Stop() error { - return nil -} - -func (p *Plugin) Name() string { - return pluginName -} - -func (p *Plugin) Available() {} - -func (p *Plugin) JobsConstruct(configKey string, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return NewBeanstalkConsumer(configKey, p.log, p.cfg, eh, pq) -} - -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return FromPipeline(pipe, p.log, p.cfg, eh, pq) -} diff --git a/plugins/jobs/drivers/ephemeral/consumer.go b/plugins/jobs/drivers/ephemeral/consumer.go deleted file mode 100644 index 91b8eda9..00000000 --- a/plugins/jobs/drivers/ephemeral/consumer.go +++ /dev/null @@ -1,274 +0,0 @@ -package ephemeral - -import ( - "context" - "sync/atomic" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - prefetch string = "prefetch" - goroutinesMax uint64 = 1000 -) - -type Config struct { - Prefetch uint64 `mapstructure:"prefetch"` -} - -type consumer struct { - cfg *Config - log logger.Logger - eh events.Handler - pipeline atomic.Value - pq priorityqueue.Queue - localPrefetch chan *Item - - // time.sleep goroutines max number - goroutines uint64 - - delayed *int64 - active *int64 - - listeners uint32 - stopCh chan struct{} -} - -func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_ephemeral_pipeline") - - jb := &consumer{ - log: log, - pq: pq, - eh: eh, - goroutines: 0, - active: utils.Int64(0), - delayed: utils.Int64(0), - stopCh: make(chan struct{}, 1), - } - - err := cfg.UnmarshalKey(configKey, &jb.cfg) - if err != nil { - return nil, errors.E(op, err) - } - - if jb.cfg.Prefetch == 0 { - jb.cfg.Prefetch = 100_000 - } - - // initialize a local queue - jb.localPrefetch = make(chan *Item, jb.cfg.Prefetch) - - return jb, nil -} - -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) { - jb := &consumer{ - log: log, - pq: pq, - eh: eh, - goroutines: 0, - active: utils.Int64(0), - delayed: utils.Int64(0), - stopCh: make(chan struct{}, 1), - } - - // initialize a local queue - jb.localPrefetch = make(chan *Item, pipeline.Int(prefetch, 100_000)) - - return jb, nil -} - -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { - const op = errors.Op("ephemeral_push") - - // check if the pipeline registered - _, ok := j.pipeline.Load().(*pipeline.Pipeline) - if !ok { - return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline)) - } - - err := j.handleItem(ctx, fromJob(jb)) - if err != nil { - return 
errors.E(op, err) - } - - return nil -} - -func (j *consumer) State(_ context.Context) (*jobState.State, error) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - return &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: pipe.Name(), - Active: atomic.LoadInt64(j.active), - Delayed: atomic.LoadInt64(j.delayed), - Ready: ready(atomic.LoadUint32(&j.listeners)), - }, nil -} - -func (j *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { - j.pipeline.Store(pipeline) - return nil -} - -func (j *consumer) Pause(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested pause on: ", p) - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - // stop the consumer - j.stopCh <- struct{}{} - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - Elapsed: 0, - }) -} - -func (j *consumer) Resume(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested resume on: ", p) - } - - l := atomic.LoadUint32(&j.listeners) - // listener already active - if l == 1 { - j.log.Warn("listener already in the active state") - return - } - - // resume the consumer on the same channel - j.consume() - - atomic.StoreUint32(&j.listeners, 1) - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Pipeline: pipe.Name(), - Start: time.Now(), - Elapsed: 0, - }) -} - -// Run is no-op for the ephemeral -func (j *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - return nil -} - -func (j *consumer) Stop(ctx context.Context) error { - const op = errors.Op("ephemeral_plugin_stop") - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - select { - // return from the consumer - case j.stopCh <- struct{}{}: - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Pipeline: pipe.Name(), - Start: time.Now(), - Elapsed: 0, - }) - - return nil - - case <-ctx.Done(): - return errors.E(op, ctx.Err()) - } -} - -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { - const op = errors.Op("ephemeral_handle_request") - // handle timeouts - // theoretically, some bad user may send millions requests with a delay and produce a billion (for example) - // goroutines here. We should limit goroutines here. 
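A note on the delay branch that follows: every delayed job parks one goroutine in time.Sleep, so the driver counts the sleepers atomically and rejects new delayed jobs once goroutinesMax (1000) of them are waiting. A minimal standalone sketch of that pattern, assuming an out channel in the role of localPrefetch (all names here are illustrative, not the driver's):

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
        "time"
    )

    const maxSleepers uint64 = 1000

    var sleeping uint64 // goroutines currently parked in time.Sleep

    // deliver sends v to out after d, refusing to park more than maxSleepers
    // goroutines at once -- the same guard handleItem applies below.
    func deliver(out chan<- int, v int, d time.Duration) error {
        if atomic.LoadUint64(&sleeping) >= maxSleepers {
            return errors.New("max concurrency number reached")
        }
        atomic.AddUint64(&sleeping, 1)
        go func() {
            defer atomic.AddUint64(&sleeping, ^uint64(0)) // atomic decrement
            time.Sleep(d)
            out <- v
        }()
        return nil
    }

    func main() {
        out := make(chan int, 1)
        if err := deliver(out, 42, time.Millisecond); err != nil {
            panic(err)
        }
        fmt.Println(<-out) // 42, roughly a millisecond later
    }

One difference worth noting: the driver increments its counter inside the spawned goroutine, after the check, so its cap is approximate under concurrent pushes; the sketch increments before spawning, which is the stricter variant.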
- if msg.Options.Delay > 0 { - // if we have 1000 goroutines waiting on the delay - reject 1001 - if atomic.LoadUint64(&j.goroutines) >= goroutinesMax { - return errors.E(op, errors.Str("max concurrency number reached")) - } - - go func(jj *Item) { - atomic.AddUint64(&j.goroutines, 1) - atomic.AddInt64(j.delayed, 1) - - time.Sleep(jj.Options.DelayDuration()) - - // send the item after timeout expired - j.localPrefetch <- jj - - atomic.AddUint64(&j.goroutines, ^uint64(0)) - }(msg) - - return nil - } - - // increase number of the active jobs - atomic.AddInt64(j.active, 1) - - // insert to the local, limited pipeline - select { - case j.localPrefetch <- msg: - return nil - case <-ctx.Done(): - return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", j.cfg.Prefetch, ctx.Err())) - } -} - -func (j *consumer) consume() { - go func() { - // redirect - for { - select { - case item, ok := <-j.localPrefetch: - if !ok { - j.log.Warn("ephemeral local prefetch queue was closed") - return - } - - // set requeue channel - item.Options.requeueFn = j.handleItem - item.Options.active = j.active - item.Options.delayed = j.delayed - - j.pq.Insert(item) - case <-j.stopCh: - return - } - } - }() -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/jobs/drivers/ephemeral/item.go b/plugins/jobs/drivers/ephemeral/item.go deleted file mode 100644 index 3298424d..00000000 --- a/plugins/jobs/drivers/ephemeral/item.go +++ /dev/null @@ -1,133 +0,0 @@ -package ephemeral - -import ( - "context" - "sync/atomic" - "time" - - json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains name of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // private - requeueFn func(context.Context, *Item) error - active *int64 - delayed *int64 -} - -// DelayDuration returns delay duration in a form of time.Duration. -func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. 
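Every driver in this change hands the worker the same context envelope: the payload travels through Body(), while Context() marshals id, job, headers and pipeline. A short sketch of the resulting JSON, using encoding/json in place of the json-iterator alias (wire-compatible for this struct); the values are made up:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Mirrors the anonymous struct marshalled by Item.Context().
        ctx, err := json.Marshal(struct {
            ID       string              `json:"id"`
            Job      string              `json:"job"`
            Headers  map[string][]string `json:"headers"`
            Pipeline string              `json:"pipeline"`
        }{ID: "1", Job: "ping", Headers: map[string][]string{"attempt": {"1"}}, Pipeline: "local"})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(ctx)) // {"id":"1","job":"ping","headers":{"attempt":["1"]},"pipeline":"local"}
    }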
-func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - i.atomicallyReduceCount() - return nil -} - -func (i *Item) Nack() error { - i.atomicallyReduceCount() - return nil -} - -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - i.atomicallyReduceCount() - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - return err - } - - return nil -} - -// atomicallyReduceCount reduces counter of active or delayed jobs -func (i *Item) atomicallyReduceCount() { - // if job was delayed, reduce number of the delayed jobs - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - return - } - - // otherwise, reduce number of the active jobs - atomic.AddInt64(i.Options.active, ^int64(0)) - // noop for the in-memory -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} diff --git a/plugins/jobs/drivers/ephemeral/plugin.go b/plugins/jobs/drivers/ephemeral/plugin.go deleted file mode 100644 index 28495abb..00000000 --- a/plugins/jobs/drivers/ephemeral/plugin.go +++ /dev/null @@ -1,41 +0,0 @@ -package ephemeral - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - PluginName string = "ephemeral" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Name() string { - return PluginName -} - -func (p *Plugin) Available() {} - -// JobsConstruct creates new ephemeral consumer from the configuration -func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return NewJobBroker(configKey, p.log, p.cfg, e, pq) -} - -// FromPipeline creates new ephemeral consumer from the provided pipeline -func (p *Plugin) FromPipeline(pipeline *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return FromPipeline(pipeline, p.log, e, pq) -} diff --git a/plugins/jobs/drivers/sqs/config.go b/plugins/jobs/drivers/sqs/config.go deleted file mode 100644 index 9b2a1ca8..00000000 --- a/plugins/jobs/drivers/sqs/config.go +++ /dev/null @@ -1,114 +0,0 @@ -package sqs - -import "github.com/aws/aws-sdk-go-v2/aws" - -const ( - attributes string = "attributes" - tags string = "tags" - queue string = "queue" - pref string = "prefetch" - visibility string = "visibility_timeout" - waitTime string = "wait_time" -) - -type GlobalCfg struct { - Key string `mapstructure:"key"` - Secret string `mapstructure:"secret"` - Region string `mapstructure:"region"` - SessionToken string `mapstructure:"session_token"` - Endpoint string 
`mapstructure:"endpoint"` -} - -// Config is used to parse pipeline configuration -type Config struct { - // The duration (in seconds) that the received messages are hidden from subsequent - // retrieve requests after being retrieved by a ReceiveMessage request. - VisibilityTimeout int32 `mapstructure:"visibility_timeout"` - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - WaitTimeSeconds int32 `mapstructure:"wait_time_seconds"` - // Prefetch is the maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). Valid values: 1 to - // 10. Default: 1. - Prefetch int32 `mapstructure:"prefetch"` - // The name of the new queue. The following limits apply to this name: - // - // * A queue - // name can have up to 80 characters. - // - // * Valid values: alphanumeric characters, - // hyphens (-), and underscores (_). - // - // * A FIFO queue name must end with the .fifo - // suffix. - // - // Queue URLs and names are case-sensitive. - // - // This member is required. - Queue *string `mapstructure:"queue"` - - // A map of attributes with their corresponding values. The following lists the - // names, descriptions, and values of the special request parameters that the - // CreateQueue action uses. - // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html - Attributes map[string]string `mapstructure:"attributes"` - - // From amazon docs: - // Add cost allocation tags to the specified Amazon SQS queue. For an overview, see - // Tagging Your Amazon SQS Queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) - // in the Amazon SQS Developer Guide. When you use queue tags, keep the following - // guidelines in mind: - // - // * Adding more than 50 tags to a queue isn't recommended. - // - // * - // Tags don't have any semantic meaning. Amazon SQS interprets tags as character - // strings. - // - // * Tags are case-sensitive. - // - // * A new tag with a key identical to that - // of an existing tag overwrites the existing tag. - // - // For a full list of tag - // restrictions, see Quotas related to queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) - // in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you - // must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account - // permissions don't apply to this action. For more information, see Grant - // cross-account permissions to a role and a user name - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) - // in the Amazon SQS Developer Guide. 
- Tags map[string]string `mapstructure:"tags"` -} - -func (c *GlobalCfg) InitDefault() { - if c.Endpoint == "" { - c.Endpoint = "http://127.0.0.1:9324" - } -} - -func (c *Config) InitDefault() { - if c.Queue == nil { - c.Queue = aws.String("default") - } - - if c.Prefetch == 0 || c.Prefetch > 10 { - c.Prefetch = 10 - } - - if c.WaitTimeSeconds == 0 { - c.WaitTimeSeconds = 5 - } - - if c.Attributes == nil { - c.Attributes = make(map[string]string) - } - - if c.Tags == nil { - c.Tags = make(map[string]string) - } -} diff --git a/plugins/jobs/drivers/sqs/consumer.go b/plugins/jobs/drivers/sqs/consumer.go deleted file mode 100644 index 23203190..00000000 --- a/plugins/jobs/drivers/sqs/consumer.go +++ /dev/null @@ -1,411 +0,0 @@ -package sqs - -import ( - "context" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/aws/retry" - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/sqs" - "github.com/aws/aws-sdk-go-v2/service/sqs/types" - "github.com/google/uuid" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - cfgPlugin "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -type consumer struct { - sync.Mutex - pq priorityqueue.Queue - log logger.Logger - eh events.Handler - pipeline atomic.Value - - // connection info - key string - secret string - sessionToken string - region string - endpoint string - queue *string - messageGroupID string - waitTime int32 - prefetch int32 - visibilityTimeout int32 - - // if user invoke several resume operations - listeners uint32 - - // queue optional parameters - attributes map[string]string - tags map[string]string - - client *sqs.Client - queueURL *string - - pauseCh chan struct{} -} - -func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_sqs_consumer") - - // if no such key - error - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section")) - } - - // PARSE CONFIGURATION ------- - var pipeCfg Config - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // initialize job consumer - jb := &consumer{ - pq: pq, - log: log, - eh: e, - messageGroupID: uuid.NewString(), - attributes: pipeCfg.Attributes, - tags: pipeCfg.Tags, - queue: pipeCfg.Queue, - prefetch: pipeCfg.Prefetch, - visibilityTimeout: pipeCfg.VisibilityTimeout, - waitTime: pipeCfg.WaitTimeSeconds, - region: globalCfg.Region, - key: globalCfg.Key, - sessionToken: globalCfg.SessionToken, - secret: globalCfg.Secret, - endpoint: globalCfg.Endpoint, - pauseCh: make(chan struct{}, 1), - } - - // PARSE CONFIGURATION ------- - - awsConf, err := 
config.LoadDefaultConfig(context.Background(), - config.WithRegion(globalCfg.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken))) - if err != nil { - return nil, errors.E(op, err) - } - - // config with retries - jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) { - o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) { - opts.MaxAttempts = 60 - }) - }) - - out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags}) - if err != nil { - return nil, errors.E(op, err) - } - - // assign a queue URL - jb.queueURL = out.QueueUrl - - // To successfully create a new queue, you must provide a - // queue name that adheres to the limits related to queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) - // and is unique within the scope of your queues. After you create a queue, you - // must wait at least one second after the queue is created to be able to use the <------------ - // queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl require - time.Sleep(time.Second * 2) - - return jb, nil -} - -func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_sqs_consumer") - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section")) - } - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - attr := make(map[string]string) - err = pipe.Map(attributes, attr) - if err != nil { - return nil, errors.E(op, err) - } - - tg := make(map[string]string) - err = pipe.Map(tags, tg) - if err != nil { - return nil, errors.E(op, err) - } - - // initialize job consumer - jb := &consumer{ - pq: pq, - log: log, - eh: e, - messageGroupID: uuid.NewString(), - attributes: attr, - tags: tg, - queue: aws.String(pipe.String(queue, "default")), - prefetch: int32(pipe.Int(pref, 10)), - visibilityTimeout: int32(pipe.Int(visibility, 0)), - waitTime: int32(pipe.Int(waitTime, 0)), - region: globalCfg.Region, - key: globalCfg.Key, - sessionToken: globalCfg.SessionToken, - secret: globalCfg.Secret, - endpoint: globalCfg.Endpoint, - pauseCh: make(chan struct{}, 1), - } - - // PARSE CONFIGURATION ------- - - awsConf, err := config.LoadDefaultConfig(context.Background(), - config.WithRegion(globalCfg.Region), - config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken))) - if err != nil { - return nil, errors.E(op, err) - } - - // config with retries - jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) { - o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) { - opts.MaxAttempts = 60 - }) - }) - - out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags}) - if err != nil { - return nil, errors.E(op, err) - } - - // assign a queue URL - jb.queueURL = out.QueueUrl - - // To successfully create a new queue, you must provide a - // queue name that adheres 
to the limits related to queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) - // and is unique within the scope of your queues. After you create a queue, you - // must wait at least one second after the queue is created to be able to use the <------------ - // queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl require - time.Sleep(time.Second * 2) - - return jb, nil -} - -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { - const op = errors.Op("sqs_push") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != jb.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) - } - - // The length of time, in seconds, for which to delay a specific message. Valid - // values: 0 to 900. Maximum: 15 minutes. - if jb.Options.Delay > 900 { - return errors.E(op, errors.Errorf("unable to push, maximum possible delay is 900 seconds (15 minutes), provided: %d", jb.Options.Delay)) - } - - err := j.handleItem(ctx, fromJob(jb)) - if err != nil { - return errors.E(op, err) - } - return nil -} - -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("sqs_state") - attr, err := j.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{ - QueueUrl: j.queueURL, - AttributeNames: []types.QueueAttributeName{ - types.QueueAttributeNameApproximateNumberOfMessages, - types.QueueAttributeNameApproximateNumberOfMessagesDelayed, - types.QueueAttributeNameApproximateNumberOfMessagesNotVisible, - }, - }) - - if err != nil { - return nil, errors.E(op, err) - } - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - out := &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: *j.queueURL, - Ready: ready(atomic.LoadUint32(&j.listeners)), - } - - nom, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessages)]) - if err == nil { - out.Active = int64(nom) - } - - delayed, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesDelayed)]) - if err == nil { - out.Delayed = int64(delayed) - } - - nv, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesNotVisible)]) - if err == nil { - out.Reserved = int64(nv) - } - - return out, nil -} - -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - j.pipeline.Store(p) - return nil -} - -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("sqs_run") - - j.Lock() - defer j.Unlock() - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) - } - - atomic.AddUint32(&j.listeners, 1) - - // start listener - go j.listen(context.Background()) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - - return nil -} - -func (j *consumer) Stop(context.Context) error { - j.pauseCh <- struct{}{} - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) - return nil -} - -func (j *consumer) Pause(_ context.Context, p string) { - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if 
pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - // stop consume - j.pauseCh <- struct{}{} - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *consumer) Resume(_ context.Context, p string) { - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 1 { - j.log.Warn("sqs listener already in the active state") - return - } - - // start listener - go j.listen(context.Background()) - - // increase num of listeners - atomic.AddUint32(&j.listeners, 1) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: time.Now(), - }) -} - -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { - d, err := msg.pack(j.queueURL) - if err != nil { - return err - } - _, err = j.client.SendMessage(ctx, d) - if err != nil { - return err - } - - return nil -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/jobs/drivers/sqs/item.go b/plugins/jobs/drivers/sqs/item.go deleted file mode 100644 index 996adf6c..00000000 --- a/plugins/jobs/drivers/sqs/item.go +++ /dev/null @@ -1,247 +0,0 @@ -package sqs - -import ( - "context" - "strconv" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/sqs" - "github.com/aws/aws-sdk-go-v2/service/sqs/types" - json "github.com/json-iterator/go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - StringType string = "String" - NumberType string = "Number" - BinaryType string = "Binary" - ApproximateReceiveCount string = "ApproximateReceiveCount" -) - -var itemAttributes = []string{ - job.RRJob, - job.RRDelay, - job.RRPriority, - job.RRHeaders, -} - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // Private ================ - approxReceiveCount int64 - queue *string - receiptHandler *string - client *sqs.Client - requeueFn func(context.Context, *Item) error -} - -// DelayDuration returns delay duration in a form of time.Duration. 
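The private Options fields above carry everything an item needs to settle itself: Ack and Nack both end in DeleteMessage on the stored receipt handle, and Requeue re-publishes through requeueFn first and deletes the old message only afterwards, so the job is never lost between the two calls. A trimmed sketch of the ack path using the same SDK call (client, queue and receipt are assumed to come from a received message):

    package example

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/service/sqs"
    )

    // ack deletes a received message; SQS has no explicit acknowledgement,
    // deleting the receipt handle is it.
    func ack(ctx context.Context, client *sqs.Client, queue, receipt *string) error {
        _, err := client.DeleteMessage(ctx, &sqs.DeleteMessageInput{
            QueueUrl:      queue,
            ReceiptHandle: receipt,
        })
        return err
    }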
-func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the sqs, MessageAttributes used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - _, err := i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ - QueueUrl: i.Options.queue, - ReceiptHandle: i.Options.receiptHandler, - }) - - if err != nil { - return err - } - - return nil -} - -func (i *Item) Nack() error { - // requeue message - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - return err - } - - _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ - QueueUrl: i.Options.queue, - ReceiptHandle: i.Options.receiptHandler, - }) - - if err != nil { - return err - } - - return nil -} - -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - // requeue message - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - return err - } - - // Delete job from the queue only after successful requeue - _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ - QueueUrl: i.Options.queue, - ReceiptHandle: i.Options.receiptHandler, - }) - - if err != nil { - return err - } - - return nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) { - // pack headers map - data, err := json.Marshal(i.Headers) - if err != nil { - return nil, err - } - - return &sqs.SendMessageInput{ - MessageBody: aws.String(i.Payload), - QueueUrl: queue, - DelaySeconds: int32(i.Options.Delay), - MessageAttributes: map[string]types.MessageAttributeValue{ - job.RRJob: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Job)}, - job.RRDelay: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Delay)))}, - job.RRHeaders: {DataType: aws.String(BinaryType), BinaryValue: data, BinaryListValues: nil, StringListValues: nil, StringValue: nil}, - job.RRPriority: {DataType: aws.String(NumberType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Priority)))}, - }, - }, nil -} - -func (j *consumer) unpack(msg *types.Message) (*Item, error) { - const op = errors.Op("sqs_unpack") - // reserved - if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok { - return nil, errors.E(op, errors.Str("failed to unpack the ApproximateReceiveCount attribute")) - } 
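pack and unpack keep job metadata out of the message body: the job.RR* attributes travel as SQS MessageAttributes, and unpack (continuing below) rejects any message missing one of them, for example a message published by a foreign producer. A compact sketch of the publishing side built from the same SDK calls as pack; apart from the imported constants, every value is illustrative:

    package example

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/sqs"
        "github.com/aws/aws-sdk-go-v2/service/sqs/types"
        "github.com/spiral/roadrunner/v2/plugins/jobs/job"
    )

    // send publishes a payload carrying the four attributes unpack insists on.
    func send(ctx context.Context, client *sqs.Client, queueURL *string, payload string) error {
        _, err := client.SendMessage(ctx, &sqs.SendMessageInput{
            MessageBody: aws.String(payload),
            QueueUrl:    queueURL,
            MessageAttributes: map[string]types.MessageAttributeValue{
                job.RRJob:      {DataType: aws.String("String"), StringValue: aws.String("ping")},
                job.RRDelay:    {DataType: aws.String("String"), StringValue: aws.String("0")},
                job.RRHeaders:  {DataType: aws.String("Binary"), BinaryValue: []byte(`{}`)},
                job.RRPriority: {DataType: aws.String("Number"), StringValue: aws.String("10")},
            },
        })
        return err
    }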
- - for i := 0; i < len(itemAttributes); i++ { - if _, ok := msg.MessageAttributes[itemAttributes[i]]; !ok { - return nil, errors.E(op, errors.Errorf("missing queue attribute: %s", itemAttributes[i])) - } - } - - var h map[string][]string - err := json.Unmarshal(msg.MessageAttributes[job.RRHeaders].BinaryValue, &h) - if err != nil { - return nil, err - } - - delay, err := strconv.Atoi(*msg.MessageAttributes[job.RRDelay].StringValue) - if err != nil { - return nil, errors.E(op, err) - } - - priority, err := strconv.Atoi(*msg.MessageAttributes[job.RRPriority].StringValue) - if err != nil { - return nil, errors.E(op, err) - } - - recCount, err := strconv.Atoi(msg.Attributes[ApproximateReceiveCount]) - if err != nil { - return nil, errors.E(op, err) - } - - item := &Item{ - Job: *msg.MessageAttributes[job.RRJob].StringValue, - Payload: *msg.Body, - Headers: h, - Options: &Options{ - Delay: int64(delay), - Priority: int64(priority), - - // private - approxReceiveCount: int64(recCount), - client: j.client, - queue: j.queueURL, - receiptHandler: msg.ReceiptHandle, - requeueFn: j.handleItem, - }, - } - - return item, nil -} diff --git a/plugins/jobs/drivers/sqs/listener.go b/plugins/jobs/drivers/sqs/listener.go deleted file mode 100644 index a4280af2..00000000 --- a/plugins/jobs/drivers/sqs/listener.go +++ /dev/null @@ -1,87 +0,0 @@ -package sqs - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/service/sqs" - "github.com/aws/aws-sdk-go-v2/service/sqs/types" - "github.com/aws/smithy-go" -) - -const ( - // All - get all message attribute names - All string = "All" - - // NonExistentQueue AWS error code - NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue" -) - -func (j *consumer) listen(ctx context.Context) { //nolint:gocognit - for { - select { - case <-j.pauseCh: - j.log.Warn("sqs listener stopped") - return - default: - message, err := j.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ - QueueUrl: j.queueURL, - MaxNumberOfMessages: j.prefetch, - AttributeNames: []types.QueueAttributeName{types.QueueAttributeName(ApproximateReceiveCount)}, - MessageAttributeNames: []string{All}, - // The new value for the message's visibility timeout (in seconds). Values range: 0 - // to 43200. Maximum: 12 hours. - VisibilityTimeout: j.visibilityTimeout, - WaitTimeSeconds: j.waitTime, - }) - - if err != nil { - if oErr, ok := (err).(*smithy.OperationError); ok { - if rErr, ok := oErr.Err.(*http.ResponseError); ok { - if apiErr, ok := rErr.Err.(*smithy.GenericAPIError); ok { - // in case of NonExistentQueue - recreate the queue - if apiErr.Code == NonExistentQueue { - j.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault()) - _, err = j.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: j.queue, Attributes: j.attributes, Tags: j.tags}) - if err != nil { - j.log.Error("create queue", "error", err) - } - // To successfully create a new queue, you must provide a - // queue name that adheres to the limits related to the queues - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) - // and is unique within the scope of your queues. After you create a queue, you - // must wait at least one second after the queue is created to be able to use the <------------ - // queue. To get the queue URL, use the GetQueueUrl action. 
GetQueueUrl require - time.Sleep(time.Second * 2) - continue - } - } - } - } - - j.log.Error("receive message", "error", err) - continue - } - - for i := 0; i < len(message.Messages); i++ { - m := message.Messages[i] - item, err := j.unpack(&m) - if err != nil { - _, errD := j.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ - QueueUrl: j.queueURL, - ReceiptHandle: m.ReceiptHandle, - }) - if errD != nil { - j.log.Error("message unpack, failed to delete the message from the queue", "error", err) - } - - j.log.Error("message unpack", "error", err) - continue - } - - j.pq.Insert(item) - } - } - } -} diff --git a/plugins/jobs/drivers/sqs/plugin.go b/plugins/jobs/drivers/sqs/plugin.go deleted file mode 100644 index 54f61ff5..00000000 --- a/plugins/jobs/drivers/sqs/plugin.go +++ /dev/null @@ -1,39 +0,0 @@ -package sqs - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - pluginName string = "sqs" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Available() {} - -func (p *Plugin) Name() string { - return pluginName -} - -func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return NewSQSConsumer(configKey, p.log, p.cfg, e, pq) -} - -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return FromPipeline(pipe, p.log, p.cfg, e, pq) -} diff --git a/plugins/kv/drivers/memcached/config.go b/plugins/kv/drivers/memcached/config.go deleted file mode 100644 index 6d413790..00000000 --- a/plugins/kv/drivers/memcached/config.go +++ /dev/null @@ -1,12 +0,0 @@ -package memcached - -type Config struct { - // Addr is url for memcached, 11211 port is used by default - Addr []string -} - -func (s *Config) InitDefaults() { - if s.Addr == nil { - s.Addr = []string{"127.0.0.1:11211"} // default url for memcached - } -} diff --git a/plugins/kv/drivers/memcached/driver.go b/plugins/kv/drivers/memcached/driver.go deleted file mode 100644 index e24747fe..00000000 --- a/plugins/kv/drivers/memcached/driver.go +++ /dev/null @@ -1,248 +0,0 @@ -package memcached - -import ( - "strings" - "time" - - "github.com/bradfitz/gomemcache/memcache" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" - kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" -) - -type Driver struct { - client *memcache.Client - log logger.Logger - cfg *Config -} - -// NewMemcachedDriver returns a memcache client using the provided server(s) -// with equal weight. If a server is listed multiple times, -// it gets a proportional amount of weight. -func NewMemcachedDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) { - const op = errors.Op("new_memcached_driver") - - s := &Driver{ - log: log, - } - - err := cfgPlugin.UnmarshalKey(key, &s.cfg) - if err != nil { - return nil, errors.E(op, err) - } - - s.cfg.InitDefaults() - - m := memcache.New(s.cfg.Addr...) 
- s.client = m - - return s, nil -} - -// Has checks the key for existence -func (d *Driver) Has(keys ...string) (map[string]bool, error) { - const op = errors.Op("memcached_plugin_has") - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - m := make(map[string]bool, len(keys)) - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - exist, err := d.client.Get(keys[i]) - - if err != nil { - // ErrCacheMiss means that a Get failed because the item wasn't present. - if err == memcache.ErrCacheMiss { - continue - } - return nil, errors.E(op, err) - } - if exist != nil { - m[keys[i]] = true - } - } - return m, nil -} - -// Get gets the item for the given key. ErrCacheMiss is returned for a -// memcache cache miss. The key must be at most 250 bytes in length. -func (d *Driver) Get(key string) ([]byte, error) { - const op = errors.Op("memcached_plugin_get") - // to get cases like " " - keyTrimmed := strings.TrimSpace(key) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - data, err := d.client.Get(key) - if err != nil { - // ErrCacheMiss means that a Get failed because the item wasn't present. - if err == memcache.ErrCacheMiss { - return nil, nil - } - return nil, errors.E(op, err) - } - if data != nil { - // return the value by the key - return data.Value, nil - } - // data is nil by some reason and error also nil - return nil, nil -} - -// MGet return map with key -- string -// and map value as value -- []byte -func (d *Driver) MGet(keys ...string) (map[string][]byte, error) { - const op = errors.Op("memcached_plugin_mget") - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - } - - m := make(map[string][]byte, len(keys)) - for i := range keys { - // Here also MultiGet - data, err := d.client.Get(keys[i]) - if err != nil { - // ErrCacheMiss means that a Get failed because the item wasn't present. - if err == memcache.ErrCacheMiss { - continue - } - return nil, errors.E(op, err) - } - if data != nil { - m[keys[i]] = data.Value - } - } - - return m, nil -} - -// Set sets the KV pairs. Keys should be 250 bytes maximum -// TTL: -// Expiration is the cache expiration time, in seconds: either a relative -// time from now (up to 1 month), or an absolute Unix epoch time. -// Zero means the Item has no expiration time. -func (d *Driver) Set(items ...*kvv1.Item) error { - const op = errors.Op("memcached_plugin_set") - if items == nil { - return errors.E(op, errors.NoKeys) - } - - for i := range items { - if items[i] == nil { - return errors.E(op, errors.EmptyItem) - } - - // pre-allocate item - memcachedItem := &memcache.Item{ - Key: items[i].Key, - // unsafe convert - Value: items[i].Value, - Flags: 0, - } - - // add additional TTL in case of TTL isn't empty - if items[i].Timeout != "" { - // verify the TTL - t, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return err - } - memcachedItem.Expiration = int32(t.Unix()) - } - - err := d.client.Set(memcachedItem) - if err != nil { - return err - } - } - - return nil -} - -// MExpire Expiration is the cache expiration time, in seconds: either a relative -// time from now (up to 1 month), or an absolute Unix epoch time. -// Zero means the Item has no expiration time. 
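Both Set above and MExpire below treat an item's Timeout as an absolute RFC3339 timestamp and hand memcached its Unix value as an int32 expiration. A self-contained sketch of that conversion against a local memcached, using the same gomemcache calls (the address matches the InitDefaults fallback; key and value are illustrative):

    package main

    import (
        "fmt"
        "time"

        "github.com/bradfitz/gomemcache/memcache"
    )

    func main() {
        c := memcache.New("127.0.0.1:11211")

        // The kv plugin ships TTLs as RFC3339 strings; converted to a Unix
        // timestamp, memcached treats the value as an absolute expiry.
        t, err := time.Parse(time.RFC3339, "2030-01-01T00:00:00Z")
        if err != nil {
            panic(err)
        }

        err = c.Set(&memcache.Item{Key: "greeting", Value: []byte("hello"), Expiration: int32(t.Unix())})
        if err != nil {
            panic(err) // e.g. no memcached listening on 127.0.0.1:11211
        }

        it, err := c.Get("greeting")
        if err == memcache.ErrCacheMiss {
            fmt.Println("expired or never set")
            return
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", it.Value) // hello
    }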
-func (d *Driver) MExpire(items ...*kvv1.Item) error { - const op = errors.Op("memcached_plugin_mexpire") - for i := range items { - if items[i] == nil { - continue - } - if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" { - return errors.E(op, errors.Str("should set timeout and at least one key")) - } - - // verify provided TTL - t, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return errors.E(op, err) - } - - // Touch updates the expiry for the given key. The seconds parameter is either - // a Unix timestamp or, if seconds is less than 1 month, the number of seconds - // into the future at which time the item will expire. Zero means the item has - // no expiration time. ErrCacheMiss is returned if the key is not in the cache. - // The key must be at most 250 bytes in length. - err = d.client.Touch(items[i].Key, int32(t.Unix())) - if err != nil { - return errors.E(op, err) - } - } - return nil -} - -// TTL return time in seconds (int32) for a given keys -func (d *Driver) TTL(_ ...string) (map[string]string, error) { - const op = errors.Op("memcached_plugin_ttl") - return nil, errors.E(op, errors.Str("not valid request for memcached, see https://github.com/memcached/memcached/issues/239")) -} - -func (d *Driver) Delete(keys ...string) error { - const op = errors.Op("memcached_plugin_has") - if keys == nil { - return errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return errors.E(op, errors.EmptyKey) - } - } - - for i := range keys { - err := d.client.Delete(keys[i]) - // ErrCacheMiss means that a Get failed because the item wasn't present. - if err != nil { - // ErrCacheMiss means that a Get failed because the item wasn't present. 
- if err == memcache.ErrCacheMiss { - continue - } - return errors.E(op, err) - } - } - return nil -} - -func (d *Driver) Clear() error { - err := d.client.DeleteAll() - if err != nil { - d.log.Error("flush_all operation failed", "error", err) - return err - } - - return nil -} diff --git a/plugins/kv/drivers/memcached/plugin.go b/plugins/kv/drivers/memcached/plugin.go deleted file mode 100644 index 59a2b7cb..00000000 --- a/plugins/kv/drivers/memcached/plugin.go +++ /dev/null @@ -1,48 +0,0 @@ -package memcached - -import ( - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/common/kv" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - PluginName string = "memcached" - RootPluginName string = "kv" -) - -type Plugin struct { - // config plugin - cfgPlugin config.Configurer - // logger - log logger.Logger -} - -func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - if !cfg.Has(RootPluginName) { - return errors.E(errors.Disabled) - } - - s.cfgPlugin = cfg - s.log = log - return nil -} - -// Name returns plugin user-friendly name -func (s *Plugin) Name() string { - return PluginName -} - -// Available interface implementation -func (s *Plugin) Available() {} - -func (s *Plugin) KVConstruct(key string) (kv.Storage, error) { - const op = errors.Op("boltdb_plugin_provide") - st, err := NewMemcachedDriver(s.log, key, s.cfgPlugin) - if err != nil { - return nil, errors.E(op, err) - } - - return st, nil -} diff --git a/plugins/kv/plugin.go b/plugins/kv/plugin.go index 9a19f96c..c6ca96c3 100644 --- a/plugins/kv/plugin.go +++ b/plugins/kv/plugin.go @@ -16,11 +16,6 @@ const PluginName string = "kv" const ( // driver is the mandatory field which should present in every storage driver string = "driver" - - memcached string = "memcached" - boltdb string = "boltdb" - redis string = "redis" - memory string = "memory" ) // Plugin for the unified storage @@ -52,40 +47,14 @@ func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error { return nil } -func (p *Plugin) Serve() chan error { //nolint:gocognit +func (p *Plugin) Serve() chan error { errCh := make(chan error, 1) const op = errors.Op("kv_plugin_serve") // key - storage name in the config // value - storage - /* - For example we can have here 2 storages (but they are not pre-configured) - for the boltdb and memcached - We should provide here the actual configs for the all requested storages - kv: - boltdb-south: - driver: boltdb - dir: "tests/rr-bolt" - file: "rr.db" - bucket: "rr" - permissions: 777 - ttl: 40s - - boltdb-north: - driver: boltdb - dir: "tests/rr-bolt" - file: "rr.db" - bucket: "rr" - permissions: 777 - ttl: 40s - - memcached: - driver: memcached - addr: [ "127.0.0.1:11211" ] - - - For this config we should have 3 constructors: memory, boltdb and memcached but 4 KVs: default, boltdb-south, boltdb-north and memcached - when user requests for example boltdb-south, we should provide that particular preconfigured storage - */ + // For this config we should have 3 constructors: memory, boltdb and memcached but 4 KVs: default, boltdb-south, boltdb-north and memcached + // when user requests for example boltdb-south, we should provide that particular preconfigured storage + for k, v := range p.cfg.Data { // for example if the key not properly formatted (yaml) if v == nil { @@ -109,30 +78,16 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit // config key for the particular sub-driver kv.memcached configKey := fmt.Sprintf("%s.%s", 
PluginName, k) // at this point we know, that driver field present in the configuration - // TODO(rustatian): refactor, made generic, with checks like in the broadcast, websockets or jobs - switch v.(map[string]interface{})[driver] { - case memcached: - if _, ok := p.constructors[memcached]; !ok { - p.log.Warn("no memcached constructors registered", "registered", p.constructors) - continue - } - - storage, err := p.constructors[memcached].KVConstruct(configKey) - if err != nil { - errCh <- errors.E(op, err) - return errCh - } - - // save the storage - p.storages[k] = storage + drName := v.(map[string]interface{})[driver] - case boltdb: - if _, ok := p.constructors[boltdb]; !ok { - p.log.Warn("no boltdb constructors registered", "registered", p.constructors) + // driver name should be a string + if drStr, ok := drName.(string); ok { + if _, ok := p.constructors[drStr]; !ok { + p.log.Warn("no constructors registered", "requested constructor", drStr, "registered", p.constructors) continue } - storage, err := p.constructors[boltdb].KVConstruct(configKey) + storage, err := p.constructors[drStr].KVConstruct(configKey) if err != nil { errCh <- errors.E(op, err) return errCh @@ -140,56 +95,9 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit // save the storage p.storages[k] = storage - case memory: - if _, ok := p.constructors[memory]; !ok { - p.log.Warn("no in-memory constructors registered", "registered", p.constructors) - continue - } - - storage, err := p.constructors[memory].KVConstruct(configKey) - if err != nil { - errCh <- errors.E(op, err) - return errCh - } - - // save the storage - p.storages[k] = storage - case redis: - if _, ok := p.constructors[redis]; !ok { - p.log.Warn("no redis constructors registered", "registered", p.constructors) - continue - } - - // first - try local configuration - switch { - case p.cfgPlugin.Has(configKey): - storage, err := p.constructors[redis].KVConstruct(configKey) - if err != nil { - errCh <- errors.E(op, err) - return errCh - } - - // save the storage - p.storages[k] = storage - case p.cfgPlugin.Has(redis): - storage, err := p.constructors[redis].KVConstruct(configKey) - if err != nil { - errCh <- errors.E(op, err) - return errCh - } - - // save the storage - p.storages[k] = storage - continue - default: - // otherwise - error, no local or global config - p.log.Warn("no global or local redis configuration provided", "key", configKey) - continue - } - - default: - p.log.Error("unknown storage", errors.E(op, errors.Errorf("unknown storage %s", v.(map[string]interface{})[driver]))) } + + continue } return errCh diff --git a/plugins/memcached/config.go b/plugins/memcached/config.go new file mode 100644 index 00000000..6d413790 --- /dev/null +++ b/plugins/memcached/config.go @@ -0,0 +1,12 @@ +package memcached + +type Config struct { + // Addr is url for memcached, 11211 port is used by default + Addr []string +} + +func (s *Config) InitDefaults() { + if s.Addr == nil { + s.Addr = []string{"127.0.0.1:11211"} // default url for memcached + } +} diff --git a/plugins/memcached/driver.go b/plugins/memcached/driver.go new file mode 100644 index 00000000..e24747fe --- /dev/null +++ b/plugins/memcached/driver.go @@ -0,0 +1,248 @@ +package memcached + +import ( + "strings" + "time" + + "github.com/bradfitz/gomemcache/memcache" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/logger" + kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" +) + +type Driver struct { + client 
*memcache.Client + log logger.Logger + cfg *Config +} + +// NewMemcachedDriver returns a memcache client using the provided server(s) +// with equal weight. If a server is listed multiple times, +// it gets a proportional amount of weight. +func NewMemcachedDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) { + const op = errors.Op("new_memcached_driver") + + s := &Driver{ + log: log, + } + + err := cfgPlugin.UnmarshalKey(key, &s.cfg) + if err != nil { + return nil, errors.E(op, err) + } + + s.cfg.InitDefaults() + + m := memcache.New(s.cfg.Addr...) + s.client = m + + return s, nil +} + +// Has checks the key for existence +func (d *Driver) Has(keys ...string) (map[string]bool, error) { + const op = errors.Op("memcached_plugin_has") + if keys == nil { + return nil, errors.E(op, errors.NoKeys) + } + m := make(map[string]bool, len(keys)) + for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + exist, err := d.client.Get(keys[i]) + + if err != nil { + // ErrCacheMiss means that a Get failed because the item wasn't present. + if err == memcache.ErrCacheMiss { + continue + } + return nil, errors.E(op, err) + } + if exist != nil { + m[keys[i]] = true + } + } + return m, nil +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a +// memcache cache miss. The key must be at most 250 bytes in length. +func (d *Driver) Get(key string) ([]byte, error) { + const op = errors.Op("memcached_plugin_get") + // to get cases like " " + keyTrimmed := strings.TrimSpace(key) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + data, err := d.client.Get(key) + if err != nil { + // ErrCacheMiss means that a Get failed because the item wasn't present. + if err == memcache.ErrCacheMiss { + return nil, nil + } + return nil, errors.E(op, err) + } + if data != nil { + // return the value by the key + return data.Value, nil + } + // data is nil by some reason and error also nil + return nil, nil +} + +// MGet return map with key -- string +// and map value as value -- []byte +func (d *Driver) MGet(keys ...string) (map[string][]byte, error) { + const op = errors.Op("memcached_plugin_mget") + if keys == nil { + return nil, errors.E(op, errors.NoKeys) + } + + // should not be empty keys + for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return nil, errors.E(op, errors.EmptyKey) + } + } + + m := make(map[string][]byte, len(keys)) + for i := range keys { + // Here also MultiGet + data, err := d.client.Get(keys[i]) + if err != nil { + // ErrCacheMiss means that a Get failed because the item wasn't present. + if err == memcache.ErrCacheMiss { + continue + } + return nil, errors.E(op, err) + } + if data != nil { + m[keys[i]] = data.Value + } + } + + return m, nil +} + +// Set sets the KV pairs. Keys should be 250 bytes maximum +// TTL: +// Expiration is the cache expiration time, in seconds: either a relative +// time from now (up to 1 month), or an absolute Unix epoch time. +// Zero means the Item has no expiration time. 
+func (d *Driver) Set(items ...*kvv1.Item) error { + const op = errors.Op("memcached_plugin_set") + if items == nil { + return errors.E(op, errors.NoKeys) + } + + for i := range items { + if items[i] == nil { + return errors.E(op, errors.EmptyItem) + } + + // pre-allocate item + memcachedItem := &memcache.Item{ + Key: items[i].Key, + // unsafe convert + Value: items[i].Value, + Flags: 0, + } + + // add additional TTL in case of TTL isn't empty + if items[i].Timeout != "" { + // verify the TTL + t, err := time.Parse(time.RFC3339, items[i].Timeout) + if err != nil { + return err + } + memcachedItem.Expiration = int32(t.Unix()) + } + + err := d.client.Set(memcachedItem) + if err != nil { + return err + } + } + + return nil +} + +// MExpire Expiration is the cache expiration time, in seconds: either a relative +// time from now (up to 1 month), or an absolute Unix epoch time. +// Zero means the Item has no expiration time. +func (d *Driver) MExpire(items ...*kvv1.Item) error { + const op = errors.Op("memcached_plugin_mexpire") + for i := range items { + if items[i] == nil { + continue + } + if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" { + return errors.E(op, errors.Str("should set timeout and at least one key")) + } + + // verify provided TTL + t, err := time.Parse(time.RFC3339, items[i].Timeout) + if err != nil { + return errors.E(op, err) + } + + // Touch updates the expiry for the given key. The seconds parameter is either + // a Unix timestamp or, if seconds is less than 1 month, the number of seconds + // into the future at which time the item will expire. Zero means the item has + // no expiration time. ErrCacheMiss is returned if the key is not in the cache. + // The key must be at most 250 bytes in length. + err = d.client.Touch(items[i].Key, int32(t.Unix())) + if err != nil { + return errors.E(op, err) + } + } + return nil +} + +// TTL return time in seconds (int32) for a given keys +func (d *Driver) TTL(_ ...string) (map[string]string, error) { + const op = errors.Op("memcached_plugin_ttl") + return nil, errors.E(op, errors.Str("not valid request for memcached, see https://github.com/memcached/memcached/issues/239")) +} + +func (d *Driver) Delete(keys ...string) error { + const op = errors.Op("memcached_plugin_has") + if keys == nil { + return errors.E(op, errors.NoKeys) + } + + // should not be empty keys + for i := range keys { + keyTrimmed := strings.TrimSpace(keys[i]) + if keyTrimmed == "" { + return errors.E(op, errors.EmptyKey) + } + } + + for i := range keys { + err := d.client.Delete(keys[i]) + // ErrCacheMiss means that a Get failed because the item wasn't present. + if err != nil { + // ErrCacheMiss means that a Get failed because the item wasn't present. 
+ if err == memcache.ErrCacheMiss { + continue + } + return errors.E(op, err) + } + } + return nil +} + +func (d *Driver) Clear() error { + err := d.client.DeleteAll() + if err != nil { + d.log.Error("flush_all operation failed", "error", err) + return err + } + + return nil +} diff --git a/plugins/memcached/plugin.go b/plugins/memcached/plugin.go new file mode 100644 index 00000000..59a2b7cb --- /dev/null +++ b/plugins/memcached/plugin.go @@ -0,0 +1,48 @@ +package memcached + +import ( + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/kv" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + PluginName string = "memcached" + RootPluginName string = "kv" +) + +type Plugin struct { + // config plugin + cfgPlugin config.Configurer + // logger + log logger.Logger +} + +func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + if !cfg.Has(RootPluginName) { + return errors.E(errors.Disabled) + } + + s.cfgPlugin = cfg + s.log = log + return nil +} + +// Name returns plugin user-friendly name +func (s *Plugin) Name() string { + return PluginName +} + +// Available interface implementation +func (s *Plugin) Available() {} + +func (s *Plugin) KVConstruct(key string) (kv.Storage, error) { + const op = errors.Op("boltdb_plugin_provide") + st, err := NewMemcachedDriver(s.log, key, s.cfgPlugin) + if err != nil { + return nil, errors.E(op, err) + } + + return st, nil +} diff --git a/plugins/sqs/config.go b/plugins/sqs/config.go new file mode 100644 index 00000000..9b2a1ca8 --- /dev/null +++ b/plugins/sqs/config.go @@ -0,0 +1,114 @@ +package sqs + +import "github.com/aws/aws-sdk-go-v2/aws" + +const ( + attributes string = "attributes" + tags string = "tags" + queue string = "queue" + pref string = "prefetch" + visibility string = "visibility_timeout" + waitTime string = "wait_time" +) + +type GlobalCfg struct { + Key string `mapstructure:"key"` + Secret string `mapstructure:"secret"` + Region string `mapstructure:"region"` + SessionToken string `mapstructure:"session_token"` + Endpoint string `mapstructure:"endpoint"` +} + +// Config is used to parse pipeline configuration +type Config struct { + // The duration (in seconds) that the received messages are hidden from subsequent + // retrieve requests after being retrieved by a ReceiveMessage request. + VisibilityTimeout int32 `mapstructure:"visibility_timeout"` + // The duration (in seconds) for which the call waits for a message to arrive + // in the queue before returning. If a message is available, the call returns + // sooner than WaitTimeSeconds. If no messages are available and the wait time + // expires, the call returns successfully with an empty list of messages. + WaitTimeSeconds int32 `mapstructure:"wait_time_seconds"` + // Prefetch is the maximum number of messages to return. Amazon SQS never returns more messages + // than this value (however, fewer messages might be returned). Valid values: 1 to + // 10. Default: 1. + Prefetch int32 `mapstructure:"prefetch"` + // The name of the new queue. The following limits apply to this name: + // + // * A queue + // name can have up to 80 characters. + // + // * Valid values: alphanumeric characters, + // hyphens (-), and underscores (_). + // + // * A FIFO queue name must end with the .fifo + // suffix. + // + // Queue URLs and names are case-sensitive. + // + // This member is required. + Queue *string `mapstructure:"queue"` + + // A map of attributes with their corresponding values. 
The following lists the + // names, descriptions, and values of the special request parameters that the + // CreateQueue action uses. + // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html + Attributes map[string]string `mapstructure:"attributes"` + + // From amazon docs: + // Add cost allocation tags to the specified Amazon SQS queue. For an overview, see + // Tagging Your Amazon SQS Queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) + // in the Amazon SQS Developer Guide. When you use queue tags, keep the following + // guidelines in mind: + // + // * Adding more than 50 tags to a queue isn't recommended. + // + // * + // Tags don't have any semantic meaning. Amazon SQS interprets tags as character + // strings. + // + // * Tags are case-sensitive. + // + // * A new tag with a key identical to that + // of an existing tag overwrites the existing tag. + // + // For a full list of tag + // restrictions, see Quotas related to queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) + // in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you + // must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account + // permissions don't apply to this action. For more information, see Grant + // cross-account permissions to a role and a user name + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) + // in the Amazon SQS Developer Guide. + Tags map[string]string `mapstructure:"tags"` +} + +func (c *GlobalCfg) InitDefault() { + if c.Endpoint == "" { + c.Endpoint = "http://127.0.0.1:9324" + } +} + +func (c *Config) InitDefault() { + if c.Queue == nil { + c.Queue = aws.String("default") + } + + if c.Prefetch == 0 || c.Prefetch > 10 { + c.Prefetch = 10 + } + + if c.WaitTimeSeconds == 0 { + c.WaitTimeSeconds = 5 + } + + if c.Attributes == nil { + c.Attributes = make(map[string]string) + } + + if c.Tags == nil { + c.Tags = make(map[string]string) + } +} diff --git a/plugins/sqs/consumer.go b/plugins/sqs/consumer.go new file mode 100644 index 00000000..23203190 --- /dev/null +++ b/plugins/sqs/consumer.go @@ -0,0 +1,411 @@ +package sqs + +import ( + "context" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/google/uuid" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + jobState "github.com/spiral/roadrunner/v2/pkg/state/job" + cfgPlugin "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +type consumer struct { + sync.Mutex + pq priorityqueue.Queue + log logger.Logger + eh events.Handler + pipeline atomic.Value + + // connection info + key string + secret string + sessionToken string + region string + endpoint string + queue *string + messageGroupID string + waitTime int32 + prefetch int32 + visibilityTimeout int32 + + // if user invoke several resume 
operations + listeners uint32 + + // queue optional parameters + attributes map[string]string + tags map[string]string + + client *sqs.Client + queueURL *string + + pauseCh chan struct{} +} + +func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_sqs_consumer") + + // if no such key - error + if !cfg.Has(configKey) { + return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) + } + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section")) + } + + // PARSE CONFIGURATION ------- + var pipeCfg Config + var globalCfg GlobalCfg + + err := cfg.UnmarshalKey(configKey, &pipeCfg) + if err != nil { + return nil, errors.E(op, err) + } + + pipeCfg.InitDefault() + + err = cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + // initialize job consumer + jb := &consumer{ + pq: pq, + log: log, + eh: e, + messageGroupID: uuid.NewString(), + attributes: pipeCfg.Attributes, + tags: pipeCfg.Tags, + queue: pipeCfg.Queue, + prefetch: pipeCfg.Prefetch, + visibilityTimeout: pipeCfg.VisibilityTimeout, + waitTime: pipeCfg.WaitTimeSeconds, + region: globalCfg.Region, + key: globalCfg.Key, + sessionToken: globalCfg.SessionToken, + secret: globalCfg.Secret, + endpoint: globalCfg.Endpoint, + pauseCh: make(chan struct{}, 1), + } + + // PARSE CONFIGURATION ------- + + awsConf, err := config.LoadDefaultConfig(context.Background(), + config.WithRegion(globalCfg.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken))) + if err != nil { + return nil, errors.E(op, err) + } + + // config with retries + jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) { + o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) { + opts.MaxAttempts = 60 + }) + }) + + out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags}) + if err != nil { + return nil, errors.E(op, err) + } + + // assign a queue URL + jb.queueURL = out.QueueUrl + + // To successfully create a new queue, you must provide a + // queue name that adheres to the limits related to queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) + // and is unique within the scope of your queues. After you create a queue, you + // must wait at least one second after the queue is created to be able to use the <------------ + // queue. To get the queue URL, use the GetQueueUrl action. 
GetQueueUrl require + time.Sleep(time.Second * 2) + + return jb, nil +} + +func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("new_sqs_consumer") + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section")) + } + + // PARSE CONFIGURATION ------- + var globalCfg GlobalCfg + + err := cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + attr := make(map[string]string) + err = pipe.Map(attributes, attr) + if err != nil { + return nil, errors.E(op, err) + } + + tg := make(map[string]string) + err = pipe.Map(tags, tg) + if err != nil { + return nil, errors.E(op, err) + } + + // initialize job consumer + jb := &consumer{ + pq: pq, + log: log, + eh: e, + messageGroupID: uuid.NewString(), + attributes: attr, + tags: tg, + queue: aws.String(pipe.String(queue, "default")), + prefetch: int32(pipe.Int(pref, 10)), + visibilityTimeout: int32(pipe.Int(visibility, 0)), + waitTime: int32(pipe.Int(waitTime, 0)), + region: globalCfg.Region, + key: globalCfg.Key, + sessionToken: globalCfg.SessionToken, + secret: globalCfg.Secret, + endpoint: globalCfg.Endpoint, + pauseCh: make(chan struct{}, 1), + } + + // PARSE CONFIGURATION ------- + + awsConf, err := config.LoadDefaultConfig(context.Background(), + config.WithRegion(globalCfg.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken))) + if err != nil { + return nil, errors.E(op, err) + } + + // config with retries + jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) { + o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) { + opts.MaxAttempts = 60 + }) + }) + + out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags}) + if err != nil { + return nil, errors.E(op, err) + } + + // assign a queue URL + jb.queueURL = out.QueueUrl + + // To successfully create a new queue, you must provide a + // queue name that adheres to the limits related to queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) + // and is unique within the scope of your queues. After you create a queue, you + // must wait at least one second after the queue is created to be able to use the <------------ + // queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl require + time.Sleep(time.Second * 2) + + return jb, nil +} + +func (j *consumer) Push(ctx context.Context, jb *job.Job) error { + const op = errors.Op("sqs_push") + // check if the pipeline registered + + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != jb.Options.Pipeline { + return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) + } + + // The length of time, in seconds, for which to delay a specific message. Valid + // values: 0 to 900. Maximum: 15 minutes. 
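+	// Added note: SQS itself enforces this cap, since SendMessage rejects
+	// DelaySeconds above 900. Longer delays would have to be scheduled on the
+	// application side before pushing; the driver fails fast here instead.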
+ if jb.Options.Delay > 900 { + return errors.E(op, errors.Errorf("unable to push, maximum possible delay is 900 seconds (15 minutes), provided: %d", jb.Options.Delay)) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + return nil +} + +func (j *consumer) State(ctx context.Context) (*jobState.State, error) { + const op = errors.Op("sqs_state") + attr, err := j.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{ + QueueUrl: j.queueURL, + AttributeNames: []types.QueueAttributeName{ + types.QueueAttributeNameApproximateNumberOfMessages, + types.QueueAttributeNameApproximateNumberOfMessagesDelayed, + types.QueueAttributeNameApproximateNumberOfMessagesNotVisible, + }, + }) + + if err != nil { + return nil, errors.E(op, err) + } + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + out := &jobState.State{ + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Queue: *j.queueURL, + Ready: ready(atomic.LoadUint32(&j.listeners)), + } + + nom, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessages)]) + if err == nil { + out.Active = int64(nom) + } + + delayed, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesDelayed)]) + if err == nil { + out.Delayed = int64(delayed) + } + + nv, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesNotVisible)]) + if err == nil { + out.Reserved = int64(nv) + } + + return out, nil +} + +func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { + j.pipeline.Store(p) + return nil +} + +func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { + const op = errors.Op("sqs_run") + + j.Lock() + defer j.Unlock() + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p.Name() { + return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) + } + + atomic.AddUint32(&j.listeners, 1) + + // start listener + go j.listen(context.Background()) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + + return nil +} + +func (j *consumer) Stop(context.Context) error { + j.pauseCh <- struct{}{} + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +func (j *consumer) Pause(_ context.Context, p string) { + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + return + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 0 { + j.log.Warn("no active listeners, nothing to pause") + return + } + + atomic.AddUint32(&j.listeners, ^uint32(0)) + + // stop consume + j.pauseCh <- struct{}{} + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) Resume(_ context.Context, p string) { + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + return + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 1 { + j.log.Warn("sqs listener already in the active state") + return + } + + // start listener + go j.listen(context.Background()) + + // 
increase num of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *consumer) handleItem(ctx context.Context, msg *Item) error { + d, err := msg.pack(j.queueURL) + if err != nil { + return err + } + _, err = j.client.SendMessage(ctx, d) + if err != nil { + return err + } + + return nil +} + +func ready(r uint32) bool { + return r > 0 +} diff --git a/plugins/sqs/item.go b/plugins/sqs/item.go new file mode 100644 index 00000000..996adf6c --- /dev/null +++ b/plugins/sqs/item.go @@ -0,0 +1,247 @@ +package sqs + +import ( + "context" + "strconv" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + json "github.com/json-iterator/go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + StringType string = "String" + NumberType string = "Number" + BinaryType string = "Binary" + ApproximateReceiveCount string = "ApproximateReceiveCount" +) + +var itemAttributes = []string{ + job.RRJob, + job.RRDelay, + job.RRPriority, + job.RRHeaders, +} + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // Private ================ + approxReceiveCount int64 + queue *string + receiptHandler *string + client *sqs.Client + requeueFn func(context.Context, *Item) error +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. 
+// Not used in the sqs, MessageAttributes used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + _, err := i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func (i *Item) Nack() error { + // requeue message + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + // requeue message + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + // Delete job from the queue only after successful requeue + _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) { + // pack headers map + data, err := json.Marshal(i.Headers) + if err != nil { + return nil, err + } + + return &sqs.SendMessageInput{ + MessageBody: aws.String(i.Payload), + QueueUrl: queue, + DelaySeconds: int32(i.Options.Delay), + MessageAttributes: map[string]types.MessageAttributeValue{ + job.RRJob: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Job)}, + job.RRDelay: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Delay)))}, + job.RRHeaders: {DataType: aws.String(BinaryType), BinaryValue: data, BinaryListValues: nil, StringListValues: nil, StringValue: nil}, + job.RRPriority: {DataType: aws.String(NumberType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Priority)))}, + }, + }, nil +} + +func (j *consumer) unpack(msg *types.Message) (*Item, error) { + const op = errors.Op("sqs_unpack") + // reserved + if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok { + return nil, errors.E(op, errors.Str("failed to unpack the ApproximateReceiveCount attribute")) + } + + for i := 0; i < len(itemAttributes); i++ { + if _, ok := msg.MessageAttributes[itemAttributes[i]]; !ok { + return nil, errors.E(op, errors.Errorf("missing queue attribute: %s", itemAttributes[i])) + } + } + + var h map[string][]string + err := json.Unmarshal(msg.MessageAttributes[job.RRHeaders].BinaryValue, &h) + if err != nil { + return nil, err + } + + delay, err := 
strconv.Atoi(*msg.MessageAttributes[job.RRDelay].StringValue) + if err != nil { + return nil, errors.E(op, err) + } + + priority, err := strconv.Atoi(*msg.MessageAttributes[job.RRPriority].StringValue) + if err != nil { + return nil, errors.E(op, err) + } + + recCount, err := strconv.Atoi(msg.Attributes[ApproximateReceiveCount]) + if err != nil { + return nil, errors.E(op, err) + } + + item := &Item{ + Job: *msg.MessageAttributes[job.RRJob].StringValue, + Payload: *msg.Body, + Headers: h, + Options: &Options{ + Delay: int64(delay), + Priority: int64(priority), + + // private + approxReceiveCount: int64(recCount), + client: j.client, + queue: j.queueURL, + receiptHandler: msg.ReceiptHandle, + requeueFn: j.handleItem, + }, + } + + return item, nil +} diff --git a/plugins/sqs/listener.go b/plugins/sqs/listener.go new file mode 100644 index 00000000..a4280af2 --- /dev/null +++ b/plugins/sqs/listener.go @@ -0,0 +1,87 @@ +package sqs + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/aws/smithy-go" +) + +const ( + // All - get all message attribute names + All string = "All" + + // NonExistentQueue AWS error code + NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue" +) + +func (j *consumer) listen(ctx context.Context) { //nolint:gocognit + for { + select { + case <-j.pauseCh: + j.log.Warn("sqs listener stopped") + return + default: + message, err := j.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ + QueueUrl: j.queueURL, + MaxNumberOfMessages: j.prefetch, + AttributeNames: []types.QueueAttributeName{types.QueueAttributeName(ApproximateReceiveCount)}, + MessageAttributeNames: []string{All}, + // The new value for the message's visibility timeout (in seconds). Values range: 0 + // to 43200. Maximum: 12 hours. + VisibilityTimeout: j.visibilityTimeout, + WaitTimeSeconds: j.waitTime, + }) + + if err != nil { + if oErr, ok := (err).(*smithy.OperationError); ok { + if rErr, ok := oErr.Err.(*http.ResponseError); ok { + if apiErr, ok := rErr.Err.(*smithy.GenericAPIError); ok { + // in case of NonExistentQueue - recreate the queue + if apiErr.Code == NonExistentQueue { + j.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault()) + _, err = j.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: j.queue, Attributes: j.attributes, Tags: j.tags}) + if err != nil { + j.log.Error("create queue", "error", err) + } + // To successfully create a new queue, you must provide a + // queue name that adheres to the limits related to the queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) + // and is unique within the scope of your queues. After you create a queue, you + // must wait at least one second after the queue is created to be able to use the <------------ + // queue. To get the queue URL, use the GetQueueUrl action. 
GetQueueUrl require + time.Sleep(time.Second * 2) + continue + } + } + } + } + + j.log.Error("receive message", "error", err) + continue + } + + for i := 0; i < len(message.Messages); i++ { + m := message.Messages[i] + item, err := j.unpack(&m) + if err != nil { + _, errD := j.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: j.queueURL, + ReceiptHandle: m.ReceiptHandle, + }) + if errD != nil { + j.log.Error("message unpack, failed to delete the message from the queue", "error", err) + } + + j.log.Error("message unpack", "error", err) + continue + } + + j.pq.Insert(item) + } + } + } +} diff --git a/plugins/sqs/plugin.go b/plugins/sqs/plugin.go new file mode 100644 index 00000000..54f61ff5 --- /dev/null +++ b/plugins/sqs/plugin.go @@ -0,0 +1,39 @@ +package sqs + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + pluginName string = "sqs" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Available() {} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewSQSConsumer(configKey, p.log, p.cfg, e, pq) +} + +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipe, p.log, p.cfg, e, pq) +} -- cgit v1.2.3 From fb356081dcaea81952e2019502c0216af7d10c7d Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Sat, 28 Aug 2021 02:03:54 +0300 Subject: Reduce error check operations Signed-off-by: Valery Piashchynski --- plugins/server/plugin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'plugins') diff --git a/plugins/server/plugin.go b/plugins/server/plugin.go index 16e3bd8c..5f5f2df9 100644 --- a/plugins/server/plugin.go +++ b/plugins/server/plugin.go @@ -216,7 +216,7 @@ func (server *Plugin) collectPoolEvents(event interface{}) { case events.EventMaxMemory: server.log.Warn("worker max memory reached", "pid", we.Payload.(worker.BaseProcess).Pid()) case events.EventNoFreeWorkers: - server.log.Warn("no free workers in pool", "error", we.Payload.(error).Error()) + server.log.Warn("no free workers in the pool", "error", we.Payload.(error).Error()) case events.EventPoolError: server.log.Error("pool error", "error", we.Payload.(error).Error()) case events.EventSupervisorError: -- cgit v1.2.3 From c23a88a943b53b99d112b63ed121931d1f79436f Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Sun, 29 Aug 2021 23:46:11 +0300 Subject: Implement Init, FromPipeline methods Update receiver in the amqp driver Add simple (initial) boltdb tests Signed-off-by: Valery Piashchynski --- plugins/amqp/amqpjobs/consumer.go | 146 ++++++++++++----------- plugins/amqp/amqpjobs/item.go | 18 +-- plugins/amqp/amqpjobs/listener.go | 10 +- plugins/amqp/amqpjobs/rabbit_init.go | 16 +-- plugins/amqp/amqpjobs/redial.go | 68 +++++------ plugins/boltdb/boltjobs/config.go | 37 ++++-- plugins/boltdb/boltjobs/consumer.go | 225 +++++++++++++++++++++++++++++------ plugins/boltdb/boltjobs/listener.go | 24 +++- 
plugins/jobs/job/general.go | 29 ----- plugins/jobs/job/job.go | 62 ++++++++++ plugins/jobs/job/job_options.go | 32 ----- plugins/jobs/job/job_options_test.go | 45 ------- plugins/jobs/job/job_test.go | 45 +++++++ plugins/jobs/plugin.go | 2 +- 14 files changed, 477 insertions(+), 282 deletions(-) delete mode 100644 plugins/jobs/job/general.go create mode 100644 plugins/jobs/job/job.go delete mode 100644 plugins/jobs/job/job_options.go delete mode 100644 plugins/jobs/job/job_options_test.go create mode 100644 plugins/jobs/job/job_test.go (limited to 'plugins') diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go index 1931ceaa..f1b4d54f 100644 --- a/plugins/amqp/amqpjobs/consumer.go +++ b/plugins/amqp/amqpjobs/consumer.go @@ -218,17 +218,17 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Con return jb, nil } -func (j *consumer) Push(ctx context.Context, job *job.Job) error { +func (c *consumer) Push(ctx context.Context, job *job.Job) error { const op = errors.Op("rabbitmq_push") // check if the pipeline registered // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != job.Options.Pipeline { return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name())) } - err := j.handleItem(ctx, fromJob(job)) + err := c.handleItem(ctx, fromJob(job)) if err != nil { return errors.E(op, err) } @@ -236,38 +236,38 @@ func (j *consumer) Push(ctx context.Context, job *job.Job) error { return nil } -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - j.pipeline.Store(p) +func (c *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { + c.pipeline.Store(p) return nil } -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("rabbit_consume") +func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { + const op = errors.Op("rabbit_run") - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p.Name() { return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) } // protect connection (redial) - j.Lock() - defer j.Unlock() + c.Lock() + defer c.Unlock() var err error - j.consumeChan, err = j.conn.Channel() + c.consumeChan, err = c.conn.Channel() if err != nil { return errors.E(op, err) } - err = j.consumeChan.Qos(j.prefetch, 0, false) + err = c.consumeChan.Qos(c.prefetch, 0, false) if err != nil { return errors.E(op, err) } // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, + deliv, err := c.consumeChan.Consume( + c.queue, + c.consumeID, false, false, false, @@ -279,9 +279,11 @@ func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { } // run listener - j.listener(deliv) + c.listener(deliv) - j.eh.Push(events.JobEvent{ + atomic.StoreUint32(&c.listeners, 1) + + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -291,28 +293,28 @@ func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { return nil } -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { +func (c *consumer) State(ctx context.Context) (*jobState.State, error) { const op = errors.Op("amqp_driver_state") select { - case pch := <-j.publishChan: + case pch := <-c.publishChan: defer func() { - j.publishChan <- pch + c.publishChan <- pch }() 
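 		// Added note: publishChan acts as a small channel pool here; receiving
 		// takes exclusive ownership of the publishing channel and the deferred
 		// send returns it, so State and Push never share an AMQP channel, while
 		// the ctx.Done() branch below bounds how long a caller waits for it.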
- q, err := pch.QueueInspect(j.queue) + q, err := pch.QueueInspect(c.queue) if err != nil { return nil, errors.E(op, err) } - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) return &jobState.State{ Pipeline: pipe.Name(), Driver: pipe.Driver(), Queue: q.Name, Active: int64(q.Messages), - Delayed: atomic.LoadInt64(j.delayed), - Ready: ready(atomic.LoadUint32(&j.listeners)), + Delayed: atomic.LoadInt64(c.delayed), + Ready: ready(atomic.LoadUint32(&c.listeners)), }, nil case <-ctx.Done(): @@ -320,37 +322,37 @@ func (j *consumer) State(ctx context.Context) (*jobState.State, error) { } } -func (j *consumer) Pause(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) +func (c *consumer) Pause(_ context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested pause on: ", p) + c.log.Error("no such pipeline", "requested pause on: ", p) } - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // no active listeners if l == 0 { - j.log.Warn("no active listeners, nothing to pause") + c.log.Warn("no active listeners, nothing to pause") return } - atomic.AddUint32(&j.listeners, ^uint32(0)) + atomic.AddUint32(&c.listeners, ^uint32(0)) // protect connection (redial) - j.Lock() - defer j.Unlock() + c.Lock() + defer c.Unlock() - err := j.consumeChan.Cancel(j.consumeID, true) + err := c.consumeChan.Cancel(c.consumeID, true) if err != nil { - j.log.Error("cancel publish channel, forcing close", "error", err) - errCl := j.consumeChan.Close() + c.log.Error("cancel publish channel, forcing close", "error", err) + errCl := c.consumeChan.Close() if errCl != nil { - j.log.Error("force close failed", "error", err) + c.log.Error("force close failed", "error", err) return } return } - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipePaused, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -358,40 +360,40 @@ func (j *consumer) Pause(_ context.Context, p string) { }) } -func (j *consumer) Resume(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) +func (c *consumer) Resume(_ context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested resume on: ", p) + c.log.Error("no such pipeline", "requested resume on: ", p) } // protect connection (redial) - j.Lock() - defer j.Unlock() + c.Lock() + defer c.Unlock() - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // no active listeners if l == 1 { - j.log.Warn("amqp listener already in the active state") + c.log.Warn("amqp listener already in the active state") return } var err error - j.consumeChan, err = j.conn.Channel() + c.consumeChan, err = c.conn.Channel() if err != nil { - j.log.Error("create channel on rabbitmq connection", "error", err) + c.log.Error("create channel on rabbitmq connection", "error", err) return } - err = j.consumeChan.Qos(j.prefetch, 0, false) + err = c.consumeChan.Qos(c.prefetch, 0, false) if err != nil { - j.log.Error("qos set failed", "error", err) + c.log.Error("qos set failed", "error", err) return } // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, + deliv, err := c.consumeChan.Consume( + c.queue, + c.consumeID, false, false, false, @@ -399,17 +401,17 @@ func (j *consumer) Resume(_ context.Context, p string) { nil, ) if err != nil { - 
j.log.Error("consume operation failed", "error", err) + c.log.Error("consume operation failed", "error", err) return } // run listener - j.listener(deliv) + c.listener(deliv) // increase number of listeners - atomic.AddUint32(&j.listeners, 1) + atomic.AddUint32(&c.listeners, 1) - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -417,11 +419,11 @@ func (j *consumer) Resume(_ context.Context, p string) { }) } -func (j *consumer) Stop(context.Context) error { - j.stopCh <- struct{}{} +func (c *consumer) Stop(context.Context) error { + c.stopCh <- struct{}{} - pipe := j.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ + pipe := c.pipeline.Load().(*pipeline.Pipeline) + c.eh.Push(events.JobEvent{ Event: events.EventPipeStopped, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -431,13 +433,13 @@ func (j *consumer) Stop(context.Context) error { } // handleItem -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { +func (c *consumer) handleItem(ctx context.Context, msg *Item) error { const op = errors.Op("rabbitmq_handle_item") select { - case pch := <-j.publishChan: + case pch := <-c.publishChan: // return the channel back defer func() { - j.publishChan <- pch + c.publishChan <- pch }() // convert @@ -449,30 +451,30 @@ func (j *consumer) handleItem(ctx context.Context, msg *Item) error { const op = errors.Op("rabbitmq_handle_item") // handle timeouts if msg.Options.DelayDuration() > 0 { - atomic.AddInt64(j.delayed, 1) + atomic.AddInt64(c.delayed, 1) // TODO declare separate method for this if condition // TODO dlx cache channel?? delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) - tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue) + tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, c.exchangeName, c.queue) _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ - dlx: j.exchangeName, - dlxRoutingKey: j.routingKey, + dlx: c.exchangeName, + dlxRoutingKey: c.routingKey, dlxTTL: delayMs, dlxExpires: delayMs * 2, }) if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) + atomic.AddInt64(c.delayed, ^int64(0)) return errors.E(op, err) } - err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil) + err = pch.QueueBind(tmpQ, tmpQ, c.exchangeName, false, nil) if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) + atomic.AddInt64(c.delayed, ^int64(0)) return errors.E(op, err) } // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{ + err = pch.Publish(c.exchangeName, tmpQ, false, false, amqp.Publishing{ Headers: table, ContentType: contentType, Timestamp: time.Now().UTC(), @@ -481,7 +483,7 @@ func (j *consumer) handleItem(ctx context.Context, msg *Item) error { }) if err != nil { - atomic.AddInt64(j.delayed, ^int64(0)) + atomic.AddInt64(c.delayed, ^int64(0)) return errors.E(op, err) } @@ -489,7 +491,7 @@ func (j *consumer) handleItem(ctx context.Context, msg *Item) error { } // insert to the local, limited pipeline - err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{ + err = pch.Publish(c.exchangeName, c.routingKey, false, false, amqp.Publishing{ Headers: table, ContentType: contentType, Timestamp: time.Now(), diff --git a/plugins/amqp/amqpjobs/item.go b/plugins/amqp/amqpjobs/item.go index a8e305ea..66b70a36 100644 --- a/plugins/amqp/amqpjobs/item.go +++ b/plugins/amqp/amqpjobs/item.go @@ -139,9 +139,9 @@ func (i *Item) Requeue(headers 
map[string][]string, delay int64) error { } // fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ -func (j *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { +func (c *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { const op = errors.Op("from_delivery_convert") - item, err := j.unpack(d) + item, err := c.unpack(d) if err != nil { return nil, errors.E(op, err) } @@ -156,10 +156,10 @@ func (j *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { item.Options.ack = d.Ack item.Options.nack = d.Nack - item.Options.delayed = j.delayed + item.Options.delayed = c.delayed // requeue func - item.Options.requeueFn = j.handleItem + item.Options.requeueFn = c.handleItem return i, nil } @@ -194,11 +194,11 @@ func pack(id string, j *Item) (amqp.Table, error) { } // unpack restores jobs.Options -func (j *consumer) unpack(d amqp.Delivery) (*Item, error) { +func (c *consumer) unpack(d amqp.Delivery) (*Item, error) { item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ - multipleAsk: j.multipleAck, - requeue: j.requeueOnFail, - requeueFn: j.handleItem, + multipleAsk: c.multipleAck, + requeue: c.requeueOnFail, + requeueFn: c.handleItem, }} if _, ok := d.Headers[job.RRID].(string); !ok { @@ -230,7 +230,7 @@ func (j *consumer) unpack(d amqp.Delivery) (*Item, error) { if _, ok := d.Headers[job.RRPriority]; !ok { // set pipe's priority - item.Options.Priority = j.priority + item.Options.Priority = c.priority } else { item.Options.Priority = d.Headers[job.RRPriority].(int64) } diff --git a/plugins/amqp/amqpjobs/listener.go b/plugins/amqp/amqpjobs/listener.go index 0156d55c..75c61cad 100644 --- a/plugins/amqp/amqpjobs/listener.go +++ b/plugins/amqp/amqpjobs/listener.go @@ -2,23 +2,23 @@ package amqpjobs import amqp "github.com/rabbitmq/amqp091-go" -func (j *consumer) listener(deliv <-chan amqp.Delivery) { +func (c *consumer) listener(deliv <-chan amqp.Delivery) { go func() { for { //nolint:gosimple select { case msg, ok := <-deliv: if !ok { - j.log.Info("delivery channel closed, leaving the rabbit listener") + c.log.Info("delivery channel closed, leaving the rabbit listener") return } - d, err := j.fromDelivery(msg) + d, err := c.fromDelivery(msg) if err != nil { - j.log.Error("amqp delivery convert", "error", err) + c.log.Error("amqp delivery convert", "error", err) continue } // insert job into the main priority queue - j.pq.Insert(d) + c.pq.Insert(d) } } }() diff --git a/plugins/amqp/amqpjobs/rabbit_init.go b/plugins/amqp/amqpjobs/rabbit_init.go index e260fabe..fb5f6911 100644 --- a/plugins/amqp/amqpjobs/rabbit_init.go +++ b/plugins/amqp/amqpjobs/rabbit_init.go @@ -4,20 +4,20 @@ import ( "github.com/spiral/errors" ) -func (j *consumer) initRabbitMQ() error { +func (c *consumer) initRabbitMQ() error { const op = errors.Op("jobs_plugin_rmq_init") // Channel opens a unique, concurrent server channel to process the bulk of AMQP // messages. Any error from methods on this receiver will render the receiver // invalid and a new Channel should be opened. 
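 	// Added note: the declarations below are idempotent by AMQP semantics.
 	// Redeclaring an exchange or queue with identical parameters is a no-op,
 	// while mismatched parameters fail the channel, which is what lets the
 	// redialer safely re-run this setup after a reconnect.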
- channel, err := j.conn.Channel() + channel, err := c.conn.Channel() if err != nil { return errors.E(op, err) } // declare an exchange (idempotent operation) err = channel.ExchangeDeclare( - j.exchangeName, - j.exchangeType, + c.exchangeName, + c.exchangeType, true, false, false, @@ -30,10 +30,10 @@ func (j *consumer) initRabbitMQ() error { // verify or declare a queue q, err := channel.QueueDeclare( - j.queue, + c.queue, false, false, - j.exclusive, + c.exclusive, false, nil, ) @@ -44,8 +44,8 @@ func (j *consumer) initRabbitMQ() error { // bind queue to the exchange err = channel.QueueBind( q.Name, - j.routingKey, - j.exchangeName, + c.routingKey, + c.exchangeName, false, nil, ) diff --git a/plugins/amqp/amqpjobs/redial.go b/plugins/amqp/amqpjobs/redial.go index 0835e3ea..56142e2b 100644 --- a/plugins/amqp/amqpjobs/redial.go +++ b/plugins/amqp/amqpjobs/redial.go @@ -11,26 +11,26 @@ import ( ) // redialer used to redial to the rabbitmq in case of the connection interrupts -func (j *consumer) redialer() { //nolint:gocognit +func (c *consumer) redialer() { //nolint:gocognit go func() { const op = errors.Op("rabbitmq_redial") for { select { - case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): + case err := <-c.conn.NotifyClose(make(chan *amqp.Error)): if err == nil { return } - j.Lock() + c.Lock() // trash the broken publishing channel - <-j.publishChan + <-c.publishChan t := time.Now() - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipeError, Pipeline: pipe.Name(), Driver: pipe.Driver(), @@ -40,41 +40,41 @@ func (j *consumer) redialer() { //nolint:gocognit expb := backoff.NewExponentialBackOff() // set the retry timeout (minutes) - expb.MaxElapsedTime = j.retryTimeout + expb.MaxElapsedTime = c.retryTimeout operation := func() error { - j.log.Warn("rabbitmq reconnecting, caused by", "error", err) + c.log.Warn("rabbitmq reconnecting, caused by", "error", err) var dialErr error - j.conn, dialErr = amqp.Dial(j.connStr) + c.conn, dialErr = amqp.Dial(c.connStr) if dialErr != nil { return errors.E(op, dialErr) } - j.log.Info("rabbitmq dial succeed. trying to redeclare queues and subscribers") + c.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") // re-init connection - errInit := j.initRabbitMQ() + errInit := c.initRabbitMQ() if errInit != nil { - j.log.Error("rabbitmq dial", "error", errInit) + c.log.Error("rabbitmq dial", "error", errInit) return errInit } // redeclare consume channel var errConnCh error - j.consumeChan, errConnCh = j.conn.Channel() + c.consumeChan, errConnCh = c.conn.Channel() if errConnCh != nil { return errors.E(op, errConnCh) } // redeclare publish channel - pch, errPubCh := j.conn.Channel() + pch, errPubCh := c.conn.Channel() if errPubCh != nil { return errors.E(op, errPubCh) } // start reading messages from the channel - deliv, err := j.consumeChan.Consume( - j.queue, - j.consumeID, + deliv, err := c.consumeChan.Consume( + c.queue, + c.consumeID, false, false, false, @@ -86,23 +86,23 @@ func (j *consumer) redialer() { //nolint:gocognit } // put the fresh publishing channel - j.publishChan <- pch + c.publishChan <- pch // restart listener - j.listener(deliv) + c.listener(deliv) - j.log.Info("queues and subscribers redeclared successfully") + c.log.Info("queues and subscribers redeclared successfully") return nil } retryErr := backoff.Retry(operation, expb) if retryErr != nil { - j.Unlock() - j.log.Error("backoff failed", "error", retryErr) + c.Unlock() + c.log.Error("backoff failed", "error", retryErr) return } - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Pipeline: pipe.Name(), Driver: pipe.Driver(), @@ -110,27 +110,27 @@ func (j *consumer) redialer() { //nolint:gocognit Elapsed: time.Since(t), }) - j.Unlock() + c.Unlock() - case <-j.stopCh: - if j.publishChan != nil { - pch := <-j.publishChan + case <-c.stopCh: + if c.publishChan != nil { + pch := <-c.publishChan err := pch.Close() if err != nil { - j.log.Error("publish channel close", "error", err) + c.log.Error("publish channel close", "error", err) } } - if j.consumeChan != nil { - err := j.consumeChan.Close() + if c.consumeChan != nil { + err := c.consumeChan.Close() if err != nil { - j.log.Error("consume channel close", "error", err) + c.log.Error("consume channel close", "error", err) } } - if j.conn != nil { - err := j.conn.Close() + if c.conn != nil { + err := c.conn.Close() if err != nil { - j.log.Error("amqp connection close", "error", err) + c.log.Error("amqp connection close", "error", err) } } diff --git a/plugins/boltdb/boltjobs/config.go b/plugins/boltdb/boltjobs/config.go index 013e30bf..8cc098c1 100644 --- a/plugins/boltdb/boltjobs/config.go +++ b/plugins/boltdb/boltjobs/config.go @@ -1,16 +1,39 @@ package boltjobs -type Config struct { - // File is boltDB file. 
No need to create it by your own, - // boltdb driver is able to create the file, or read existing - File string - // Bucket to store data in boltDB - bucket string +const ( + file string = "file" + priority string = "priority" + prefetch string = "prefetch" +) + +type GlobalCfg struct { // db file permissions - Permissions int + Permissions int `mapstructure:"permissions"` // consume timeout } +func (c *GlobalCfg) InitDefaults() { + if c.Permissions == 0 { + c.Permissions = 0777 + } +} + +type Config struct { + File string `mapstructure:"file"` + Priority int `mapstructure:"priority"` + Prefetch int `mapstructure:"prefetch"` +} + func (c *Config) InitDefaults() { + if c.File == "" { + c.File = "rr.db" + } + + if c.Priority == 0 { + c.Priority = 10 + } + if c.Prefetch == 0 { + c.Prefetch = 1000 + } } diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go index a8db2f30..67a6d3e7 100644 --- a/plugins/boltdb/boltjobs/consumer.go +++ b/plugins/boltdb/boltjobs/consumer.go @@ -1,11 +1,14 @@ package boltjobs import ( + "bytes" "context" + "encoding/gob" "os" "sync/atomic" "time" + "github.com/google/uuid" "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/pkg/events" priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" @@ -20,19 +23,27 @@ import ( const ( PluginName = "boltdb" + + PushBucket = "push" + InQueueBucket = "processing" + DoneBucket = "done" ) type consumer struct { - // bbolt configuration file string permissions int - bucket string - db *bolt.DB + priority int + prefetch int + + db *bolt.DB + + log logger.Logger + eh events.Handler + pq priorityqueue.Queue + listeners uint32 + pipeline atomic.Value - log logger.Logger - eh events.Handler - pq priorityqueue.Queue - pipe atomic.Value + stopCh chan struct{} } func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { @@ -47,26 +58,88 @@ func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e return nil, errors.E(op, errors.Str("no global boltdb configuration")) } - conf := &Config{} + conf := &GlobalCfg{} - err := cfg.UnmarshalKey(configKey, conf) + err := cfg.UnmarshalKey(PluginName, conf) if err != nil { return nil, errors.E(op, err) } - // add default values + localCfg := &Config{} + err = cfg.UnmarshalKey(configKey, localCfg) + if err != nil { + return nil, errors.E(op, err) + } + + localCfg.InitDefaults() conf.InitDefaults() - c := &consumer{ - file: conf.File, + + db, err := bolt.Open(localCfg.File, os.FileMode(conf.Permissions), &bolt.Options{ + Timeout: time.Second * 20, + NoGrowSync: false, + NoFreelistSync: false, + ReadOnly: false, + NoSync: false, + }) + + if err != nil { + return nil, errors.E(op, err) + } + + // create bucket if it does not exist + // tx.Commit invokes via the db.Update + err = db.Update(func(tx *bolt.Tx) error { + const upOp = errors.Op("boltdb_plugin_update") + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) + if err != nil { + return errors.E(op, upOp) + } + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) + if err != nil { + return errors.E(op, upOp) + } + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DoneBucket)) + if err != nil { + return errors.E(op, upOp) + } + return nil + }) + if err != nil { + return nil, errors.E(op, err) + } + + return &consumer{ permissions: conf.Permissions, - bucket: conf.bucket, + file: localCfg.File, + priority: localCfg.Priority, + prefetch: localCfg.Prefetch, + + db: db, + 
log: log, + eh: e, + pq: pq, + stopCh: make(chan struct{}, 1), + }, nil +} + +func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { + const op = errors.Op("init_boltdb_jobs") - log: log, - eh: e, - pq: pq, + // if no global section + if !cfg.Has(PluginName) { + return nil, errors.E(op, errors.Str("no global boltdb configuration")) } - db, err := bolt.Open(c.file, os.FileMode(c.permissions), &bolt.Options{ + conf := &GlobalCfg{} + err := cfg.UnmarshalKey(PluginName, conf) + if err != nil { + return nil, errors.E(op, err) + } + + // add default values + conf.InitDefaults() + + db, err := bolt.Open(pipeline.String(file, "rr.db"), os.FileMode(conf.Permissions), &bolt.Options{ Timeout: time.Second * 20, NoGrowSync: false, NoFreelistSync: false, @@ -78,51 +151,135 @@ func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e return nil, errors.E(op, err) } - c.db = db - // create bucket if it does not exist // tx.Commit invokes via the db.Update err = db.Update(func(tx *bolt.Tx) error { const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(c.bucket)) + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) + if err != nil { + return errors.E(op, upOp) + } + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) + if err != nil { + return errors.E(op, upOp) + } + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DoneBucket)) if err != nil { return errors.E(op, upOp) } return nil }) - return c, nil -} + if err != nil { + return nil, errors.E(op, err) + } -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - return &consumer{}, nil + return &consumer{ + file: pipeline.String(file, "rr.db"), + priority: pipeline.Int(priority, 10), + prefetch: pipeline.Int(prefetch, 100), + permissions: conf.Permissions, + + db: db, + log: log, + eh: e, + pq: pq, + stopCh: make(chan struct{}, 1), + }, nil } func (c *consumer) Push(ctx context.Context, job *job.Job) error { - panic("implement me") + const op = errors.Op("boltdb_jobs_push") + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(utils.AsBytes(PushBucket)) + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(job) + if err != nil { + return err + } + + return b.Put(utils.AsBytes(uuid.NewString()), buf.Bytes()) + }) + + if err != nil { + return errors.E(op, err) + } + + return nil } func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { - c.pipe.Store(pipeline) + c.pipeline.Store(pipeline) return nil } -func (c *consumer) Run(_ context.Context, pipeline *pipeline.Pipeline) error { - panic("implement me") +func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { + const op = errors.Op("boltdb_run") + + pipe := c.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p.Name() { + return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) + } + return nil } func (c *consumer) Stop(ctx context.Context) error { - panic("implement me") + return nil } -func (c *consumer) Pause(ctx context.Context, pipeline string) { - panic("implement me") +func (c *consumer) Pause(ctx context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + c.log.Error("no such pipeline", "requested pause on: ", p) + } + + l := atomic.LoadUint32(&c.listeners) + // no 
active listeners + if l == 0 { + c.log.Warn("no active listeners, nothing to pause") + return + } + + c.stopCh <- struct{}{} + + atomic.AddUint32(&c.listeners, ^uint32(0)) + + c.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) } -func (c *consumer) Resume(ctx context.Context, pipeline string) { - panic("implement me") +func (c *consumer) Resume(ctx context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + c.log.Error("no such pipeline", "requested resume on: ", p) + } + + l := atomic.LoadUint32(&c.listeners) + // no active listeners + if l == 1 { + c.log.Warn("amqp listener already in the active state") + return + } + + // run listener + go c.listener() + + // increase number of listeners + atomic.AddUint32(&c.listeners, 1) + + c.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) } func (c *consumer) State(ctx context.Context) (*jobState.State, error) { - panic("implement me") + return nil, nil } diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go index 4a8d6cd9..2ee06088 100644 --- a/plugins/boltdb/boltjobs/listener.go +++ b/plugins/boltdb/boltjobs/listener.go @@ -1,22 +1,34 @@ package boltjobs -import "time" +import ( + "fmt" + "time" + + "github.com/spiral/roadrunner/v2/utils" +) func (c *consumer) listener() { tt := time.NewTicker(time.Second) for { select { + case <-c.stopCh: + c.log.Warn("boltdb listener stopped") + return case <-tt.C: tx, err := c.db.Begin(false) if err != nil { panic(err) } - // cursor := tx.Cursor() - err = tx.Commit() - if err != nil { - panic(err) - } + b := tx.Bucket(utils.AsBytes(PushBucket)) + + cursor := b.Cursor() + + k, v := cursor.First() + _ = k + _ = v + + fmt.Println("foo") } } } diff --git a/plugins/jobs/job/general.go b/plugins/jobs/job/general.go deleted file mode 100644 index 390f44b5..00000000 --- a/plugins/jobs/job/general.go +++ /dev/null @@ -1,29 +0,0 @@ -package job - -// constant keys to pack/unpack messages from different drivers -const ( - RRID string = "rr_id" - RRJob string = "rr_job" - RRHeaders string = "rr_headers" - RRPipeline string = "rr_pipeline" - RRDelay string = "rr_delay" - RRPriority string = "rr_priority" -) - -// Job carries information about single job. -type Job struct { - // Job contains name of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-value pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} diff --git a/plugins/jobs/job/job.go b/plugins/jobs/job/job.go new file mode 100644 index 00000000..06c3254e --- /dev/null +++ b/plugins/jobs/job/job.go @@ -0,0 +1,62 @@ +package job + +import ( + "time" +) + +// constant keys to pack/unpack messages from different drivers +const ( + RRID string = "rr_id" + RRJob string = "rr_job" + RRHeaders string = "rr_headers" + RRPipeline string = "rr_pipeline" + RRDelay string = "rr_delay" + RRPriority string = "rr_priority" +) + +// Job carries information about single job. +type Job struct { + // Job contains name of job broker (usually PHP class). 
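+	// e.g. "App\Jobs\SendEmail" (an illustrative class name, not one shipped
+	// with the plugin)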
+ Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-value pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` +} + +// Merge merges job options. +func (o *Options) Merge(from *Options) { + if o.Pipeline == "" { + o.Pipeline = from.Pipeline + } + + if o.Delay == 0 { + o.Delay = from.Delay + } +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} diff --git a/plugins/jobs/job/job_options.go b/plugins/jobs/job/job_options.go deleted file mode 100644 index b7e4ed36..00000000 --- a/plugins/jobs/job/job_options.go +++ /dev/null @@ -1,32 +0,0 @@ -package job - -import "time" - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` -} - -// Merge merges job options. -func (o *Options) Merge(from *Options) { - if o.Pipeline == "" { - o.Pipeline = from.Pipeline - } - - if o.Delay == 0 { - o.Delay = from.Delay - } -} - -// DelayDuration returns delay duration in a form of time.Duration. 
-func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} diff --git a/plugins/jobs/job/job_options_test.go b/plugins/jobs/job/job_options_test.go deleted file mode 100644 index a47151a3..00000000 --- a/plugins/jobs/job/job_options_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package job - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestOptions_DelayDuration(t *testing.T) { - opts := &Options{Delay: 0} - assert.Equal(t, time.Duration(0), opts.DelayDuration()) -} - -func TestOptions_DelayDuration2(t *testing.T) { - opts := &Options{Delay: 1} - assert.Equal(t, time.Second, opts.DelayDuration()) -} - -func TestOptions_Merge(t *testing.T) { - opts := &Options{} - - opts.Merge(&Options{ - Pipeline: "pipeline", - Delay: 2, - }) - - assert.Equal(t, "pipeline", opts.Pipeline) - assert.Equal(t, int64(2), opts.Delay) -} - -func TestOptions_MergeKeepOriginal(t *testing.T) { - opts := &Options{ - Pipeline: "default", - Delay: 10, - } - - opts.Merge(&Options{ - Pipeline: "pipeline", - Delay: 2, - }) - - assert.Equal(t, "default", opts.Pipeline) - assert.Equal(t, int64(10), opts.Delay) -} diff --git a/plugins/jobs/job/job_test.go b/plugins/jobs/job/job_test.go new file mode 100644 index 00000000..a47151a3 --- /dev/null +++ b/plugins/jobs/job/job_test.go @@ -0,0 +1,45 @@ +package job + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestOptions_DelayDuration(t *testing.T) { + opts := &Options{Delay: 0} + assert.Equal(t, time.Duration(0), opts.DelayDuration()) +} + +func TestOptions_DelayDuration2(t *testing.T) { + opts := &Options{Delay: 1} + assert.Equal(t, time.Second, opts.DelayDuration()) +} + +func TestOptions_Merge(t *testing.T) { + opts := &Options{} + + opts.Merge(&Options{ + Pipeline: "pipeline", + Delay: 2, + }) + + assert.Equal(t, "pipeline", opts.Pipeline) + assert.Equal(t, int64(2), opts.Delay) +} + +func TestOptions_MergeKeepOriginal(t *testing.T) { + opts := &Options{ + Pipeline: "default", + Delay: 10, + } + + opts.Merge(&Options{ + Pipeline: "pipeline", + Delay: 2, + }) + + assert.Equal(t, "default", opts.Pipeline) + assert.Equal(t, int64(10), opts.Delay) +} diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go index 5e62c5c5..a0b477f9 100644 --- a/plugins/jobs/plugin.go +++ b/plugins/jobs/plugin.go @@ -178,7 +178,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit }) var err error - p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: "jobs"}) + p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: RrModeJobs}) if err != nil { errCh <- err return errCh -- cgit v1.2.3 From c7d9385f135853539100430521042f7e7e2ae005 Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Mon, 30 Aug 2021 21:32:50 +0300 Subject: Tests for the boltdb jobs. Fix issue with Stop in the jobs plugin which didn't destroy the pool. 
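A minimal standalone sketch of the guarded stop this patch introduces across the drivers (the type and field names here are illustrative, mirroring the diff below, not the plugins' actual wiring):

package main

import (
	"fmt"
	"sync/atomic"
)

// consumer mimics a driver: a counter of running listener goroutines
// plus a stop channel that only those listeners drain.
type consumer struct {
	listeners uint32
	stopCh    chan struct{}
}

// Stop signals the listener only if one is actually running; an
// unconditional c.stopCh <- struct{}{} can block forever when nothing
// is reading from the channel.
func (c *consumer) Stop() {
	if atomic.LoadUint32(&c.listeners) > 0 {
		c.stopCh <- struct{}{}
	}
}

func main() {
	c := &consumer{stopCh: make(chan struct{})}
	c.Stop() // no listeners registered: returns immediately instead of hanging
	fmt.Println("stopped without blocking")
}
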
Signed-off-by: Valery Piashchynski --- plugins/amqp/amqpjobs/consumer.go | 4 +- plugins/boltdb/boltjobs/consumer.go | 200 ++++++++++++++++++++++++++++++------ plugins/boltdb/boltjobs/item.go | 157 +++++++++++++++++++++++++++- plugins/boltdb/boltjobs/listener.go | 144 ++++++++++++++++++++++++-- plugins/boltdb/doc/boltjobs.drawio | 1 + plugins/boltdb/doc/job_lifecycle.md | 10 ++ plugins/ephemeral/consumer.go | 119 ++++++++++----------- plugins/jobs/plugin.go | 12 ++- plugins/sqs/consumer.go | 84 +++++++-------- plugins/sqs/item.go | 8 +- plugins/sqs/listener.go | 36 +++---- 11 files changed, 607 insertions(+), 168 deletions(-) create mode 100644 plugins/boltdb/doc/boltjobs.drawio create mode 100644 plugins/boltdb/doc/job_lifecycle.md (limited to 'plugins') diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go index f1b4d54f..578f36ce 100644 --- a/plugins/amqp/amqpjobs/consumer.go +++ b/plugins/amqp/amqpjobs/consumer.go @@ -420,7 +420,9 @@ func (c *consumer) Resume(_ context.Context, p string) { } func (c *consumer) Stop(context.Context) error { - c.stopCh <- struct{}{} + if atomic.LoadUint32(&c.listeners) > 0 { + c.stopCh <- struct{}{} + } pipe := c.pipeline.Load().(*pipeline.Pipeline) c.eh.Push(events.JobEvent{ diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go index 67a6d3e7..2492ab60 100644 --- a/plugins/boltdb/boltjobs/consumer.go +++ b/plugins/boltdb/boltjobs/consumer.go @@ -5,10 +5,10 @@ import ( "context" "encoding/gob" "os" + "sync" "sync/atomic" "time" - "github.com/google/uuid" "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/pkg/events" priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" @@ -22,11 +22,12 @@ import ( ) const ( - PluginName = "boltdb" + PluginName string = "boltdb" + rrDB string = "rr.db" - PushBucket = "push" - InQueueBucket = "processing" - DoneBucket = "done" + PushBucket string = "push" + InQueueBucket string = "processing" + DelayBucket string = "delayed" ) type consumer struct { @@ -37,11 +38,16 @@ type consumer struct { db *bolt.DB - log logger.Logger - eh events.Handler - pq priorityqueue.Queue + bPool sync.Pool + log logger.Logger + eh events.Handler + pq priorityqueue.Queue + pipeline atomic.Value + cond *sync.Cond + listeners uint32 - pipeline atomic.Value + active *uint64 + delayed *uint64 stopCh chan struct{} } @@ -90,20 +96,36 @@ func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e // tx.Commit invokes via the db.Update err = db.Update(func(tx *bolt.Tx) error { const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket)) if err != nil { return errors.E(op, upOp) } - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) + + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) if err != nil { return errors.E(op, upOp) } - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DoneBucket)) + + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) if err != nil { return errors.E(op, upOp) } + + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + cursor := inQb.Cursor() + + pushB := tx.Bucket(utils.AsBytes(PushBucket)) + + // get all items, which are in the InQueueBucket and put them into the PushBucket + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + err = pushB.Put(k, v) + if err != nil { + return errors.E(op, err) + } + } return nil }) + if err != nil { return nil, errors.E(op, 
err) } @@ -114,11 +136,19 @@ func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e priority: localCfg.Priority, prefetch: localCfg.Prefetch, + bPool: sync.Pool{New: func() interface{} { + return new(bytes.Buffer) + }}, + cond: sync.NewCond(&sync.Mutex{}), + + delayed: utils.Uint64(0), + active: utils.Uint64(0), + db: db, log: log, eh: e, pq: pq, - stopCh: make(chan struct{}, 1), + stopCh: make(chan struct{}, 2), }, nil } @@ -139,7 +169,7 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Con // add default values conf.InitDefaults() - db, err := bolt.Open(pipeline.String(file, "rr.db"), os.FileMode(conf.Permissions), &bolt.Options{ + db, err := bolt.Open(pipeline.String(file, rrDB), os.FileMode(conf.Permissions), &bolt.Options{ Timeout: time.Second * 20, NoGrowSync: false, NoFreelistSync: false, @@ -155,18 +185,34 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Con // tx.Commit invokes via the db.Update err = db.Update(func(tx *bolt.Tx) error { const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket)) if err != nil { return errors.E(op, upOp) } - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) + + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) if err != nil { return errors.E(op, upOp) } - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DoneBucket)) + + _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) if err != nil { return errors.E(op, upOp) } + + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + cursor := inQb.Cursor() + + pushB := tx.Bucket(utils.AsBytes(PushBucket)) + + // get all items, which are in the InQueueBucket and put them into the PushBucket + for k, v := cursor.First(); k != nil; k, v = cursor.Next() { + err = pushB.Put(k, v) + if err != nil { + return errors.E(op, err) + } + } + return nil }) @@ -175,31 +221,74 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Con } return &consumer{ - file: pipeline.String(file, "rr.db"), + file: pipeline.String(file, rrDB), priority: pipeline.Int(priority, 10), prefetch: pipeline.Int(prefetch, 100), permissions: conf.Permissions, + bPool: sync.Pool{New: func() interface{} { + return new(bytes.Buffer) + }}, + cond: sync.NewCond(&sync.Mutex{}), + + delayed: utils.Uint64(0), + active: utils.Uint64(0), + db: db, log: log, eh: e, pq: pq, - stopCh: make(chan struct{}, 1), + stopCh: make(chan struct{}, 2), }, nil } -func (c *consumer) Push(ctx context.Context, job *job.Job) error { +func (c *consumer) Push(_ context.Context, job *job.Job) error { const op = errors.Op("boltdb_jobs_push") err := c.db.Update(func(tx *bolt.Tx) error { + item := fromJob(job) + + // handle delay + if item.Options.Delay > 0 { + b := tx.Bucket(utils.AsBytes(DelayBucket)) + tKey := time.Now().Add(time.Second * time.Duration(item.Options.Delay)).Format(time.RFC3339) + + // pool with buffers + buf := c.get() + defer c.put(buf) + + enc := gob.NewEncoder(buf) + err := enc.Encode(item) + if err != nil { + return errors.E(op, err) + } + + value := make([]byte, buf.Len()) + copy(value, buf.Bytes()) + + atomic.AddUint64(c.delayed, 1) + + return b.Put(utils.AsBytes(tKey), value) + } + b := tx.Bucket(utils.AsBytes(PushBucket)) - buf := new(bytes.Buffer) + + // pool with buffers + buf := c.get() + defer c.put(buf) + enc := gob.NewEncoder(buf) - err := enc.Encode(job) + err := enc.Encode(item) if 
err != nil { - return err + return errors.E(op, err) } - return b.Put(utils.AsBytes(uuid.NewString()), buf.Bytes()) + value := make([]byte, buf.Len()) + copy(value, buf.Bytes()) + + // increment active counter + atomic.AddUint64(c.active, 1) + + return b.Put(utils.AsBytes(item.ID()), value) }) if err != nil { @@ -221,14 +310,41 @@ func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { if pipe.Name() != p.Name() { return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) } + + // run listener + go c.listener() + go c.delayedJobsListener() + + // increase number of listeners + atomic.AddUint32(&c.listeners, 1) + + c.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil } -func (c *consumer) Stop(ctx context.Context) error { +func (c *consumer) Stop(_ context.Context) error { + if atomic.LoadUint32(&c.listeners) > 0 { + c.stopCh <- struct{}{} + c.stopCh <- struct{}{} + } + + pipe := c.pipeline.Load().(*pipeline.Pipeline) + c.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) return nil } -func (c *consumer) Pause(ctx context.Context, p string) { +func (c *consumer) Pause(_ context.Context, p string) { pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { c.log.Error("no such pipeline", "requested pause on: ", p) @@ -241,6 +357,7 @@ func (c *consumer) Pause(ctx context.Context, p string) { return } + c.stopCh <- struct{}{} c.stopCh <- struct{}{} atomic.AddUint32(&c.listeners, ^uint32(0)) @@ -253,7 +370,7 @@ func (c *consumer) Pause(ctx context.Context, p string) { }) } -func (c *consumer) Resume(ctx context.Context, p string) { +func (c *consumer) Resume(_ context.Context, p string) { pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { c.log.Error("no such pipeline", "requested resume on: ", p) @@ -268,6 +385,7 @@ func (c *consumer) Resume(ctx context.Context, p string) { // run listener go c.listener() + go c.delayedJobsListener() // increase number of listeners atomic.AddUint32(&c.listeners, 1) @@ -280,6 +398,30 @@ func (c *consumer) Resume(ctx context.Context, p string) { }) } -func (c *consumer) State(ctx context.Context) (*jobState.State, error) { - return nil, nil +func (c *consumer) State(_ context.Context) (*jobState.State, error) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) + + return &jobState.State{ + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Queue: PushBucket, + Active: int64(atomic.LoadUint64(c.active)), + Delayed: int64(atomic.LoadUint64(c.delayed)), + Ready: toBool(atomic.LoadUint32(&c.listeners)), + }, nil +} + +// Private + +func (c *consumer) get() *bytes.Buffer { + return c.bPool.Get().(*bytes.Buffer) +} + +func (c *consumer) put(b *bytes.Buffer) { + b.Reset() + c.bPool.Put(b) +} + +func toBool(r uint32) bool { + return r > 0 } diff --git a/plugins/boltdb/boltjobs/item.go b/plugins/boltdb/boltjobs/item.go index 8a4aefa3..4f02bb43 100644 --- a/plugins/boltdb/boltjobs/item.go +++ b/plugins/boltdb/boltjobs/item.go @@ -1,8 +1,16 @@ package boltjobs import ( + "bytes" + "encoding/gob" + "sync/atomic" + "time" + json "github.com/json-iterator/go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" "github.com/spiral/roadrunner/v2/utils" + "go.etcd.io/bbolt" ) type Item struct { @@ -33,6 +41,12 @@ type Options struct { // Delay defines time duration to delay execution for. Defaults to none. 
Delay int64 `json:"delay,omitempty"` + + // private + db *bbolt.DB + + active *uint64 + delayed *uint64 } func (i *Item) ID() string { @@ -65,13 +79,150 @@ func (i *Item) Context() ([]byte, error) { } func (i *Item) Ack() error { - panic("implement me") + const op = errors.Op("boltdb_item_ack") + tx, err := i.Options.db.Begin(true) + if err != nil { + _ = tx.Rollback() + return errors.E(op, err) + } + + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + err = inQb.Delete(utils.AsBytes(i.ID())) + if err != nil { + _ = tx.Rollback() + return errors.E(op, err) + } + + if i.Options.Delay > 0 { + atomic.AddUint64(i.Options.delayed, ^uint64(0)) + } else { + atomic.AddUint64(i.Options.active, ^uint64(0)) + } + + return tx.Commit() } func (i *Item) Nack() error { - panic("implement me") + const op = errors.Op("boltdb_item_nack") + /* + steps: + 1. Begin tx + 2. Get the item by ID from the InQueueBucket (previously put there by the listener) + 3. Put it back to the PushBucket + 4. Delete it from the InQueueBucket + */ + tx, err := i.Options.db.Begin(true) + if err != nil { + _ = tx.Rollback() + return errors.E(op, err) + } + + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + v := inQb.Get(utils.AsBytes(i.ID())) + + pushB := tx.Bucket(utils.AsBytes(PushBucket)) + + err = pushB.Put(utils.AsBytes(i.ID()), v) + if err != nil { + _ = tx.Rollback() + return errors.E(op, err) + } + + err = inQb.Delete(utils.AsBytes(i.ID())) + if err != nil { + _ = tx.Rollback() + return errors.E(op, err) + } + + return tx.Commit() } func (i *Item) Requeue(headers map[string][]string, delay int64) error { - panic("implement me") + const op = errors.Op("boltdb_item_requeue") + i.Headers = headers + i.Options.Delay = delay + + tx, err := i.Options.db.Begin(true) + if err != nil { + return errors.E(op, err) + } + + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + err = inQb.Delete(utils.AsBytes(i.ID())) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + if delay > 0 { + delayB := tx.Bucket(utils.AsBytes(DelayBucket)) + tKey := time.Now().Add(time.Second * time.Duration(delay)).Format(time.RFC3339) + + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err = enc.Encode(i) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + err = delayB.Put(utils.AsBytes(tKey), buf.Bytes()) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + err = inQb.Delete(utils.AsBytes(i.ID())) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + return tx.Commit() + } + + pushB := tx.Bucket(utils.AsBytes(PushBucket)) + + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err = enc.Encode(i) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + err = pushB.Put(utils.AsBytes(i.ID()), buf.Bytes()) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + err = inQb.Delete(utils.AsBytes(i.ID())) + if err != nil { + return errors.E(op, i.rollback(err, tx)) + } + + return tx.Commit() +} + +func (i *Item) attachDB(db *bbolt.DB, active, delayed *uint64) { + i.Options.db = db + i.Options.active = active + i.Options.delayed = delayed +} + +func (i *Item) rollback(err error, tx *bbolt.Tx) error { + errR := tx.Rollback() + if errR != nil { + return errors.Errorf("transaction commit error: %v, rollback failed: %v", err, errR) + } + return errors.Errorf("transaction commit error: %v", err) +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: 
job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } } diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go index 2ee06088..d184303a 100644 --- a/plugins/boltdb/boltjobs/listener.go +++ b/plugins/boltdb/boltjobs/listener.go @@ -1,34 +1,160 @@ package boltjobs import ( - "fmt" + "bytes" + "encoding/gob" + "sync/atomic" "time" "github.com/spiral/roadrunner/v2/utils" + bolt "go.etcd.io/bbolt" ) func (c *consumer) listener() { - tt := time.NewTicker(time.Second) + tt := time.NewTicker(time.Millisecond * 10) + defer tt.Stop() for { select { case <-c.stopCh: c.log.Warn("boltdb listener stopped") return case <-tt.C: - tx, err := c.db.Begin(false) + if atomic.LoadUint64(c.active) >= uint64(c.prefetch) { + time.Sleep(time.Second) + continue + } + + tx, err := c.db.Begin(true) if err != nil { - panic(err) + c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err) + continue } b := tx.Bucket(utils.AsBytes(PushBucket)) + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + + // get first item + k, v := b.Cursor().First() + if k == nil && v == nil { + _ = tx.Commit() + continue + } + + buf := bytes.NewReader(v) + dec := gob.NewDecoder(buf) + + item := &Item{} + err = dec.Decode(item) + if err != nil { + c.rollback(err, tx) + continue + } - cursor := b.Cursor() + err = inQb.Put(utils.AsBytes(item.ID()), v) + if err != nil { + c.rollback(err, tx) + continue + } - k, v := cursor.First() - _ = k - _ = v + // delete key from the PushBucket + err = b.Delete(k) + if err != nil { + c.rollback(err, tx) + continue + } - fmt.Println("foo") + err = tx.Commit() + if err != nil { + c.rollback(err, tx) + continue + } + + // attach pointer to the DB + item.attachDB(c.db, c.active, c.delayed) + // as the last step, after commit, put the item into the PQ + c.pq.Insert(item) } } } + +func (c *consumer) delayedJobsListener() { + tt := time.NewTicker(time.Millisecond * 100) + defer tt.Stop() + for { + select { + case <-c.stopCh: + c.log.Warn("boltdb listener stopped") + return + case <-tt.C: + tx, err := c.db.Begin(true) + if err != nil { + c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err) + continue + } + + delayB := tx.Bucket(utils.AsBytes(DelayBucket)) + inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) + + // get first item + k, v := delayB.Cursor().First() + if k == nil && v == nil { + _ = tx.Commit() + continue + } + + t, err := time.Parse(time.RFC3339, utils.AsString(k)) + if err != nil { + c.rollback(err, tx) + continue + } + + if t.After(time.Now()) { + _ = tx.Commit() + continue + } + + buf := bytes.NewReader(v) + dec := gob.NewDecoder(buf) + + item := &Item{} + err = dec.Decode(item) + if err != nil { + c.rollback(err, tx) + continue + } + + err = inQb.Put(utils.AsBytes(item.ID()), v) + if err != nil { + c.rollback(err, tx) + continue + } + + // delete key from the PushBucket + err = delayB.Delete(k) + if err != nil { + c.rollback(err, tx) + continue + } + + err = tx.Commit() + if err != nil { + c.rollback(err, tx) + continue + } + + // attach pointer to the DB + item.attachDB(c.db, c.active, c.delayed) + // as the last step, after commit, put the item into the PQ + c.pq.Insert(item) + } + } +} + +func (c *consumer) rollback(err error, tx *bolt.Tx) { + errR := tx.Rollback() + if errR != nil { + c.log.Error("transaction commit error, rollback failed", "error", err, "rollback error", errR) + } + + c.log.Error("transaction commit error, rollback 
succeeded", "error", err) } diff --git a/plugins/boltdb/doc/boltjobs.drawio b/plugins/boltdb/doc/boltjobs.drawio new file mode 100644 index 00000000..feeccae0 --- /dev/null +++ b/plugins/boltdb/doc/boltjobs.drawio @@ -0,0 +1 @@ +ddHBDsIwDADQr+GOEPcDc+rF0w6eyahAwtaFsWz69W4ZiGR6orwWSgrhZTtfnOj1DSVYwqicCT8Rhg7HoliWVZ5BKAuinJHBEtTmBbEw6GgkDFmhR7Te9Dk22HXQ+MyEczjlZQ+0eddeKNhB3Qi717uRXgctKE2JKxilY2sWM62I1QEGLSROX8QrwkuH6LeonUuw6/jiYLZz5z/Zz8scdP7HgSVIdy+b7I949QY= \ No newline at end of file diff --git a/plugins/boltdb/doc/job_lifecycle.md b/plugins/boltdb/doc/job_lifecycle.md new file mode 100644 index 00000000..317aec90 --- /dev/null +++ b/plugins/boltdb/doc/job_lifecycle.md @@ -0,0 +1,8 @@ +### Job lifecycle + +There are several boltdb buckets: + +1. `PushBucket` - used for jobs pushed via RPC. +2. `InQueueBucket` - when a job is consumed from the `PushBucket`, it is copied into the priority queue and moved +into the `InQueueBucket` (within the same transaction), where it waits for acknowledgement. +3. `DelayBucket` - used for delayed jobs. An RFC3339 timestamp is used as the key to track delay expiration. diff --git a/plugins/ephemeral/consumer.go b/plugins/ephemeral/consumer.go index 91b8eda9..8870bb0f 100644 --- a/plugins/ephemeral/consumer.go +++ b/plugins/ephemeral/consumer.go @@ -88,16 +88,16 @@ func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Hand return jb, nil } -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { +func (c *consumer) Push(ctx context.Context, jb *job.Job) error { const op = errors.Op("ephemeral_push") // check if the pipeline registered - _, ok := j.pipeline.Load().(*pipeline.Pipeline) + _, ok := c.pipeline.Load().(*pipeline.Pipeline) if !ok { return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline)) } - err := j.handleItem(ctx, fromJob(jb)) + err := c.handleItem(ctx, fromJob(jb)) if err != nil { return errors.E(op, err) } @@ -105,42 +105,42 @@ func (j *consumer) Push(ctx context.Context, jb *job.Job) error { return nil } -func (j *consumer) State(_ context.Context) (*jobState.State, error) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) +func (c *consumer) State(_ context.Context) (*jobState.State, error) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) return &jobState.State{ Pipeline: pipe.Name(), Driver: pipe.Driver(), Queue: pipe.Name(), - Active: atomic.LoadInt64(j.active), - Delayed: atomic.LoadInt64(j.delayed), - Ready: ready(atomic.LoadUint32(&j.listeners)), + Active: atomic.LoadInt64(c.active), + Delayed: atomic.LoadInt64(c.delayed), + Ready: ready(atomic.LoadUint32(&c.listeners)), }, nil } -func (j *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { - j.pipeline.Store(pipeline) +func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { + c.pipeline.Store(pipeline) return nil } -func (j *consumer) Pause(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) +func (c *consumer) Pause(_ context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested pause on: ", p) + c.log.Error("no such pipeline", "requested pause on: ", p) } - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // no active listeners if l == 0 { - j.log.Warn("no active listeners, nothing to pause") + c.log.Warn("no active listeners, nothing to pause") return } - atomic.AddUint32(&j.listeners, ^uint32(0)) + atomic.AddUint32(&c.listeners, ^uint32(0)) // stop the consumer - 
j.stopCh <- struct{}{} + c.stopCh <- struct{}{} - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipePaused, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -149,24 +149,24 @@ func (j *consumer) Pause(_ context.Context, p string) { }) } -func (j *consumer) Resume(_ context.Context, p string) { - pipe := j.pipeline.Load().(*pipeline.Pipeline) +func (c *consumer) Resume(_ context.Context, p string) { + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested resume on: ", p) + c.log.Error("no such pipeline", "requested resume on: ", p) } - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // listener already active if l == 1 { - j.log.Warn("listener already in the active state") + c.log.Warn("listener already in the active state") return } // resume the consumer on the same channel - j.consume() + c.consume() - atomic.StoreUint32(&j.listeners, 1) - j.eh.Push(events.JobEvent{ + atomic.StoreUint32(&c.listeners, 1) + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Pipeline: pipe.Name(), Start: time.Now(), @@ -175,8 +175,8 @@ func (j *consumer) Resume(_ context.Context, p string) { } // Run is no-op for the ephemeral -func (j *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { - j.eh.Push(events.JobEvent{ +func (c *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -185,84 +185,79 @@ func (j *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { return nil } -func (j *consumer) Stop(ctx context.Context) error { - const op = errors.Op("ephemeral_plugin_stop") +func (c *consumer) Stop(_ context.Context) error { + pipe := c.pipeline.Load().(*pipeline.Pipeline) - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - select { - // return from the consumer - case j.stopCh <- struct{}{}: - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Pipeline: pipe.Name(), - Start: time.Now(), - Elapsed: 0, - }) + if atomic.LoadUint32(&c.listeners) > 0 { + c.stopCh <- struct{}{} + } - return nil + c.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Pipeline: pipe.Name(), + Start: time.Now(), + Elapsed: 0, + }) - case <-ctx.Done(): - return errors.E(op, ctx.Err()) - } + return nil } -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { +func (c *consumer) handleItem(ctx context.Context, msg *Item) error { const op = errors.Op("ephemeral_handle_request") // handle timeouts // theoretically, some bad user may send millions requests with a delay and produce a billion (for example) // goroutines here. We should limit goroutines here. 
if msg.Options.Delay > 0 { // if we have 1000 goroutines waiting on the delay - reject 1001 - if atomic.LoadUint64(&j.goroutines) >= goroutinesMax { + if atomic.LoadUint64(&c.goroutines) >= goroutinesMax { return errors.E(op, errors.Str("max concurrency number reached")) } go func(jj *Item) { - atomic.AddUint64(&j.goroutines, 1) - atomic.AddInt64(j.delayed, 1) + atomic.AddUint64(&c.goroutines, 1) + atomic.AddInt64(c.delayed, 1) time.Sleep(jj.Options.DelayDuration()) // send the item after timeout expired - j.localPrefetch <- jj + c.localPrefetch <- jj - atomic.AddUint64(&j.goroutines, ^uint64(0)) + atomic.AddUint64(&c.goroutines, ^uint64(0)) }(msg) return nil } // increase number of the active jobs - atomic.AddInt64(j.active, 1) + atomic.AddInt64(c.active, 1) // insert to the local, limited pipeline select { - case j.localPrefetch <- msg: + case c.localPrefetch <- msg: return nil case <-ctx.Done(): - return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", j.cfg.Prefetch, ctx.Err())) + return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", c.cfg.Prefetch, ctx.Err())) } } -func (j *consumer) consume() { +func (c *consumer) consume() { go func() { // redirect for { select { - case item, ok := <-j.localPrefetch: + case item, ok := <-c.localPrefetch: if !ok { - j.log.Warn("ephemeral local prefetch queue was closed") + c.log.Warn("ephemeral local prefetch queue was closed") return } // set requeue channel - item.Options.requeueFn = j.handleItem - item.Options.active = j.active - item.Options.delayed = j.delayed + item.Options.requeueFn = c.handleItem + item.Options.active = c.active + item.Options.delayed = c.delayed - j.pq.Insert(item) - case <-j.stopCh: + c.pq.Insert(item) + case <-c.stopCh: return } } diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go index a0b477f9..236aded3 100644 --- a/plugins/jobs/plugin.go +++ b/plugins/jobs/plugin.go @@ -177,6 +177,11 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit return true }) + // do not continue processing, immediately stop if channel contains an error + if len(errCh) > 0 { + return errCh + } + var err error p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: RrModeJobs}) if err != nil { @@ -279,6 +284,8 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit Start: start, Elapsed: time.Since(start), }) + + continue } // handle the response protocol @@ -330,6 +337,10 @@ func (p *Plugin) Stop() error { cancel() } + p.Lock() + p.workersPool.Destroy(context.Background()) + p.Unlock() + // this function can block forever, but we don't care, because we might have a chance to exit from the pollers, // but if not, this is not a problem at all. 
// The main target is to stop the drivers @@ -342,7 +353,6 @@ func (p *Plugin) Stop() error { // just wait pollers for 5 seconds before exit time.Sleep(time.Second * 5) - return nil } diff --git a/plugins/sqs/consumer.go b/plugins/sqs/consumer.go index 23203190..dfbda154 100644 --- a/plugins/sqs/consumer.go +++ b/plugins/sqs/consumer.go @@ -227,12 +227,12 @@ func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Conf return jb, nil } -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { +func (c *consumer) Push(ctx context.Context, jb *job.Job) error { const op = errors.Op("sqs_push") // check if the pipeline registered // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != jb.Options.Pipeline { return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) } @@ -243,17 +243,17 @@ func (j *consumer) Push(ctx context.Context, jb *job.Job) error { return errors.E(op, errors.Errorf("unable to push, maximum possible delay is 900 seconds (15 minutes), provided: %d", jb.Options.Delay)) } - err := j.handleItem(ctx, fromJob(jb)) + err := c.handleItem(ctx, fromJob(jb)) if err != nil { return errors.E(op, err) } return nil } -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { +func (c *consumer) State(ctx context.Context) (*jobState.State, error) { const op = errors.Op("sqs_state") - attr, err := j.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{ - QueueUrl: j.queueURL, + attr, err := c.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{ + QueueUrl: c.queueURL, AttributeNames: []types.QueueAttributeName{ types.QueueAttributeNameApproximateNumberOfMessages, types.QueueAttributeNameApproximateNumberOfMessagesDelayed, @@ -265,13 +265,13 @@ func (j *consumer) State(ctx context.Context) (*jobState.State, error) { return nil, errors.E(op, err) } - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) out := &jobState.State{ Pipeline: pipe.Name(), Driver: pipe.Driver(), - Queue: *j.queueURL, - Ready: ready(atomic.LoadUint32(&j.listeners)), + Queue: *c.queueURL, + Ready: ready(atomic.LoadUint32(&c.listeners)), } nom, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessages)]) @@ -292,28 +292,28 @@ func (j *consumer) State(ctx context.Context) (*jobState.State, error) { return out, nil } -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - j.pipeline.Store(p) +func (c *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { + c.pipeline.Store(p) return nil } -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { +func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { const op = errors.Op("sqs_run") - j.Lock() - defer j.Unlock() + c.Lock() + defer c.Unlock() - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p.Name() { return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) } - atomic.AddUint32(&j.listeners, 1) + atomic.AddUint32(&c.listeners, 1) // start listener - go j.listen(context.Background()) + go c.listen(context.Background()) - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -323,11 +323,13 @@ func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { return nil 
} -func (j *consumer) Stop(context.Context) error { - j.pauseCh <- struct{}{} +func (c *consumer) Stop(context.Context) error { + if atomic.LoadUint32(&c.listeners) > 0 { + c.pauseCh <- struct{}{} + } - pipe := j.pipeline.Load().(*pipeline.Pipeline) - j.eh.Push(events.JobEvent{ + pipe := c.pipeline.Load().(*pipeline.Pipeline) + c.eh.Push(events.JobEvent{ Event: events.EventPipeStopped, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -336,27 +338,27 @@ func (j *consumer) Stop(context.Context) error { return nil } -func (j *consumer) Pause(_ context.Context, p string) { +func (c *consumer) Pause(_ context.Context, p string) { // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + c.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) return } - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // no active listeners if l == 0 { - j.log.Warn("no active listeners, nothing to pause") + c.log.Warn("no active listeners, nothing to pause") return } - atomic.AddUint32(&j.listeners, ^uint32(0)) + atomic.AddUint32(&c.listeners, ^uint32(0)) // stop consume - j.pauseCh <- struct{}{} + c.pauseCh <- struct{}{} - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipePaused, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -364,28 +366,28 @@ func (j *consumer) Pause(_ context.Context, p string) { }) } -func (j *consumer) Resume(_ context.Context, p string) { +func (c *consumer) Resume(_ context.Context, p string) { // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) + pipe := c.pipeline.Load().(*pipeline.Pipeline) if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + c.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) return } - l := atomic.LoadUint32(&j.listeners) + l := atomic.LoadUint32(&c.listeners) // no active listeners if l == 1 { - j.log.Warn("sqs listener already in the active state") + c.log.Warn("sqs listener already in the active state") return } // start listener - go j.listen(context.Background()) + go c.listen(context.Background()) // increase num of listeners - atomic.AddUint32(&j.listeners, 1) + atomic.AddUint32(&c.listeners, 1) - j.eh.Push(events.JobEvent{ + c.eh.Push(events.JobEvent{ Event: events.EventPipeActive, Driver: pipe.Driver(), Pipeline: pipe.Name(), @@ -393,12 +395,12 @@ func (j *consumer) Resume(_ context.Context, p string) { }) } -func (j *consumer) handleItem(ctx context.Context, msg *Item) error { - d, err := msg.pack(j.queueURL) +func (c *consumer) handleItem(ctx context.Context, msg *Item) error { + d, err := msg.pack(c.queueURL) if err != nil { return err } - _, err = j.client.SendMessage(ctx, d) + _, err = c.client.SendMessage(ctx, d) if err != nil { return err } diff --git a/plugins/sqs/item.go b/plugins/sqs/item.go index 996adf6c..4e33e99e 100644 --- a/plugins/sqs/item.go +++ b/plugins/sqs/item.go @@ -192,7 +192,7 @@ func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) { }, nil } -func (j *consumer) unpack(msg *types.Message) (*Item, error) { +func (c *consumer) unpack(msg *types.Message) (*Item, error) { const op = errors.Op("sqs_unpack") // reserved if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok { @@ -236,10 +236,10 @@ func (j *consumer) unpack(msg *types.Message) (*Item, error) { // private approxReceiveCount: int64(recCount), - client: 
j.client, - queue: j.queueURL, + client: c.client, + queue: c.queueURL, receiptHandler: msg.ReceiptHandle, - requeueFn: j.handleItem, + requeueFn: c.handleItem, }, } diff --git a/plugins/sqs/listener.go b/plugins/sqs/listener.go index a4280af2..215dd6a5 100644 --- a/plugins/sqs/listener.go +++ b/plugins/sqs/listener.go @@ -18,22 +18,22 @@ const ( NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue" ) -func (j *consumer) listen(ctx context.Context) { //nolint:gocognit +func (c *consumer) listen(ctx context.Context) { //nolint:gocognit for { select { - case <-j.pauseCh: - j.log.Warn("sqs listener stopped") + case <-c.pauseCh: + c.log.Warn("sqs listener stopped") return default: - message, err := j.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ - QueueUrl: j.queueURL, - MaxNumberOfMessages: j.prefetch, + message, err := c.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ + QueueUrl: c.queueURL, + MaxNumberOfMessages: c.prefetch, AttributeNames: []types.QueueAttributeName{types.QueueAttributeName(ApproximateReceiveCount)}, MessageAttributeNames: []string{All}, // The new value for the message's visibility timeout (in seconds). Values range: 0 // to 43200. Maximum: 12 hours. - VisibilityTimeout: j.visibilityTimeout, - WaitTimeSeconds: j.waitTime, + VisibilityTimeout: c.visibilityTimeout, + WaitTimeSeconds: c.waitTime, }) if err != nil { @@ -42,10 +42,10 @@ func (j *consumer) listen(ctx context.Context) { //nolint:gocognit if apiErr, ok := rErr.Err.(*smithy.GenericAPIError); ok { // in case of NonExistentQueue - recreate the queue if apiErr.Code == NonExistentQueue { - j.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault()) - _, err = j.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: j.queue, Attributes: j.attributes, Tags: j.tags}) + c.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault()) + _, err = c.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: c.queue, Attributes: c.attributes, Tags: c.tags}) if err != nil { - j.log.Error("create queue", "error", err) + c.log.Error("create queue", "error", err) } // To successfully create a new queue, you must provide a // queue name that adheres to the limits related to the queues @@ -60,27 +60,27 @@ func (j *consumer) listen(ctx context.Context) { //nolint:gocognit } } - j.log.Error("receive message", "error", err) + c.log.Error("receive message", "error", err) continue } for i := 0; i < len(message.Messages); i++ { m := message.Messages[i] - item, err := j.unpack(&m) + item, err := c.unpack(&m) if err != nil { - _, errD := j.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ - QueueUrl: j.queueURL, + _, errD := c.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: c.queueURL, ReceiptHandle: m.ReceiptHandle, }) if errD != nil { - j.log.Error("message unpack, failed to delete the message from the queue", "error", err) + c.log.Error("message unpack, failed to delete the message from the queue", "error", err) } - j.log.Error("message unpack", "error", err) + c.log.Error("message unpack", "error", err) continue } - j.pq.Insert(item) + c.pq.Insert(item) } } } -- cgit v1.2.3 From 0f5f9517f9b5bb79e265bbf7d9ee8ce4633cf9b4 Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Mon, 30 Aug 2021 22:21:48 +0300 Subject: Add error to the EventJobError event Signed-off-by: Valery Piashchynski 
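 An illustration of what the added field enables for consumers of the events bus — a self-contained sketch with a locally declared JobEvent that mirrors the fields in this patch (the real type lives in pkg/events; names and values here are illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// JobEvent mirrors the fields used by this patch; only the Error field is new.
type JobEvent struct {
	Event   string
	ID      string
	Error   error
	Start   time.Time
	Elapsed time.Duration
}

func main() {
	ev := JobEvent{
		Event: "EventJobError",
		ID:    "job-42",
		Error: errors.New("handler failed"),
		Start: time.Now(),
	}
	// listeners can now report why a job failed, not just that it failed
	fmt.Printf("job %s errored: %v\n", ev.ID, ev.Error)
}
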
--- plugins/jobs/plugin.go | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'plugins') diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go index 236aded3..91a77446 100644 --- a/plugins/jobs/plugin.go +++ b/plugins/jobs/plugin.go @@ -224,6 +224,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit if err != nil { p.events.Push(events.JobEvent{ Event: events.EventJobError, + Error: err, ID: jb.ID(), Start: start, Elapsed: time.Since(start), @@ -248,6 +249,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit p.events.Push(events.JobEvent{ Event: events.EventJobError, ID: jb.ID(), + Error: err, Start: start, Elapsed: time.Since(start), }) @@ -271,6 +273,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit p.events.Push(events.JobEvent{ Event: events.EventJobError, ID: jb.ID(), + Error: err, Start: start, Elapsed: time.Since(start), }) @@ -295,6 +298,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit Event: events.EventJobError, ID: jb.ID(), Start: start, + Error: err, Elapsed: time.Since(start), }) p.putPayload(exec) -- cgit v1.2.3 From 0c10cb989fa1deae3996df272f40e2270a880b52 Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Tue, 31 Aug 2021 11:54:24 +0300 Subject: Finish boltdb tests Signed-off-by: Valery Piashchynski --- plugins/amqp/amqpjobs/consumer.go | 2 +- plugins/amqp/amqpjobs/redial.go | 4 +- plugins/boltdb/boltjobs/consumer.go | 41 ++++++++--------- plugins/boltdb/boltjobs/item.go | 45 +++++++++--------- plugins/boltdb/boltjobs/listener.go | 91 ++++++++++++++++++------------------- 5 files changed, 88 insertions(+), 95 deletions(-) (limited to 'plugins') diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go index 578f36ce..784a102c 100644 --- a/plugins/amqp/amqpjobs/consumer.go +++ b/plugins/amqp/amqpjobs/consumer.go @@ -479,7 +479,7 @@ func (c *consumer) handleItem(ctx context.Context, msg *Item) error { err = pch.Publish(c.exchangeName, tmpQ, false, false, amqp.Publishing{ Headers: table, ContentType: contentType, - Timestamp: time.Now().UTC(), + Timestamp: time.Now(), DeliveryMode: amqp.Persistent, Body: msg.Body(), }) diff --git a/plugins/amqp/amqpjobs/redial.go b/plugins/amqp/amqpjobs/redial.go index 56142e2b..8d21784f 100644 --- a/plugins/amqp/amqpjobs/redial.go +++ b/plugins/amqp/amqpjobs/redial.go @@ -27,7 +27,7 @@ func (c *consumer) redialer() { //nolint:gocognit // trash the broken publishing channel <-c.publishChan - t := time.Now() + t := time.Now().UTC() pipe := c.pipeline.Load().(*pipeline.Pipeline) c.eh.Push(events.JobEvent{ @@ -35,7 +35,7 @@ func (c *consumer) redialer() { //nolint:gocognit Pipeline: pipe.Name(), Driver: pipe.Driver(), Error: err, - Start: time.Now(), + Start: time.Now().UTC(), }) expb := backoff.NewExponentialBackOff() diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go index 2492ab60..ed0eda61 100644 --- a/plugins/boltdb/boltjobs/consumer.go +++ b/plugins/boltdb/boltjobs/consumer.go @@ -65,7 +65,6 @@ func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e } conf := &GlobalCfg{} - err := cfg.UnmarshalKey(PluginName, conf) if err != nil { return nil, errors.E(op, err) @@ -246,49 +245,45 @@ func (c *consumer) Push(_ context.Context, job *job.Job) error { const op = errors.Op("boltdb_jobs_push") err := c.db.Update(func(tx *bolt.Tx) error { item := fromJob(job) + // pool with buffers + buf := c.get() + // encode the job + enc := gob.NewEncoder(buf) + err := enc.Encode(item) + if err != nil { + c.put(buf) + return 
errors.E(op, err) + } + + value := make([]byte, buf.Len()) + copy(value, buf.Bytes()) + c.put(buf) // handle delay if item.Options.Delay > 0 { b := tx.Bucket(utils.AsBytes(DelayBucket)) - tKey := time.Now().Add(time.Second * time.Duration(item.Options.Delay)).Format(time.RFC3339) - - // pool with buffers - buf := c.get() - defer c.put(buf) + tKey := time.Now().UTC().Add(time.Second * time.Duration(item.Options.Delay)).Format(time.RFC3339) - enc := gob.NewEncoder(buf) - err := enc.Encode(item) + err = b.Put(utils.AsBytes(tKey), value) if err != nil { return errors.E(op, err) } - value := make([]byte, buf.Len()) - copy(value, buf.Bytes()) - atomic.AddUint64(c.delayed, 1) - return b.Put(utils.AsBytes(tKey), value) + return nil } b := tx.Bucket(utils.AsBytes(PushBucket)) - - // pool with buffers - buf := c.get() - defer c.put(buf) - - enc := gob.NewEncoder(buf) - err := enc.Encode(item) + err = b.Put(utils.AsBytes(item.ID()), value) if err != nil { return errors.E(op, err) } - value := make([]byte, buf.Len()) - copy(value, buf.Bytes()) - // increment active counter atomic.AddUint64(c.active, 1) - return b.Put(utils.AsBytes(item.ID()), value) + return nil }) if err != nil { diff --git a/plugins/boltdb/boltjobs/item.go b/plugins/boltdb/boltjobs/item.go index 4f02bb43..837f8c63 100644 --- a/plugins/boltdb/boltjobs/item.go +++ b/plugins/boltdb/boltjobs/item.go @@ -43,8 +43,7 @@ type Options struct { Delay int64 `json:"delay,omitempty"` // private - db *bbolt.DB - + db *bbolt.DB active *uint64 delayed *uint64 } @@ -137,6 +136,17 @@ func (i *Item) Nack() error { return tx.Commit() } +/* +Requeue algorithm: +1. Rewrite the item headers and delay. +2. Begin a writable transaction on the db attached to the item. +3. Delete the item from the InQueueBucket. +4. Handle items with a delay: + 4.1. Get the DelayBucket. + 4.2. Make a key by adding the delay to time.Now(), formatted as RFC3339. + 4.3. Put this key with the value into the DelayBucket. +5. 
W/o delay, put the key with value to the PushBucket (requeue) +*/ func (i *Item) Requeue(headers map[string][]string, delay int64) error { const op = errors.Op("boltdb_item_requeue") i.Headers = headers @@ -153,23 +163,23 @@ func (i *Item) Requeue(headers map[string][]string, delay int64) error { return errors.E(op, i.rollback(err, tx)) } + // encode the item + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err = enc.Encode(i) + val := make([]byte, buf.Len()) + copy(val, buf.Bytes()) + buf.Reset() + if delay > 0 { delayB := tx.Bucket(utils.AsBytes(DelayBucket)) - tKey := time.Now().Add(time.Second * time.Duration(delay)).Format(time.RFC3339) - - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - err = enc.Encode(i) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } + tKey := time.Now().UTC().Add(time.Second * time.Duration(delay)).Format(time.RFC3339) - err = delayB.Put(utils.AsBytes(tKey), buf.Bytes()) if err != nil { return errors.E(op, i.rollback(err, tx)) } - err = inQb.Delete(utils.AsBytes(i.ID())) + err = delayB.Put(utils.AsBytes(tKey), val) if err != nil { return errors.E(op, i.rollback(err, tx)) } @@ -178,20 +188,11 @@ func (i *Item) Requeue(headers map[string][]string, delay int64) error { } pushB := tx.Bucket(utils.AsBytes(PushBucket)) - - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - err = enc.Encode(i) if err != nil { return errors.E(op, i.rollback(err, tx)) } - err = pushB.Put(utils.AsBytes(i.ID()), buf.Bytes()) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - err = inQb.Delete(utils.AsBytes(i.ID())) + err = pushB.Put(utils.AsBytes(i.ID()), val) if err != nil { return errors.E(op, i.rollback(err, tx)) } diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go index d184303a..39de34ab 100644 --- a/plugins/boltdb/boltjobs/listener.go +++ b/plugins/boltdb/boltjobs/listener.go @@ -16,7 +16,7 @@ func (c *consumer) listener() { for { select { case <-c.stopCh: - c.log.Warn("boltdb listener stopped") + c.log.Info("boltdb listener stopped") return case <-tt.C: if atomic.LoadUint64(c.active) >= uint64(c.prefetch) { @@ -78,12 +78,22 @@ func (c *consumer) listener() { } func (c *consumer) delayedJobsListener() { - tt := time.NewTicker(time.Millisecond * 100) + tt := time.NewTicker(time.Millisecond * 10) defer tt.Stop() + + // just some 90's + loc, err := time.LoadLocation("UTC") + if err != nil { + c.log.Error("failed to load location, delayed jobs won't work", "error", err) + return + } + + var startDate = utils.AsBytes(time.Date(1990, 1, 1, 0, 0, 0, 0, loc).Format(time.RFC3339)) + for { select { case <-c.stopCh: - c.log.Warn("boltdb listener stopped") + c.log.Info("boltdb listener stopped") return case <-tt.C: tx, err := c.db.Begin(true) @@ -95,45 +105,37 @@ func (c *consumer) delayedJobsListener() { delayB := tx.Bucket(utils.AsBytes(DelayBucket)) inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - // get first item - k, v := delayB.Cursor().First() - if k == nil && v == nil { - _ = tx.Commit() - continue - } - - t, err := time.Parse(time.RFC3339, utils.AsString(k)) - if err != nil { - c.rollback(err, tx) - continue - } - - if t.After(time.Now()) { - _ = tx.Commit() - continue - } - - buf := bytes.NewReader(v) - dec := gob.NewDecoder(buf) - - item := &Item{} - err = dec.Decode(item) - if err != nil { - c.rollback(err, tx) - continue - } - - err = inQb.Put(utils.AsBytes(item.ID()), v) - if err != nil { - c.rollback(err, tx) - continue - } - - // delete key from the PushBucket - err = delayB.Delete(k) - 
if err != nil { - c.rollback(err, tx) - continue + cursor := delayB.Cursor() + endDate := utils.AsBytes(time.Now().UTC().Format(time.RFC3339)) + + for k, v := cursor.Seek(startDate); k != nil && bytes.Compare(k, endDate) <= 0; k, v = cursor.Next() { + buf := bytes.NewReader(v) + dec := gob.NewDecoder(buf) + + item := &Item{} + err = dec.Decode(item) + if err != nil { + c.rollback(err, tx) + continue + } + + err = inQb.Put(utils.AsBytes(item.ID()), v) + if err != nil { + c.rollback(err, tx) + continue + } + + // delete the key from the DelayBucket + err = delayB.Delete(k) + if err != nil { + c.rollback(err, tx) + continue + } + + // attach pointer to the DB + item.attachDB(c.db, c.active, c.delayed) + // as the last step, after commit, put the item into the PQ + c.pq.Insert(item) } err = tx.Commit() @@ -141,11 +143,6 @@ c.rollback(err, tx) continue } - - // attach pointer to the DB - item.attachDB(c.db, c.active, c.delayed) - // as the last step, after commit, put the item into the PQ - c.pq.Insert(item) } } } -- cgit v1.2.3 From 3a187237282444f70b1eae8881f08cb6f0e068fc Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Tue, 31 Aug 2021 12:34:19 +0300 Subject: Update arch diagram Signed-off-by: Valery Piashchynski --- plugins/boltdb/doc/boltjobs.drawio | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'plugins') diff --git a/plugins/boltdb/doc/boltjobs.drawio b/plugins/boltdb/doc/boltjobs.drawio index feeccae0..7d1f3531 100644 --- a/plugins/boltdb/doc/boltjobs.drawio +++ b/plugins/boltdb/doc/boltjobs.drawio @@ -1 +1 @@ -ddHBDsIwDADQr+GOEPcDc+rF0w6eyahAwtaFsWz69W4ZiGR6orwWSgrhZTtfnOj1DSVYwqicCT8Rhg7HoliWVZ5BKAuinJHBEtTmBbEw6GgkDFmhR7Te9Dk22HXQ+MyEczjlZQ+0eddeKNhB3Qi717uRXgctKE2JKxilY2sWM62I1QEGLSROX8QrwkuH6LeonUuw6/jiYLZz5z/Zz8scdP7HgSVIdy+b7I949QY= \ No newline at end of file
+7V1bc5s4GP01nmkfkgHE9TF2km13up20me1uH2WQbRJsuYATe3/9ijtICiHmIrvBnWmNEAjrfNejT3QCZuv9Hz7crv7CDvImiuTsJ+B6oigKABr5J2o5JC0yUJWkZem7TtpWNNy7/6G0UUpbd66DgkrHEGMvdLfVRhtvNsgOK23Q9/FztdsCe9VRt3CJmIZ7G3ps6z+uE67SVl2SihOfkLtcZUMr2Zk1zHqnDcEKOvi51ARuJmDmYxwm39b7GfKi6csmJrnu9oWz+ZP5aBM2ucDcf0ZP8pX6w3iw/K8/fijw1/VFikYQHrJfjBwyAekh9sMVXuIN9G6K1qmPdxsHRXeVyFHR5wvGW9Iok8YHFIaHFE24CzFpWoVrLz1LHtg//Btdf6llhz/T28UH1/vK0SE9WuBNeAvXrhc1zGBgQ8eF5OlnRO7S0+mYMiDHyU+Lfs+LM5aJFPSXKKyZJjnHi4g6wmtEHotc5yMPhu5T9f4wFbll3i+/9A67ZGRFSvVDV1PROORiVb1F8lzpVWVoX7uRSt0owDvfRsyNyJfS7ymaYsl5gxRl2jqwGO3dsCRF5Ohn6UwhQ9HBoSxQpyR6CTKviV4DEVVbimhFJt4sAMl9n6C3S0e6+/v+E2n5fjdjRKMK/PPKDdH9FsaT8EzcSBXkN078wvW8GfawHw8F5tA2nag9CH38iEpnFKCqmpND9YT8EO3rwWInN71AM6vqp2R6/FxyGZkbWJW8Ba2mneFhiNRHuaSNhW7y9VGcXoGGeqWJ1CvzXHAs7GrFqhZG9uTsalP8hdpVa8RfNP66SPwB41enJPm5nrb0qVUnuSAf0+Q5SaADCzjHwtTKp+pVl6qZGutSZY5LBX25VFlmJv1EdVGcTrW1lfzsxqDCK7VhmtRVdqOywe0uWJGW6c5+RGG3uqhFf3i6qMefFL+yjsYfITqqaBQwoGHcSwPYmZIqQh1mNRGVapX0pUQ050dqHGZNwmM7aG7OubacfG5vX5eTDs2Bfg4hlgxOR2Qachfy5ExiLFk5hyBL1hgDP8PbaPwHPOcKxxc4R14VUOi5yw35bpNJQkTlppFldW3oXaUn1q7jJLKDAvc/OI/vF8GzjfxW/Ju06US75ul4E43lWPKUFE/HmuRMdBnKGp140e4TkbOUdNJa+vYLWbIqPuRCoXwDXiwC1A9rqYvVfKO56tMS0RONKUL1ZaOXoJEkB1QGMXDUmE0AGzZ6bhCiDTESivRBltbBx24jyAXSbZsXATiGNZeOdgitIkUmhOdFiuqQkaLMRvXyZTwHheWXiO6Sv1aI/P15822HdigL+XWPPOB07ldw03/tosW8eAIvgngGr0gHGWz38TRm58m3ZfSvEg14jTwURiMsfLzOh4tE5YX0gqAQ8n2PhxZhO88TmasghZ4RoQ3eIDHSQ5EBwGSlJ5eUQfh1WSgx+5ag8Qw9h9HUc1jDeA59aM9hsJ7j010k7th/RH4QzSvGXkuvQckFE79SbgXJjoYMnluxdANAvamItDIEskQtdGuG6JU2RSgtKF0qw6x9vzkd6cYUWA1NQamgRkACmeVCp+8NfmcReCFnHWihRjCL9IZcsisZYByBFH9OVDrEcowW49G/o2CLNwEa1os7EJkLbnKo2yaaL8R4cV28FxdaMCOVdLf5QnvLdYPBdDQD8rRJ4OwpSyr6FdqPw6rnwrQRn7uZm5qqNbatrdTTtKraaUrCtZOl0m58n0xNc2qkC1q+KTnSBXXfHC4rq4xI4TI41JrGQYvm2bsLhQRnREdFQsq5GNOhypbiS698Hx5KHVKdaE6dWDq1J4G6QDFr+5MvySN0yq8obFXV1dCW3tGQ6ag8S28qc6ALolPEm3ogOJU+jk4xWlmPU0ikFLWhXWmdZh9nWHJ3lUmqVW9YZBnUXtCPZQGCV5TbkwC/t+y2ZQmPlF2qTM6iN+pRF1j1/XsSXbEZ8HslsBrLLhBKYCmcGlgfh9h+n8tQ8smlyOrZVKe32SlyatqrNVReVejyFFveeE3GjcbvoYC9PcM8QKF642Xj3sqPsui0Uh0WxQDzmHQsVx4dVQr0+/JdJDyqYsmxvQpvZ1B/hBdLT+ZFXX/GdWSV0q5KJdll3seN6jocMquXI8xlFc2CYJWFGRiDwnxCe0veT4AMGgfIpkgfq57hAmI1/jJaB2C3t5Jknqh0iKHDKXrVMl8hraj+hqZRgtlH4s+mdt/Rr8hDtYwL37r2CR0D8tc+VV2T1GEyO5pntIS/yiN7YdY5WZauCXERlsVsaFmEvh8ke0o2txtVV5Y04bqritTd41gZSndbx4wCdDfDs/eNx0dFBQZd/SbVRwV0f1Wu7w/q+/cTRagsn5GRTD1vd2tdMtUD36Q14ZsG3e6mM/B83pDjb0mcJ817IAJbVzgMQAQackMX0d8bK7gePCGPHl4hjxi83iVRRJUQ8dZilEGJIpUl3JOtpSsUc7tOahh9uFlGmH6A8T0J2kH82OEqsecJ4nGvj+Udp6XNowlnjPZb1ydqmt+jLXs83EbSYWVFpdbtdB6paA0aIdao/6j4tWAa1EKAxdsVzHOy/Sk+u80jUdDEkEeaHan7V/w8iZ5hmtkCot3SIzpUFTcOn0a4ixfCqVW4uekdV3l7w1tjg152Gf7ssKLeLxblbzHn34WvpsqDAcf+8qKv/hBkX1zM0m0jgiUEqWU5VTiCCoMguxVzRLBA0KJiII2TAg2LIFuOz9JmI4IFgnL+4tMsjOVs+RkWQnYNia3jHyEsQUjvvODt2hoWQjZpHSGsh5AiHgzOUuGwELIk3whhvSEFp6aF7Kt/2C0lI4SleJR6NRiX/RsUQZbQYUuDRgQLBOktoDonrR8WQZbFGXWwVgfp/Uqic8LMJowINq6doBdRLNEQssTMaEbrIKTfsMplwwdFkCVm2JdmjggWCOpKA4K7IwjJYfEfTyY1IMV/4Alu/gc= \ No newline at end of file -- cgit v1.2.3 From 2f44878a7eac71d7b81e66246b46c615a95892d7 Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Tue, 31 Aug 2021 13:05:15 +0300 Subject: Tune listener timers Signed-off-by: Valery Piashchynski --- plugins/boltdb/boltjobs/listener.go | 10 ++-------- plugins/jobs/plugin.go | 1 + 2 files changed, 3 insertions(+), 8 deletions(-) (limited to 'plugins') diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go index 39de34ab..7c161555 100644 --- a/plugins/boltdb/boltjobs/listener.go +++ 
b/plugins/boltdb/boltjobs/listener.go @@ -3,7 +3,6 @@ package boltjobs import ( "bytes" "encoding/gob" - "sync/atomic" "time" "github.com/spiral/roadrunner/v2/utils" @@ -11,7 +10,7 @@ import ( ) func (c *consumer) listener() { - tt := time.NewTicker(time.Millisecond * 10) + tt := time.NewTicker(time.Millisecond) defer tt.Stop() for { select { @@ -19,11 +18,6 @@ c.log.Info("boltdb listener stopped") return case <-tt.C: - if atomic.LoadUint64(c.active) >= uint64(c.prefetch) { - time.Sleep(time.Second) - continue - } - tx, err := c.db.Begin(true) if err != nil { c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err) continue @@ -78,7 +72,7 @@ } func (c *consumer) delayedJobsListener() { - tt := time.NewTicker(time.Millisecond * 10) + tt := time.NewTicker(time.Second) defer tt.Stop() // just some 90's diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go index 91a77446..83b302ee 100644 --- a/plugins/jobs/plugin.go +++ b/plugins/jobs/plugin.go @@ -318,6 +318,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit Start: start, Elapsed: time.Since(start), }) + // return payload p.putPayload(exec) } -- cgit v1.2.3 From 31cf040029eb0b26278e4a9948cbc1aba77ed58b Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Tue, 31 Aug 2021 14:59:28 +0300 Subject: Naming: service -> plugin. Fix a bug with workers surviving in the debug mode Signed-off-by: Valery Piashchynski
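 The workers fix boils down to shutdown ordering: signal the pollers first, give them a grace period, and destroy the worker pool last. A minimal sketch of that ordering (names and the shortened grace period are illustrative, not the plugin's actual code):

package main

import (
	"context"
	"fmt"
	"time"
)

type pool struct{}

func (p *pool) Destroy(_ context.Context) { fmt.Println("pool destroyed") }

func main() {
	stop := make(chan struct{})
	done := make(chan struct{})

	go func() { // a poller goroutine, as in the jobs plugin
		<-stop
		fmt.Println("poller stopped")
		close(done)
	}()

	close(stop) // 1. signal the pollers first
	<-done
	time.Sleep(10 * time.Millisecond)       // 2. grace period (5 seconds in the plugin)
	(&pool{}).Destroy(context.Background()) // 3. destroy the pool last, so no worker survives
}
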
// The main target is to stop the drivers @@ -358,6 +354,11 @@ func (p *Plugin) Stop() error { // just wait pollers for 5 seconds before exit time.Sleep(time.Second * 5) + + p.Lock() + p.workersPool.Destroy(context.Background()) + p.Unlock() + return nil } diff --git a/plugins/resetter/plugin.go b/plugins/resetter/plugin.go index b2fe59af..191185ae 100644 --- a/plugins/resetter/plugin.go +++ b/plugins/resetter/plugin.go @@ -21,7 +21,7 @@ func (p *Plugin) Reset(name string) error { const op = errors.Op("resetter_plugin_reset_by_name") svc, ok := p.registry[name] if !ok { - return errors.E(op, errors.Errorf("no such service: %s", name)) + return errors.E(op, errors.Errorf("no such plugin: %s", name)) } return svc.Reset() diff --git a/plugins/sqs/item.go b/plugins/sqs/item.go index 4e33e99e..969d8b5b 100644 --- a/plugins/sqs/item.go +++ b/plugins/sqs/item.go @@ -22,6 +22,7 @@ const ( ) var itemAttributes = []string{ + job.RRID, job.RRJob, job.RRDelay, job.RRPriority, @@ -184,6 +185,7 @@ func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) { QueueUrl: queue, DelaySeconds: int32(i.Options.Delay), MessageAttributes: map[string]types.MessageAttributeValue{ + job.RRID: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Ident)}, job.RRJob: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Job)}, job.RRDelay: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Delay)))}, job.RRHeaders: {DataType: aws.String(BinaryType), BinaryValue: data, BinaryListValues: nil, StringListValues: nil, StringValue: nil}, @@ -228,6 +230,7 @@ func (c *consumer) unpack(msg *types.Message) (*Item, error) { item := &Item{ Job: *msg.MessageAttributes[job.RRJob].StringValue, + Ident: *msg.MessageAttributes[job.RRID].StringValue, Payload: *msg.Body, Headers: h, Options: &Options{ diff --git a/plugins/status/plugin.go b/plugins/status/plugin.go index 82a0fa6c..b76ad0a3 100644 --- a/plugins/status/plugin.go +++ b/plugins/status/plugin.go @@ -85,7 +85,7 @@ func (c *Plugin) status(name string) (Status, error) { const op = errors.Op("checker_plugin_status") svc, ok := c.statusRegistry[name] if !ok { - return Status{}, errors.E(op, errors.Errorf("no such service: %s", name)) + return Status{}, errors.E(op, errors.Errorf("no such plugin: %s", name)) } return svc.Status(), nil @@ -96,7 +96,7 @@ func (c *Plugin) ready(name string) (Status, error) { const op = errors.Op("checker_plugin_ready") svc, ok := c.readyRegistry[name] if !ok { - return Status{}, errors.E(op, errors.Errorf("no such service: %s", name)) + return Status{}, errors.E(op, errors.Errorf("no such plugin: %s", name)) } return svc.Ready(), nil -- cgit v1.2.3