path: root/plugins
author     Valery Piashchynski <[email protected]>    2021-01-05 17:37:17 +0300
committer  Valery Piashchynski <[email protected]>    2021-01-05 17:37:17 +0300
commit     13b01ccaba1eedeb99d37842ec8f2019d2625187 (patch)
tree       c645c240336666fa63d70ed2703a78df828c597f /plugins
parent     877b0ed461c7d5e1de87b7561f414aeb236cf3ec (diff)
Finish implementation of the KV
Diffstat (limited to 'plugins')
-rw-r--r--  plugins/kv/boltdb/config.go             23
-rw-r--r--  plugins/kv/boltdb/plugin.go            476
-rw-r--r--  plugins/kv/boltdb/plugin_unit_test.go  485
-rw-r--r--  plugins/kv/interface.go                 45
-rw-r--r--  plugins/kv/memcached/config.go          10
-rw-r--r--  plugins/kv/memcached/plugin.go         243
-rw-r--r--  plugins/kv/memcached/storage_test.go   444
-rw-r--r--  plugins/kv/memory/config.go             12
-rw-r--r--  plugins/kv/memory/storage.go           263
-rw-r--r--  plugins/kv/memory/storage_test.go      470
-rw-r--r--  plugins/reload/plugin.go                 2
-rw-r--r--  plugins/reload/watcher.go                4
12 files changed, 2474 insertions, 3 deletions
diff --git a/plugins/kv/boltdb/config.go b/plugins/kv/boltdb/config.go
new file mode 100644
index 00000000..6b116611
--- /dev/null
+++ b/plugins/kv/boltdb/config.go
@@ -0,0 +1,23 @@
+package boltdb
+
+type Config struct {
+ // Dir is a directory to store the DB files
+ Dir string
+ // File is boltDB file. No need to create it by your own,
+ // boltdb driver is able to create the file, or read existing
+ File string
+ // Bucket to store data in boltDB
+ Bucket string
+
+ Permissions int
+ TTL int
+}
+
+func (s *Config) InitDefaults() error {
+ s.Dir = "." // current dir
+ s.Bucket = "rr" // default bucket name
+ s.File = "rr.db" // default file name
+ s.Permissions = 0777 // free for all
+ s.TTL = 60 // 60 seconds is default TTL
+ return nil
+}
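
As a quick sanity check of these defaults: a standalone sketch (not part of the plugin) that prints the database location and file mode the driver would end up opening, assuming the same path.Join(Dir, File) and os.FileMode(Permissions) usage that appears in Serve further below.

package main

import (
	"fmt"
	"os"
	"path"
)

func main() {
	// values taken from InitDefaults above
	dir, file, permissions := ".", "rr.db", 0777

	// the boltdb plugin opens path.Join(Dir, File) with os.FileMode(Permissions)
	fmt.Println("db path:", path.Join(dir, file))     // rr.db
	fmt.Println("db mode:", os.FileMode(permissions)) // -rwxrwxrwx
}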
diff --git a/plugins/kv/boltdb/plugin.go b/plugins/kv/boltdb/plugin.go
new file mode 100644
index 00000000..e5eda0c2
--- /dev/null
+++ b/plugins/kv/boltdb/plugin.go
@@ -0,0 +1,476 @@
+package boltdb
+
+import (
+ "bytes"
+ "context"
+ "encoding/gob"
+ "os"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ bolt "go.etcd.io/bbolt"
+)
+
+const PluginName = "boltdb"
+
+// BoltDB K/V storage.
+type Plugin struct {
+ // db instance
+ DB *bolt.DB
+ // bucket name, should be UTF-8
+ bucket []byte
+
+ // config for RR integration
+ cfg *Config
+
+ // logger
+ log logger.Logger
+
+ // gc contains keys with timeouts
+ gc *sync.Map
+ // default timeout for cache cleanup is 1 minute
+ timeout time.Duration
+
+ // stop is used to stop keys GC and close boltdb connection
+ stop chan struct{}
+}
+
+// NewBoltClient instantiates a new boltdb client
+// The parameters are:
+// path string -- path to the database file (can be placed anywhere); if the file does not exist, it will be created
+// perm os.FileMode -- file permissions, for example 0777
+// options *bolt.Options -- boltDB options, such as timeouts, NoGrowSync and others
+// bucket string -- name of the bucket to use, should be UTF-8
+func NewBoltClient(path string, perm os.FileMode, options *bolt.Options, bucket string, ttl time.Duration) (kv.Storage, error) {
+ const op = errors.Op("newBoltClient")
+ db, err := bolt.Open(path, perm, options)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // bucket should be SET
+ if bucket == "" {
+ return nil, errors.E(op, errors.Str("bucket should be set"))
+ }
+
+ // create bucket if it does not exist
+ // tx.Commit invokes via the db.Update
+ err = db.Update(func(tx *bolt.Tx) error {
+ _, err = tx.CreateBucketIfNotExists([]byte(bucket))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // if TTL is not set, make it default
+ if ttl == 0 {
+ ttl = time.Minute
+ }
+
+ s := &Plugin{
+ DB: db,
+ bucket: []byte(bucket),
+ stop: make(chan struct{}),
+ timeout: ttl,
+ gc: &sync.Map{},
+ }
+
+ // start the TTL gc
+ go s.gcPhase()
+
+ return s, nil
+}
+
+func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ const op = errors.Op("boltdb plugin init")
+ s.cfg = &Config{}
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ s.log = log
+
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ const op = errors.Op("boltdb serve")
+ errCh := make(chan error, 1)
+
+ db, err := bolt.Open(path.Join(s.cfg.Dir, s.cfg.File), os.FileMode(s.cfg.Permissions), nil)
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+
+ // create bucket if it does not exist
+ // tx.Commit invokes via the db.Update
+ err = db.Update(func(tx *bolt.Tx) error {
+ const upOp = errors.Op("boltdb Update")
+ _, err = tx.CreateBucketIfNotExists([]byte(s.cfg.Bucket))
+ if err != nil {
+ return errors.E(op, upOp, err)
+ }
+ return nil
+ })
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ s.DB = db
+ s.bucket = []byte(s.cfg.Bucket)
+ s.stop = make(chan struct{})
+ s.timeout = time.Duration(s.cfg.TTL) * time.Second
+ s.gc = &sync.Map{}
+
+ // start the TTL gc
+ go s.gcPhase()
+
+ return errCh
+}
+
+func (s Plugin) Stop() error {
+ const op = errors.Op("boltdb stop")
+ err := s.Close()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+func (s Plugin) Has(ctx context.Context, keys ...string) (map[string]bool, error) {
+ const op = errors.Op("boltdb Has")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ m := make(map[string]bool, len(keys))
+
+ // this is a read-only transaction
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ // Get retrieves the value for a key in the bucket.
+ // Returns a nil value if the key does not exist or if the key is a nested bucket.
+ // The returned value is only valid for the life of the transaction.
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ exist := b.Get([]byte(key))
+ if exist != nil {
+ m[key] = true
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (s Plugin) Get(ctx context.Context, key string) ([]byte, error) {
+ const op = errors.Op("boltdb Get")
+ // to get cases like " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ var val []byte
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ val = b.Get([]byte(key))
+
+ // try to decode values
+ if val != nil {
+ buf := bytes.NewReader(val)
+ decoder := gob.NewDecoder(buf)
+
+ i := kv.Item{}
+ err := decoder.Decode(&i)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // set the value (plain, non rune-aware []byte conversion of the stored string)
+ val = []byte(i.Value)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return val, nil
+}
+
+func (s Plugin) MGet(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("boltdb MGet")
+ // guard against nil keys
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+
+ for _, key := range keys {
+ value := b.Get([]byte(key))
+ if value != nil {
+ m[key] = value
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return m, nil
+}
+
+// Set puts the K/V items into boltdb
+func (s Plugin) Set(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("boltdb Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // start writable transaction
+ tx, err := s.DB.Begin(true)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ defer func() {
+ err = tx.Commit()
+ if err != nil {
+ errRb := tx.Rollback()
+ if errRb != nil {
+ s.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb)
+ }
+ }
+ }()
+
+ b := tx.Bucket(s.bucket)
+ // use access by index to avoid copying
+ for i := range items {
+ // performance note: a prepared byte slice with an initial capacity could be passed here.
+ // buf and the gob encoder cannot be moved out of the loop, because the encoder
+ // keeps previously written state unless it is re-initialized
+ buf := bytes.Buffer{}
+ encoder := gob.NewEncoder(&buf)
+ // reject empty items
+ if items[i] == (kv.Item{}) {
+ return errors.E(op, errors.EmptyItem)
+ }
+
+ err = encoder.Encode(&items[i])
+ if err != nil {
+ return errors.E(op, err)
+ }
+ // buf.Bytes returns the underlying slice without copying; revisit in case of performance problems
+ err = b.Put([]byte(items[i].Key), buf.Bytes())
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // if there were no errors and a TTL is set, put the key with its timeout into the map for a future GC check
+ // we do not need a mutex here, since we use sync.Map
+ if items[i].TTL != "" {
+ // check correctness of provided TTL
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ s.gc.Store(items[i].Key, items[i].TTL)
+ }
+
+ buf.Reset()
+ }
+
+ return nil
+}
+
+// Delete removes the given keys from the DB
+func (s Plugin) Delete(ctx context.Context, keys ...string) error {
+ const op = errors.Op("boltdb Delete")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ // start writable transaction
+ tx, err := s.DB.Begin(true)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ defer func() {
+ err = tx.Commit()
+ if err != nil {
+ errRb := tx.Rollback()
+ if errRb != nil {
+ s.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb)
+ }
+ }
+ }()
+
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+
+ for _, key := range keys {
+ err = b.Delete([]byte(key))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ }
+
+ return nil
+}
+
+// MExpire sets the expiration time for the given keys.
+// If a key already has an expiration time, it will be overwritten.
+func (s Plugin) MExpire(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("boltdb MExpire")
+ for i := range items {
+ if items[i].TTL == "" || strings.TrimSpace(items[i].Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // verify provided TTL
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.gc.Store(items[i].Key, items[i].TTL)
+ }
+ return nil
+}
+
+func (s Plugin) TTL(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("boltdb TTL")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for i := range keys {
+ if item, ok := s.gc.Load(keys[i]); ok {
+ // the type assertion is safe enough here: only kv.Item.TTL strings are stored in the gc map
+ m[keys[i]] = item.(string)
+ }
+ }
+ return m, nil
+}
+
+// Close the DB connection
+func (s Plugin) Close() error {
+ // stop the keys GC
+ s.stop <- struct{}{}
+ return s.DB.Close()
+}
+
+// ========================= PRIVATE =================================
+
+func (s Plugin) gcPhase() {
+ t := time.NewTicker(s.timeout)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ // capture the current time before the loop starts so every key is checked against the same moment
+ now := time.Now()
+ s.gc.Range(func(key, value interface{}) bool {
+ const op = errors.Op("gcPhase")
+ k := key.(string)
+ v, err := time.Parse(time.RFC3339, value.(string))
+ if err != nil {
+ return false
+ }
+
+ if now.After(v) {
+ // time expired
+ s.gc.Delete(k)
+ err := s.DB.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ err := b.Delete([]byte(k))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ })
+ if err != nil {
+ s.log.Error("error during the gc phase of update", "error", err)
+ // TODO: this error only stops the current Range pass, so the ticker stays active;
+ // to prevent that, t.Stop() would need to be invoked
+ return false
+ }
+ }
+ return true
+ })
+ case <-s.stop:
+ return
+ }
+ }
+}
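
Outside of the RR service container, the standalone constructor above can be used directly. Below is a rough usage sketch; the boltdb import path is assumed from the file location in this commit, and the TTL is an absolute RFC 3339 timestamp, as kv.Item expects.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/kv"
	"github.com/spiral/roadrunner/v2/plugins/kv/boltdb"
)

func main() {
	// open (or create) rr.db with the "rr" bucket and a 1-minute GC interval
	store, err := boltdb.NewBoltClient("rr.db", 0777, nil, "rr", time.Minute)
	if err != nil {
		panic(err)
	}
	defer func() {
		_ = store.Close()
	}()

	ctx := context.Background()

	// store a value that expires in 10 seconds (TTL is an RFC 3339 timestamp)
	err = store.Set(ctx, kv.Item{
		Key:   "greeting",
		Value: "hello world",
		TTL:   time.Now().Add(10 * time.Second).Format(time.RFC3339),
	})
	if err != nil {
		panic(err)
	}

	val, err := store.Get(ctx, "greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // hello world
}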
diff --git a/plugins/kv/boltdb/plugin_unit_test.go b/plugins/kv/boltdb/plugin_unit_test.go
new file mode 100644
index 00000000..a12c830d
--- /dev/null
+++ b/plugins/kv/boltdb/plugin_unit_test.go
@@ -0,0 +1,485 @@
+package boltdb
+
+import (
+ "context"
+ "os"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/stretchr/testify/assert"
+)
+
+func initStorage() kv.Storage {
+ storage, err := NewBoltClient("rr.db", 0777, nil, "rr", time.Second)
+ if err != nil {
+ panic(err)
+ }
+ return storage
+}
+
+func cleanup(t *testing.T, path string) {
+ err := os.RemoveAll(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has(context.Background(), "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // the key should be present
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete(ctx, "key"+strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world2",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get(ctx, "key")
+ assert.NoError(t, err)
+
+ if string(res) != "hello world" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete(ctx, "key", "key2"))
+ // check that keys are not present
+ res, err = s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+ ctx := context.Background()
+
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // check
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ assert.Error(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "asdf",
+ }))
+
+ _, err = s.Has(ctx, "key")
+ assert.NoError(t, err)
+
+ assert.Error(t, s.Set(ctx, kv.Item{}))
+
+ err = s.Delete(ctx, "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(ctx, i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 2)
+ m, err := s.TTL(ctx, "key", "key2")
+ assert.NoError(t, err)
+
+ // remove a precision 4.02342342 -> 4
+ keyTTL, err := strconv.Atoi(m["key"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // remove a precision 4.02342342 -> 4
+ key2TTL, err := strconv.Atoi(m["key2"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.True(t, keyTTL < 5)
+ assert.True(t, key2TTL < 5)
+
+ time.Sleep(time.Second * 4)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
diff --git a/plugins/kv/interface.go b/plugins/kv/interface.go
new file mode 100644
index 00000000..f17754e6
--- /dev/null
+++ b/plugins/kv/interface.go
@@ -0,0 +1,45 @@
+package kv
+
+import (
+ "context"
+)
+
+// Item represents a general storage item
+type Item struct {
+ // Key of the item
+ Key string
+ // Value of the item
+ Value string
+ // TTL is the expiration time in RFC 3339 format; the item lives until then
+ TTL string
+}
+
+// Storage represents single abstract storage.
+type Storage interface {
+ // Has checks if value exists.
+ Has(ctx context.Context, keys ...string) (map[string]bool, error)
+
+ // Get loads value content into a byte slice.
+ Get(ctx context.Context, key string) ([]byte, error)
+
+ // MGet loads the content of multiple values.
+ // If there is no value for a key, that key will not be in the map.
+ MGet(ctx context.Context, keys ...string) (map[string]interface{}, error)
+
+ // Set is used to upload items to KV with TTL.
+ // An empty TTL means no expiration.
+ Set(ctx context.Context, items ...Item) error
+
+ // MExpire sets the TTL for multiple keys.
+ MExpire(ctx context.Context, items ...Item) error
+
+ // TTL returns the remaining time to live for the provided keys.
+ // Not supported by the memcached and boltdb drivers.
+ TTL(ctx context.Context, keys ...string) (map[string]interface{}, error)
+
+ // Delete one or multiple keys.
+ Delete(ctx context.Context, keys ...string) error
+
+ // Close closes the storage and underlying resources.
+ Close() error
+}
\ No newline at end of file
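
Since every driver in this commit implements the same interface, driver-agnostic helpers can be written against kv.Storage alone. A small sketch of a hypothetical helper that converts a relative duration into the absolute RFC 3339 timestamp the TTL field expects:

package kvutil

import (
	"context"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/kv"
)

// SetWithTTL is a hypothetical helper: it takes a relative duration and stores
// the item with the absolute RFC 3339 expiration timestamp the drivers expect.
func SetWithTTL(ctx context.Context, s kv.Storage, key, value string, ttl time.Duration) error {
	item := kv.Item{Key: key, Value: value}
	if ttl > 0 {
		item.TTL = time.Now().Add(ttl).Format(time.RFC3339)
	}
	return s.Set(ctx, item)
}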
diff --git a/plugins/kv/memcached/config.go b/plugins/kv/memcached/config.go
new file mode 100644
index 00000000..62f29ef2
--- /dev/null
+++ b/plugins/kv/memcached/config.go
@@ -0,0 +1,10 @@
+package memcached
+
+type Config struct {
+ // Addr is the list of memcached addresses; port 11211 is used by default
+ Addr []string
+}
+
+func (s *Config) InitDefaults() {
+ s.Addr = []string{"localhost:11211"} // default memcached address
+}
diff --git a/plugins/kv/memcached/plugin.go b/plugins/kv/memcached/plugin.go
new file mode 100644
index 00000000..bd0a207d
--- /dev/null
+++ b/plugins/kv/memcached/plugin.go
@@ -0,0 +1,243 @@
+package memcached
+
+import (
+ "context"
+ "strings"
+ "time"
+
+ "github.com/bradfitz/gomemcache/memcache"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const PluginName = "memcached"
+
+var EmptyItem = kv.Item{}
+
+type Plugin struct {
+ // config
+ cfg *Config
+ // logger
+ log logger.Logger
+ // memcached client
+ client *memcache.Client
+}
+
+// NewMemcachedClient returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
+func NewMemcachedClient(url string) kv.Storage {
+ m := memcache.New(url)
+ return &Plugin{
+ client: m,
+ }
+}
+
+func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ const op = errors.Op("memcached init")
+ s.cfg = &Config{}
+ s.cfg.InitDefaults()
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ s.log = log
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ s.client = memcache.New(s.cfg.Addr...)
+ return errCh
+}
+
+// Stop is a no-op: memcached has no stop/close or anything similar to close the connection
+func (s *Plugin) Stop() error {
+ return nil
+}
+
+// Has checks the key for existence
+func (s Plugin) Has(ctx context.Context, keys ...string) (map[string]bool, error) {
+ const op = errors.Op("memcached Has")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+ m := make(map[string]bool, len(keys))
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ exist, err := s.client.Get(key)
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if exist != nil {
+ m[key] = true
+ }
+ }
+ return m, nil
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
+func (s Plugin) Get(ctx context.Context, key string) ([]byte, error) {
+ const op = errors.Op("memcached Get")
+ // to get cases like " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ data, err := s.client.Get(key)
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if data != nil {
+ // return the value by the key
+ return data.Value, nil
+ }
+ // data is nil for some reason (e.g. a cache miss) and the error is also nil
+ return nil, nil
+}
+
+// MGet returns a map with the key as a string
+// and the value as a []byte
+func (s Plugin) MGet(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("memcached MGet")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+ for _, key := range keys {
+ // GetMulti could also be used here
+ data, err := s.client.Get(key)
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if data != nil {
+ m[key] = data.Value
+ }
+ }
+
+ return m, nil
+}
+
+// Set sets the KV pairs. Keys should be 250 bytes maximum
+// TTL:
+// Expiration is the cache expiration time, in seconds: either a relative
+// time from now (up to 1 month), or an absolute Unix epoch time.
+// Zero means the Item has no expiration time.
+func (s Plugin) Set(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("memcached Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ for i := range items {
+ if items[i] == EmptyItem {
+ return errors.E(op, errors.EmptyItem)
+ }
+
+ // pre-allocate item
+ memcachedItem := &memcache.Item{
+ Key: items[i].Key,
+ // unsafe convert
+ Value: []byte(items[i].Value),
+ Flags: 0,
+ }
+
+ // set the expiration when the TTL isn't empty
+ if items[i].TTL != "" {
+ // verify the TTL
+ t, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return err
+ }
+ memcachedItem.Expiration = int32(t.Unix())
+ }
+
+ err := s.client.Set(memcachedItem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Expiration is the cache expiration time, in seconds: either a relative
+// time from now (up to 1 month), or an absolute Unix epoch time.
+// Zero means the Item has no expiration time.
+func (s Plugin) MExpire(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("memcached MExpire")
+ for _, item := range items {
+ if item.TTL == "" || strings.TrimSpace(item.Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // verify provided TTL
+ t, err := time.Parse(time.RFC3339, item.TTL)
+ if err != nil {
+ return err
+ }
+
+ // Touch updates the expiry for the given key. The seconds parameter is either
+ // a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+ // into the future at which time the item will expire. Zero means the item has
+ // no expiration time. ErrCacheMiss is returned if the key is not in the cache.
+ // The key must be at most 250 bytes in length.
+ err = s.client.Touch(item.Key, int32(t.Unix()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TTL returns the time in seconds (int32) for the given keys
+func (s Plugin) TTL(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("memcached HTTLas")
+ return nil, errors.E(op, errors.Str("not valid request for memcached, see https://github.com/memcached/memcached/issues/239"))
+}
+
+func (s Plugin) Delete(ctx context.Context, keys ...string) error {
+ const op = errors.Op("memcached Has")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ for _, key := range keys {
+ err := s.client.Delete(key)
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s Plugin) Close() error {
+ return nil
+}
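
A rough sketch of using the memcached driver standalone (a memcached server on localhost:11211 is assumed, as in the tests below); note that the RFC 3339 TTL is converted into an absolute Unix timestamp before being handed to memcached. The import path is assumed from the file location.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/kv"
	"github.com/spiral/roadrunner/v2/plugins/kv/memcached"
)

func main() {
	// assumes a memcached server listening on localhost:11211
	store := memcached.NewMemcachedClient("localhost:11211")
	ctx := context.Background()

	err := store.Set(ctx, kv.Item{
		Key:   "session",
		Value: "payload",
		// expires roughly 30 seconds from now
		TTL: time.Now().Add(30 * time.Second).Format(time.RFC3339),
	})
	if err != nil {
		panic(err)
	}

	val, err := store.Get(ctx, "session")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // payload
}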
diff --git a/plugins/kv/memcached/storage_test.go b/plugins/kv/memcached/storage_test.go
new file mode 100644
index 00000000..4b59bbd0
--- /dev/null
+++ b/plugins/kv/memcached/storage_test.go
@@ -0,0 +1,444 @@
+package memcached
+
+import (
+ "context"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/stretchr/testify/assert"
+)
+
+func initStorage() kv.Storage {
+ return NewMemcachedClient("localhost:11211")
+}
+
+func cleanup(t *testing.T, s kv.Storage, keys ...string) {
+ err := s.Delete(context.Background(), keys...)
+ if err != nil {
+ t.Fatalf("error during cleanup: %s", err.Error())
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+
+ ctx := context.Background()
+
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get(ctx, "key")
+ assert.NoError(t, err)
+
+ if string(res) != "hello world" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete(ctx, "key", "key2"))
+ // check that keys are not present
+ res, err = s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(ctx, i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ cleanup(t, s, "key")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ // check
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.Error(t, s.Set(ctx, kv.Item{}))
+
+ err = s.Delete(ctx, "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // the key should be present
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete(ctx, "key"+strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
diff --git a/plugins/kv/memory/config.go b/plugins/kv/memory/config.go
new file mode 100644
index 00000000..329e7fff
--- /dev/null
+++ b/plugins/kv/memory/config.go
@@ -0,0 +1,12 @@
+package memory
+
+// Config is default config for the in-memory driver
+type Config struct {
+ // Enabled or disabled (true or false)
+ Enabled bool
+}
+
+// InitDefaults by default driver is turned off
+func (c *Config) InitDefaults() {
+ c.Enabled = false
+}
diff --git a/plugins/kv/memory/storage.go b/plugins/kv/memory/storage.go
new file mode 100644
index 00000000..1b6cb580
--- /dev/null
+++ b/plugins/kv/memory/storage.go
@@ -0,0 +1,263 @@
+package memory
+
+import (
+ "context"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const PluginName = "memory"
+
+type Plugin struct {
+ heap *sync.Map
+ stop chan struct{}
+
+ log logger.Logger
+ cfg *Config
+}
+
+func NewInMemoryStorage() kv.Storage {
+ p := &Plugin{
+ heap: &sync.Map{},
+ stop: make(chan struct{}),
+ }
+
+ go p.gc()
+
+ return p
+}
+
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("in-memory storage init")
+ s.cfg = &Config{}
+ s.cfg.InitDefaults()
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ s.log = log
+ // init in-memory
+ s.heap = &sync.Map{}
+ s.stop = make(chan struct{}, 1)
+ return nil
+}
+
+func (s Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ // start in-memory gc for kv
+ go s.gc()
+
+ return errCh
+}
+
+func (s Plugin) Stop() error {
+ const op = errors.Op("in-memory storage stop")
+ err := s.Close()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+func (s Plugin) Has(ctx context.Context, keys ...string) (map[string]bool, error) {
+ const op = errors.Op("in-memory storage Has")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+ m := make(map[string]bool)
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ if _, ok := s.heap.Load(key); ok {
+ m[key] = true
+ }
+ }
+
+ return m, nil
+}
+
+func (s Plugin) Get(ctx context.Context, key string) ([]byte, error) {
+ const op = errors.Op("in-memory storage Get")
+ // to get cases like " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ if data, exist := s.heap.Load(key); exist {
+ // the type assertion might panic in theory,
+ // but only kv.Item values are stored here, see the Set function
+ return []byte(data.(kv.Item).Value), nil
+ }
+ return nil, nil
+}
+
+func (s Plugin) MGet(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("in-memory storage MGet")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for _, key := range keys {
+ if value, ok := s.heap.Load(key); ok {
+ m[key] = value
+ }
+ }
+
+ return m, nil
+}
+
+func (s Plugin) Set(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("in-memory storage Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ for _, item := range items {
+ // TTL is set
+ if item.TTL != "" {
+ // check the TTL in the item
+ _, err := time.Parse(time.RFC3339, item.TTL)
+ if err != nil {
+ return err
+ }
+ }
+
+ s.heap.Store(item.Key, item)
+ }
+ return nil
+}
+
+// MExpire sets the expiration time for the given keys.
+// If a key already has an expiration time, it will be overwritten.
+func (s Plugin) MExpire(ctx context.Context, items ...kv.Item) error {
+ const op = errors.Op("in-memory storage MExpire")
+ for _, item := range items {
+ if item.TTL == "" || strings.TrimSpace(item.Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // if the key exists, overwrite its TTL
+ if pItem, ok := s.heap.Load(item.Key); ok {
+ // check that time is correct
+ _, err := time.Parse(time.RFC3339, item.TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ tmp := pItem.(kv.Item)
+ // assume that the provided time is in the future;
+ // the in-memory driver is just FOR TESTING PURPOSES,
+ // so the logic isn't ideal
+ s.heap.Store(item.Key, kv.Item{
+ Key: item.Key,
+ Value: tmp.Value,
+ TTL: item.TTL,
+ })
+ }
+ }
+
+ return nil
+}
+
+func (s Plugin) TTL(ctx context.Context, keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("in-memory storage TTL")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for _, key := range keys {
+ if item, ok := s.heap.Load(key); ok {
+ m[key] = item.(kv.Item).TTL
+ }
+ }
+ return m, nil
+}
+
+func (s Plugin) Delete(ctx context.Context, keys ...string) error {
+ const op = errors.Op("in-memory storage Delete")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ for _, key := range keys {
+ s.heap.Delete(key)
+ }
+ return nil
+}
+
+// Close clears the in-memory storage
+func (s Plugin) Close() error {
+ s.heap = &sync.Map{}
+ s.stop <- struct{}{}
+ return nil
+}
+
+// ================================== PRIVATE ======================================
+
+func (s *Plugin) gc() {
+ // TODO check
+ ticker := time.NewTicker(time.Millisecond * 500)
+ for {
+ select {
+ case <-s.stop:
+ ticker.Stop()
+ return
+ case now := <-ticker.C:
+ // check every 500 milliseconds
+ s.heap.Range(func(key, value interface{}) bool {
+ v := value.(kv.Item)
+ if v.TTL == "" {
+ return true
+ }
+
+ t, err := time.Parse(time.RFC3339, v.TTL)
+ if err != nil {
+ return false
+ }
+
+ if now.After(t) {
+ s.heap.Delete(key)
+ }
+ return true
+ })
+ }
+ }
+}
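
The in-memory driver behaves the same way from the caller's point of view. A short sketch showing a key being collected by the 500ms GC loop above after its TTL passes (import path assumed from the file location):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/kv"
	"github.com/spiral/roadrunner/v2/plugins/kv/memory"
)

func main() {
	store := memory.NewInMemoryStorage()
	ctx := context.Background()

	// the key expires one second from now
	err := store.Set(ctx, kv.Item{
		Key:   "temp",
		Value: "value",
		TTL:   time.Now().Add(time.Second).Format(time.RFC3339),
	})
	if err != nil {
		panic(err)
	}

	// give the 500ms GC loop time to remove the expired key
	time.Sleep(2 * time.Second)

	v, err := store.Has(ctx, "temp")
	if err != nil {
		panic(err)
	}
	fmt.Println(v["temp"]) // false
}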
diff --git a/plugins/kv/memory/storage_test.go b/plugins/kv/memory/storage_test.go
new file mode 100644
index 00000000..b7b46637
--- /dev/null
+++ b/plugins/kv/memory/storage_test.go
@@ -0,0 +1,470 @@
+package memory
+
+import (
+ "context"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/stretchr/testify/assert"
+)
+
+func initStorage() kv.Storage {
+ return NewInMemoryStorage()
+}
+
+func cleanup(t *testing.T, s kv.Storage, keys ...string) {
+ err := s.Delete(context.Background(), keys...)
+ if err != nil {
+ t.Fatalf("error during cleanup: %s", err.Error())
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+
+ ctx := context.Background()
+
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get(ctx, "key")
+ assert.NoError(t, err)
+
+ if string(res) != "value" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete(ctx, "key", "key2"))
+ // check that keys are not present
+ res, err = s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ res, err := s.MGet(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(ctx, i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ // check
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, "")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.Get(ctx, " ")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet(ctx, "key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.NoError(t, s.Set(ctx, kv.Item{}))
+ _, err = s.Has(ctx, "key")
+ assert.NoError(t, err)
+
+ err = s.Delete(ctx, "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", "")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete(ctx, "key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ ctx := context.Background()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 2)
+ m, err := s.TTL(ctx, "key", "key2")
+ assert.NoError(t, err)
+
+ // remove a precision 4.02342342 -> 4
+ keyTTL, err := strconv.Atoi(m["key"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // remove a precision 4.02342342 -> 4
+ key2TTL, err := strconv.Atoi(m["key2"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.True(t, keyTTL < 5)
+ assert.True(t, key2TTL < 5)
+
+ time.Sleep(time.Second * 4)
+
+ // ensure that storage is clean
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ ctx := context.Background()
+ v, err := s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has(ctx, "key", "key2")
+ assert.NoError(t, err)
+ // keys are present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(ctx, kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has(ctx, "key")
+ assert.NoError(t, err)
+ // the key should be present
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete(ctx, "key"+strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
diff --git a/plugins/reload/plugin.go b/plugins/reload/plugin.go
index 452e03a3..eb1b61b2 100644
--- a/plugins/reload/plugin.go
+++ b/plugins/reload/plugin.go
@@ -57,7 +57,7 @@ func (s *Plugin) Init(cfg config.Configurer, log logger.Logger, res resetter.Res
return nil
}
}
- return errors.E(op, errors.Skip)
+ return errors.E(op, errors.SkipFile)
},
Files: make(map[string]os.FileInfo),
Ignored: ignored,
diff --git a/plugins/reload/watcher.go b/plugins/reload/watcher.go
index c232f16f..08c85af9 100644
--- a/plugins/reload/watcher.go
+++ b/plugins/reload/watcher.go
@@ -179,7 +179,7 @@ outer:
// if filename does not contain pattern --> ignore that file
if w.watcherConfigs[serviceName].FilePatterns != nil && w.watcherConfigs[serviceName].FilterHooks != nil {
err = w.watcherConfigs[serviceName].FilterHooks(fileInfoList[i].Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.Skip, err) {
+ if errors.Is(errors.SkipFile, err) {
continue outer
}
}
@@ -293,7 +293,7 @@ func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]o
// if filename does not contain pattern --> ignore that file
err = w.watcherConfigs[serviceName].FilterHooks(info.Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.Skip, err) {
+ if errors.Is(errors.SkipFile, err) {
return nil
}