summaryrefslogtreecommitdiff
path: root/plugins/reload
diff options
context:
space:
mode:
authorValery Piashchynski <[email protected]>2020-12-21 19:42:23 +0300
committerValery Piashchynski <[email protected]>2020-12-21 19:42:23 +0300
commitee8b4075c0f836d698d1ae505c87c17147de447a (patch)
tree531d980e5bfb94ee39b03952a97e0445f7955409 /plugins/reload
parent0ad45031047bb479e06ce0a0f496c6db9b2641c9 (diff)
Move plugins to the roadrunner-plugins repository
Diffstat (limited to 'plugins/reload')
-rw-r--r--plugins/reload/config.go58
-rw-r--r--plugins/reload/plugin.go158
-rw-r--r--plugins/reload/tests/config_test.go63
-rw-r--r--plugins/reload/tests/configs/.rr-reload-2.yaml44
-rw-r--r--plugins/reload/tests/configs/.rr-reload-3.yaml46
-rw-r--r--plugins/reload/tests/configs/.rr-reload-4.yaml46
-rw-r--r--plugins/reload/tests/configs/.rr-reload.yaml44
-rw-r--r--plugins/reload/tests/reload_plugin_test.go812
-rw-r--r--plugins/reload/watcher.go372
9 files changed, 0 insertions, 1643 deletions
diff --git a/plugins/reload/config.go b/plugins/reload/config.go
deleted file mode 100644
index 9ca2c0dc..00000000
--- a/plugins/reload/config.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package reload
-
-import (
- "time"
-
- "github.com/spiral/errors"
-)
-
-// Config is a Reload configuration point.
-type Config struct {
- // Interval is a global refresh interval
- Interval time.Duration
-
- // Patterns is a global file patterns to watch. It will be applied to every directory in project
- Patterns []string
-
- // Services is set of services which would be reloaded in case of FS changes
- Services map[string]ServiceConfig
-}
-
-type ServiceConfig struct {
- // Enabled indicates that service must be watched, doest not required when any other option specified
- Enabled bool
-
- // Recursive is options to use nested files from root folder
- Recursive bool
-
- // Patterns is per-service specific files to watch
- Patterns []string
-
- // Dirs is per-service specific dirs which will be combined with Patterns
- Dirs []string
-
- // Ignore is set of files which would not be watched
- Ignore []string
-}
-
-// InitDefaults sets missing values to their default values.
-func InitDefaults(c *Config) {
- c.Interval = time.Second
- c.Patterns = []string{".php"}
-}
-
-// Valid validates the configuration.
-func (c *Config) Valid() error {
- const op = errors.Op("config validation [reload plugin]")
- if c.Interval < time.Second {
- return errors.E(op, errors.Str("too short interval"))
- }
-
- if c.Services == nil {
- return errors.E(op, errors.Str("should add at least 1 service"))
- } else if len(c.Services) == 0 {
- return errors.E(op, errors.Str("service initialized, however, no config added"))
- }
-
- return nil
-}
diff --git a/plugins/reload/plugin.go b/plugins/reload/plugin.go
deleted file mode 100644
index dce9502c..00000000
--- a/plugins/reload/plugin.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package reload
-
-import (
- "os"
- "strings"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/interfaces/config"
- "github.com/spiral/roadrunner/v2/interfaces/log"
- "github.com/spiral/roadrunner/v2/interfaces/resetter"
-)
-
-// PluginName contains default plugin name.
-const PluginName string = "reload"
-const thresholdChanBuffer uint = 1000
-
-type Plugin struct {
- cfg *Config
- log log.Logger
- watcher *Watcher
- services map[string]interface{}
- res resetter.Resetter
- stopc chan struct{}
-}
-
-// Init controller service
-func (s *Plugin) Init(cfg config.Configurer, log log.Logger, res resetter.Resetter) error {
- const op = errors.Op("reload plugin init")
- s.cfg = &Config{}
- InitDefaults(s.cfg)
- err := cfg.UnmarshalKey(PluginName, &s.cfg)
- if err != nil {
- // disable plugin in case of error
- return errors.E(op, errors.Disabled, err)
- }
-
- s.log = log
- s.res = res
- s.stopc = make(chan struct{})
- s.services = make(map[string]interface{})
-
- var configs []WatcherConfig
-
- for serviceName, serviceConfig := range s.cfg.Services {
- ignored, err := ConvertIgnored(serviceConfig.Ignore)
- if err != nil {
- return errors.E(op, err)
- }
- configs = append(configs, WatcherConfig{
- ServiceName: serviceName,
- Recursive: serviceConfig.Recursive,
- Directories: serviceConfig.Dirs,
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return errors.E(op, errors.Skip)
- },
- Files: make(map[string]os.FileInfo),
- Ignored: ignored,
- FilePatterns: append(serviceConfig.Patterns, s.cfg.Patterns...),
- })
- }
-
- s.watcher, err = NewWatcher(configs, s.log)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (s *Plugin) Serve() chan error {
- const op = errors.Op("reload plugin serve")
- errCh := make(chan error, 1)
- if s.cfg.Interval < time.Second {
- errCh <- errors.E(op, errors.Str("reload interval is too fast"))
- return errCh
- }
-
- // make a map with unique services
- // so, if we would have a 100 events from http service
- // in map we would see only 1 key and it's config
- treshholdc := make(chan struct {
- serviceConfig ServiceConfig
- service string
- }, thresholdChanBuffer)
-
- // use the same interval
- timer := time.NewTimer(s.cfg.Interval)
-
- go func() {
- for e := range s.watcher.Event {
- treshholdc <- struct {
- serviceConfig ServiceConfig
- service string
- }{serviceConfig: s.cfg.Services[e.service], service: e.service}
- }
- }()
-
- // map with configs by services
- updated := make(map[string]ServiceConfig, len(s.cfg.Services))
-
- go func() {
- for {
- select {
- case cfg := <-treshholdc:
- // logic is following:
- // restart
- timer.Stop()
- // replace previous value in map by more recent without adding new one
- updated[cfg.service] = cfg.serviceConfig
- // if we getting a lot of events, we shouldn't restart particular service on each of it (user doing batch move or very fast typing)
- // instead, we are resetting the timer and wait for s.cfg.Interval time
- // If there is no more events, we restart service only once
- timer.Reset(s.cfg.Interval)
- case <-timer.C:
- if len(updated) > 0 {
- for name := range updated {
- err := s.res.ResetByName(name)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
- }
- // zero map
- updated = make(map[string]ServiceConfig, len(s.cfg.Services))
- }
- case <-s.stopc:
- timer.Stop()
- return
- }
- }
- }()
-
- go func() {
- err := s.watcher.StartPolling(s.cfg.Interval)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
- }()
-
- return errCh
-}
-
-func (s *Plugin) Stop() error {
- s.watcher.Stop()
- s.stopc <- struct{}{}
- return nil
-}
-
-func (s *Plugin) Name() string {
- return PluginName
-}
diff --git a/plugins/reload/tests/config_test.go b/plugins/reload/tests/config_test.go
deleted file mode 100644
index 5bb64b6b..00000000
--- a/plugins/reload/tests/config_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package tests
-
-import (
- "testing"
- "time"
-
- "github.com/spiral/roadrunner/v2/plugins/reload"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Config_Valid(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- services["test"] = reload.ServiceConfig{
- Recursive: false,
- Patterns: nil,
- Dirs: nil,
- Ignore: nil,
- }
-
- cfg := &reload.Config{
- Interval: time.Second,
- Patterns: nil,
- Services: services,
- }
- assert.NoError(t, cfg.Valid())
-}
-
-func Test_Fake_ServiceConfig(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- cfg := &reload.Config{
- Interval: time.Microsecond,
- Patterns: nil,
- Services: services,
- }
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Interval(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- services["test"] = reload.ServiceConfig{
- Enabled: false,
- Recursive: false,
- Patterns: nil,
- Dirs: nil,
- Ignore: nil,
- }
-
- cfg := &reload.Config{
- Interval: time.Millisecond, // should crash here
- Patterns: nil,
- Services: services,
- }
- assert.Error(t, cfg.Valid())
-}
-
-func Test_NoServiceConfig(t *testing.T) {
- cfg := &reload.Config{
- Interval: time.Second,
- Patterns: nil,
- Services: nil,
- }
- assert.Error(t, cfg.Valid())
-}
diff --git a/plugins/reload/tests/configs/.rr-reload-2.yaml b/plugins/reload/tests/configs/.rr-reload-2.yaml
deleted file mode 100644
index 5be3179b..00000000
--- a/plugins/reload/tests/configs/.rr-reload-2.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-server:
- command: php ../../../tests/psr-worker-bench.php
- user: ''
- group: ''
- env:
- RR_HTTP: 'true'
- relay: pipes
- relayTimeout: 20s
-http:
- debug: true
- address: '127.0.0.1:27388'
- maxRequestSize: 1024
- middleware:
- - ''
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trustedSubnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- numWorkers: 2
- maxJobs: 0
- allocateTimeout: 60s
- destroyTimeout: 60s
-logs:
- mode: development
- level: error
-reload:
- interval: 2s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- recursive: true
diff --git a/plugins/reload/tests/configs/.rr-reload-3.yaml b/plugins/reload/tests/configs/.rr-reload-3.yaml
deleted file mode 100644
index b97ed667..00000000
--- a/plugins/reload/tests/configs/.rr-reload-3.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-server:
- command: php ../../../tests/psr-worker-bench.php
- user: ''
- group: ''
- env:
- RR_HTTP: 'true'
- relay: pipes
- relayTimeout: 20s
-http:
- debug: true
- address: '127.0.0.1:37388'
- maxRequestSize: 1024
- middleware:
- - ''
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trustedSubnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- numWorkers: 2
- maxJobs: 0
- allocateTimeout: 60s
- destroyTimeout: 60s
-logs:
- mode: development
- level: error
-reload:
- interval: 2s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- - './unit_tests_copied'
- - './dir1'
- recursive: true
diff --git a/plugins/reload/tests/configs/.rr-reload-4.yaml b/plugins/reload/tests/configs/.rr-reload-4.yaml
deleted file mode 100644
index b664b836..00000000
--- a/plugins/reload/tests/configs/.rr-reload-4.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-server:
- command: php ../../../tests/psr-worker-bench.php
- user: ''
- group: ''
- env:
- RR_HTTP: 'true'
- relay: pipes
- relayTimeout: 20s
-http:
- debug: true
- address: '127.0.0.1:22766'
- maxRequestSize: 1024
- middleware:
- - ''
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trustedSubnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- numWorkers: 2
- maxJobs: 0
- allocateTimeout: 60s
- destroyTimeout: 60s
-logs:
- mode: development
- level: error
-reload:
- interval: 2s
- patterns:
- - .aaa
- services:
- http:
- dirs:
- - './unit_tests'
- - './unit_tests_copied'
- - './dir1'
- recursive: false
diff --git a/plugins/reload/tests/configs/.rr-reload.yaml b/plugins/reload/tests/configs/.rr-reload.yaml
deleted file mode 100644
index 5e223db3..00000000
--- a/plugins/reload/tests/configs/.rr-reload.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-server:
- command: php ../../../tests/psr-worker-bench.php
- user: ''
- group: ''
- env:
- RR_HTTP: 'true'
- relay: pipes
- relayTimeout: 20s
-http:
- debug: true
- address: '127.0.0.1:22388'
- maxRequestSize: 1024
- middleware:
- - ''
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trustedSubnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- numWorkers: 2
- maxJobs: 0
- allocateTimeout: 60s
- destroyTimeout: 60s
-logs:
- mode: development
- level: error
-reload:
- interval: 1s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- recursive: true
diff --git a/plugins/reload/tests/reload_plugin_test.go b/plugins/reload/tests/reload_plugin_test.go
deleted file mode 100644
index 82336ec9..00000000
--- a/plugins/reload/tests/reload_plugin_test.go
+++ /dev/null
@@ -1,812 +0,0 @@
-package tests
-
-import (
- "io"
- "io/ioutil"
- "math/rand"
- "os"
- "os/signal"
- "path/filepath"
- "strconv"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- "github.com/spiral/endure"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/mocks"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/reload"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/stretchr/testify/assert"
-)
-
-const testDir string = "unit_tests"
-const testCopyToDir string = "unit_tests_copied"
-const dir1 string = "dir1"
-const hugeNumberOfFiles uint = 500
-
-func TestReloadInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- err = os.Mkdir(testDir, 0755)
- assert.NoError(t, err)
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).Times(2)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").Times(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").Times(1)
- mockLogger.EXPECT().Info("HTTP listeners successfully re-added").Times(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").Times(1)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 10)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- t.Run("ReloadTestInit", reloadTestInit)
-
- wg.Wait()
- assert.NoError(t, freeResources(testDir))
-}
-
-func reloadTestInit(t *testing.T) {
- err := ioutil.WriteFile(filepath.Join(testDir, "file.txt"), //nolint:gosec
- []byte{}, 0755)
- assert.NoError(t, err)
-}
-
-func TestReloadHugeNumberOfFiles(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 60)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- t.Run("ReloadTestHugeNumberOfFiles", reloadHugeNumberOfFiles)
- t.Run("ReloadRandomlyChangeFile", randomlyChangeFile)
-
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-}
-
-func randomlyChangeFile(t *testing.T) {
- // we know, that directory contains 500 files (0-499)
- // let's try to randomly change it
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".txt"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-func reloadHugeNumberOfFiles(t *testing.T) {
- for i := uint(0); i < hugeNumberOfFiles; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
-}
-
-// Should be events only about creating files with txt ext
-func TestReloadFilterFileExt(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-2.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, os.Mkdir(testDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(100)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").Times(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").Times(1)
- mockLogger.EXPECT().Info("HTTP listeners successfully re-added").Times(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").Times(1)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 60)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- t.Run("ReloadMakeFiles", reloadMakeFiles)
- t.Run("ReloadFilteredExt", reloadFilteredExt)
-
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
-}
-
-func reloadMakeFiles(t *testing.T) {
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-}
-
-func reloadFilteredExt(t *testing.T) {
- // change files with abc extension
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(1000) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".abc"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
- assert.NoError(t, err)
- }
-
- // change files with def extension
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(1000) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".def"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-// Should be events only about creating files with txt ext
-func TestReloadCopy500(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-3.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
- assert.NoError(t, os.Mkdir(dir1, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
- //
- mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was removed from watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 120)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- // Scenario
- // 1
- // Create 3k files with txt, abc, def extensions
- // Copy files to the unit_tests_copy dir
- // 2
- // Delete both dirs, recreate
- // Create 3k files with txt, abc, def extensions
- // Move files to the unit_tests_copy dir
- // 3
- // Recursive
-
- t.Run("ReloadMake300Files", reloadMake300Files)
- t.Run("ReloadCopyFiles", reloadCopyFiles)
- t.Run("ReloadRecursiveDirsSupport", copyFilesRecursive)
- t.Run("RandomChangesInRecursiveDirs", randomChangesInRecursiveDirs)
- t.Run("RemoveFilesSupport", removeFilesSupport)
- t.Run("ReloadMoveSupport", reloadMoveSupport)
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- wg.Wait()
-}
-
-func reloadMoveSupport(t *testing.T) {
- t.Run("MoveSupportCopy", copyFilesRecursive)
- // move some files
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(100)) // nolint:gosec
- rDir := rand.Int63n(9) // nolint:gosec
- rExt := rand.Int63n(3) // nolint:gosec
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- // change files with def extension
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
-
- // move file
- err := os.Rename(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]), filepath.Join(dirs[rDir+1], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
- assert.NoError(t, err)
- }
-}
-
-func removeFilesSupport(t *testing.T) {
- // remove some files
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(100)) // nolint:gosec
- rDir := rand.Int63n(10) // nolint:gosec
- rExt := rand.Int63n(3) // nolint:gosec
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- // change files with def extension
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
- // here can be a situation, when file already deleted
- _ = os.Remove(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
- }
-}
-
-func randomChangesInRecursiveDirs(t *testing.T) {
- // change files with def extension
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- filenames := []string{
- "file_", // should be update
- "foo_", // should be created
- "bar_", // should be created
- }
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) // nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(100)) // nolint:gosec
- rDir := rand.Int63n(10) // nolint:gosec
- rExt := rand.Int63n(3) // nolint:gosec
- rName := rand.Int63n(3) // nolint:gosec
-
- err := ioutil.WriteFile(filepath.Join(dirs[rDir], filenames[rName]+strconv.Itoa(int(rNum))+ext[rExt]), []byte("Hello, Gophers!"), 0755) // nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-func copyFilesRecursive(t *testing.T) {
- err := copyDir(testDir, "dir1")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10")
- assert.NoError(t, err)
-}
-
-func reloadCopyFiles(t *testing.T) {
- err := copyDir(testDir, testCopyToDir)
- assert.NoError(t, err)
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- // recreate files
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-
- err = copyDir(testDir, testCopyToDir)
- assert.NoError(t, err)
-}
-
-func reloadMake300Files(t *testing.T) {
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-}
-
-func TestReloadNoRecursion(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-4.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(dir1, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // http server should not be restarted. all event from wrong file extensions should be skipped
- mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 30)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- t.Run("ReloadMakeFiles", reloadMakeFiles) // make files in the testDir
- t.Run("ReloadCopyFilesRecursive", reloadCopyFiles)
-
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-}
-
-// ========================================================================
-
-func freeResources(path string) error {
- return os.RemoveAll(path)
-}
-
-func makeFile(filename string) error {
- return ioutil.WriteFile(filepath.Join(testDir, filename), []byte{}, 0755) //nolint:gosec
-}
-
-func copyDir(src string, dst string) error {
- src = filepath.Clean(src)
- dst = filepath.Clean(dst)
-
- si, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !si.IsDir() {
- return errors.E(errors.Str("source is not a directory"))
- }
-
- _, err = os.Stat(dst)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- err = os.MkdirAll(dst, si.Mode())
- if err != nil {
- return err
- }
-
- entries, err := ioutil.ReadDir(src)
- if err != nil {
- return err
- }
-
- for _, entry := range entries {
- srcPath := filepath.Join(src, entry.Name())
- dstPath := filepath.Join(dst, entry.Name())
-
- if entry.IsDir() {
- err = copyDir(srcPath, dstPath)
- if err != nil {
- return err
- }
- } else {
- // Skip symlinks.
- if entry.Mode()&os.ModeSymlink != 0 {
- continue
- }
-
- err = copyFile(srcPath, dstPath)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func copyFile(src, dst string) error {
- in, err := os.Open(src)
- if err != nil {
- return errors.E(err)
- }
- defer func() {
- _ = in.Close()
- }()
-
- out, err := os.Create(dst)
- if err != nil {
- return errors.E(err)
- }
- defer func() {
- _ = out.Close()
- }()
-
- _, err = io.Copy(out, in)
- if err != nil {
- return errors.E(err)
- }
-
- err = out.Sync()
- if err != nil {
- return errors.E(err)
- }
-
- si, err := os.Stat(src)
- if err != nil {
- return errors.E(err)
- }
- err = os.Chmod(dst, si.Mode())
- if err != nil {
- return errors.E(err)
- }
- return nil
-}
diff --git a/plugins/reload/watcher.go b/plugins/reload/watcher.go
deleted file mode 100644
index 55e1d9d5..00000000
--- a/plugins/reload/watcher.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package reload
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/interfaces/log"
-)
-
-// SimpleHook is used to filter by simple criteria, CONTAINS
-type SimpleHook func(filename string, pattern []string) error
-
-// An Event describes an event that is received when files or directory
-// changes occur. It includes the os.FileInfo of the changed file or
-// directory and the type of event that's occurred and the full path of the file.
-type Event struct {
- Path string
- Info os.FileInfo
-
- service string // type of service, http, grpc, etc...
-}
-
-type WatcherConfig struct {
- // service name
- ServiceName string
-
- // Recursive or just add by single directory
- Recursive bool
-
- // Directories used per-service
- Directories []string
-
- // simple hook, just CONTAINS
- FilterHooks func(filename string, pattern []string) error
-
- // path to file with Files
- Files map[string]os.FileInfo
-
- // Ignored Directories, used map for O(1) amortized get
- Ignored map[string]struct{}
-
- // FilePatterns to ignore
- FilePatterns []string
-}
-
-type Watcher struct {
- // main event channel
- Event chan Event
- close chan struct{}
-
- // =============================
- mu *sync.Mutex
-
- // indicates whether the walker is started or not
- started bool
-
- // config for each service
- // need pointer here to assign files
- watcherConfigs map[string]WatcherConfig
-
- // logger
- log log.Logger
-}
-
-// Options is used to set Watcher Options
-type Options func(*Watcher)
-
-// NewWatcher returns new instance of File Watcher
-func NewWatcher(configs []WatcherConfig, log log.Logger, options ...Options) (*Watcher, error) {
- w := &Watcher{
- Event: make(chan Event),
- mu: &sync.Mutex{},
-
- log: log,
-
- close: make(chan struct{}),
-
- //workingDir: workDir,
- watcherConfigs: make(map[string]WatcherConfig),
- }
-
- // add watcherConfigs by service names
- for _, v := range configs {
- w.watcherConfigs[v.ServiceName] = v
- }
-
- // apply options
- for _, option := range options {
- option(w)
- }
- err := w.initFs()
- if err != nil {
- return nil, err
- }
-
- return w, nil
-}
-
-// initFs makes initial map with files
-func (w *Watcher) initFs() error {
- for srvName, config := range w.watcherConfigs {
- fileList, err := w.retrieveFileList(srvName, config)
- if err != nil {
- return err
- }
- // workaround. in golang you can't assign to map in struct field
- tmp := w.watcherConfigs[srvName]
- tmp.Files = fileList
- w.watcherConfigs[srvName] = tmp
- }
- return nil
-}
-
-// ConvertIgnored is used to convert slice to map with ignored files
-func ConvertIgnored(ignored []string) (map[string]struct{}, error) {
- if len(ignored) == 0 {
- return nil, nil
- }
-
- ign := make(map[string]struct{}, len(ignored))
- for i := 0; i < len(ignored); i++ {
- abs, err := filepath.Abs(ignored[i])
- if err != nil {
- return nil, err
- }
- ign[abs] = struct{}{}
- }
-
- return ign, nil
-}
-
-// https://en.wikipedia.org/wiki/Inotify
-// SetMaxFileEvents sets max file notify events for Watcher
-// In case of file watch errors, this value can be increased system-wide
-// For linux: set --> fs.inotify.max_user_watches = 600000 (under /etc/<choose_name_here>.conf)
- // And apply: sudo sysctl -p --system
-// func SetMaxFileEvents(events int) Options {
-// return func(watcher *Watcher) {
-// watcher.maxFileWatchEvents = events
-// }
-//
-// }
-
-// pass map from outside
-func (w *Watcher) retrieveFilesSingle(serviceName, path string) (map[string]os.FileInfo, error) {
- const op = errors.Op("retrieve")
- stat, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
-
- filesList := make(map[string]os.FileInfo, 10)
- filesList[path] = stat
-
- // if it's not a dir, return
- if !stat.IsDir() {
- return filesList, nil
- }
-
- fileInfoList, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
- // recursive calls are slow compared to goto
- // so, we will add files with goto pattern
-outer:
- for i := 0; i < len(fileInfoList); i++ {
- // if file in ignored --> continue
- if _, ignored := w.watcherConfigs[serviceName].Ignored[path]; ignored {
- continue
- }
-
- // if filename does not contain pattern --> ignore that file
- if w.watcherConfigs[serviceName].FilePatterns != nil && w.watcherConfigs[serviceName].FilterHooks != nil {
- err = w.watcherConfigs[serviceName].FilterHooks(fileInfoList[i].Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.Skip, err) {
- continue outer
- }
- }
-
- filesList[fileInfoList[i].Name()] = fileInfoList[i]
- }
-
- return filesList, nil
-}
-
-func (w *Watcher) StartPolling(duration time.Duration) error {
- w.mu.Lock()
- const op = errors.Op("start polling")
- if w.started {
- w.mu.Unlock()
- return errors.E(op, errors.Str("already started"))
- }
-
- w.started = true
- w.mu.Unlock()
-
- return w.waitEvent(duration)
-}
-
- // this is a blocking operation
-func (w *Watcher) waitEvent(d time.Duration) error {
- ticker := time.NewTicker(d)
- for {
- select {
- case <-w.close:
- ticker.Stop()
- // just exit
- // no matter for the pollEvents
- return nil
- case <-ticker.C:
- // this is not a very effective way
- // because we have to wait on the Lock
- // better would be to watch files in parallel, but, since that would be used in debug... TODO
- for serviceName := range w.watcherConfigs {
- // TODO sync approach
- fileList, _ := w.retrieveFileList(serviceName, w.watcherConfigs[serviceName])
- w.pollEvents(w.watcherConfigs[serviceName].ServiceName, fileList)
- }
- }
- }
-}
-
- // retrieveFileList gets the file list for a service
-func (w *Watcher) retrieveFileList(serviceName string, config WatcherConfig) (map[string]os.FileInfo, error) {
- fileList := make(map[string]os.FileInfo)
- if config.Recursive {
- // walk through directories recursively
- for i := 0; i < len(config.Directories); i++ {
- // full path is workdir/relative_path
- fullPath, err := filepath.Abs(config.Directories[i])
- if err != nil {
- return nil, err
- }
- list, err := w.retrieveFilesRecursive(serviceName, fullPath)
- if err != nil {
- return nil, err
- }
-
- for k := range list {
- fileList[k] = list[k]
- }
- }
- return fileList, nil
- }
-
- for i := 0; i < len(config.Directories); i++ {
- // full path is workdir/relative_path
- fullPath, err := filepath.Abs(config.Directories[i])
- if err != nil {
- return nil, err
- }
-
- // list is pathToFiles with files
- list, err := w.retrieveFilesSingle(serviceName, fullPath)
- if err != nil {
- return nil, err
- }
-
- for pathToFile, file := range list {
- fileList[pathToFile] = file
- }
- }
-
- return fileList, nil
-}
-
-func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]os.FileInfo, error) {
- fileList := make(map[string]os.FileInfo)
-
- return fileList, filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // If path is ignored and it's a directory, skip the directory. If it's
- // ignored and it's a single file, skip the file.
- _, ignored := w.watcherConfigs[serviceName].Ignored[path]
- if ignored {
- if info.IsDir() {
- // if it's dir, ignore whole
- return filepath.SkipDir
- }
- return nil
- }
-
- // if filename does not contain pattern --> ignore that file
- err = w.watcherConfigs[serviceName].FilterHooks(info.Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.Skip, err) {
- return nil
- }
-
- // Add the path and its info to the file list.
- fileList[path] = info
- return nil
- })
-}
-
-func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- // Store create and remove events for use to check for rename events.
- creates := make(map[string]os.FileInfo)
- removes := make(map[string]os.FileInfo)
-
- // Check for removed files.
- for pth := range w.watcherConfigs[serviceName].Files {
- if _, found := files[pth]; !found {
- removes[pth] = w.watcherConfigs[serviceName].Files[pth]
- w.log.Debug("file added to the list of removed files", "path", pth, "name", w.watcherConfigs[serviceName].Files[pth].Name(), "size", w.watcherConfigs[serviceName].Files[pth].Size())
- }
- }
-
- // Check for created files, writes and chmods.
- for pth := range files {
- if files[pth].IsDir() {
- continue
- }
- oldInfo, found := w.watcherConfigs[serviceName].Files[pth]
- if !found {
- // A file was created.
- creates[pth] = files[pth]
- w.log.Debug("file was created", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
- continue
- }
-
- if oldInfo.ModTime() != files[pth].ModTime() || oldInfo.Mode() != files[pth].Mode() {
- w.watcherConfigs[serviceName].Files[pth] = files[pth]
- w.log.Debug("file was updated", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
- w.Event <- Event{
- Path: pth,
- Info: files[pth],
- service: serviceName,
- }
- }
- }
-
- // Send all the remaining create and remove events.
- for pth := range creates {
- // add file to the plugin watch files
- w.watcherConfigs[serviceName].Files[pth] = creates[pth]
- w.log.Debug("file was added to watcher", "path", pth, "name", creates[pth].Name(), "size", creates[pth].Size())
-
- w.Event <- Event{
- Path: pth,
- Info: creates[pth],
- service: serviceName,
- }
- }
-
- for pth := range removes {
- // delete path from the config
- delete(w.watcherConfigs[serviceName].Files, pth)
- w.log.Debug("file was removed from watcher", "path", pth, "name", removes[pth].Name(), "size", removes[pth].Size())
-
- w.Event <- Event{
- Path: pth,
- Info: removes[pth],
- service: serviceName,
- }
- }
-}
-
-func (w *Watcher) Stop() {
- w.close <- struct{}{}
-}