author    Valery Piashchynski <[email protected]>   2020-12-14 11:35:32 +0300
committer Valery Piashchynski <[email protected]>   2020-12-14 11:35:32 +0300
commit    909076dbf20dce19207d338ca3f5c931953b64a6 (patch)
tree      dd5c25106e5dfc724102397a357cc2cf8f42a885 /plugins/reload
parent    673da74925dee5c62064d3304289ae81cb499217 (diff)
Initial commit
Diffstat (limited to 'plugins/reload')
-rw-r--r--  plugins/reload/config.go             72
-rw-r--r--  plugins/reload/config_test.go        63
-rw-r--r--  plugins/reload/samefile.go            9
-rw-r--r--  plugins/reload/samefile_windows.go   12
-rw-r--r--  plugins/reload/service.go           159
-rw-r--r--  plugins/reload/service_test.go        1
-rw-r--r--  plugins/reload/watcher.go           400
-rw-r--r--  plugins/reload/watcher_test.go      673
8 files changed, 1389 insertions, 0 deletions
diff --git a/plugins/reload/config.go b/plugins/reload/config.go
new file mode 100644
index 00000000..efc71972
--- /dev/null
+++ b/plugins/reload/config.go
@@ -0,0 +1,72 @@
+package reload
+
+import (
+ "errors"
+ "github.com/spiral/roadrunner"
+ "github.com/spiral/roadrunner/service"
+ "time"
+)
+
+// Config is a Reload configuration point.
+type Config struct {
+ // Interval is a global refresh interval
+ Interval time.Duration
+
+ // Patterns is the list of global file patterns to watch. It is applied to every directory in the project
+ Patterns []string
+
+ // Services is the set of services to be reloaded on FS changes
+ Services map[string]ServiceConfig
+}
+
+type ServiceConfig struct {
+ // Enabled indicates that the service must be watched; not required when any other option is specified
+ Enabled bool
+
+ // Recursive enables watching nested files starting from the root folder
+ Recursive bool
+
+ // Patterns is the per-service list of file patterns to watch
+ Patterns []string
+
+ // Dirs is the per-service list of dirs which will be combined with Patterns
+ Dirs []string
+
+ // Ignore is the set of files which will not be watched
+ Ignore []string
+
+ // service is a link to the service to restart
+ service *roadrunner.Controllable
+}
+
+// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
+func (c *Config) Hydrate(cfg service.Config) error {
+ if err := cfg.Unmarshal(c); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// InitDefaults sets missing values to their default values.
+func (c *Config) InitDefaults() error {
+ c.Interval = time.Second
+ c.Patterns = []string{".php"}
+
+ return nil
+}
+
+// Valid validates the configuration.
+func (c *Config) Valid() error {
+ if c.Interval < time.Second {
+ return errors.New("too short interval")
+ }
+
+ if c.Services == nil {
+ return errors.New("should add at least 1 service")
+ } else if len(c.Services) == 0 {
+ return errors.New("service initialized, however, no config added")
+ }
+
+ return nil
+}
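For reference, a minimal sketch of building this Config directly in Go (inside the reload package or its tests), assuming only the types declared above; the "http" service key and the directory names are illustrative, not part of this commit:

	// hypothetical helper: construct and validate a reload Config in code
	func exampleConfig() (*Config, error) {
		cfg := &Config{
			Interval: time.Second,
			Patterns: []string{".php"},
			Services: map[string]ServiceConfig{
				"http": { // illustrative service name
					Enabled:   true,
					Recursive: true,
					Dirs:      []string{"./app"},        // illustrative directory
					Ignore:    []string{"./app/vendor"}, // illustrative ignore path
				},
			},
		}
		if err := cfg.Valid(); err != nil {
			return nil, err
		}
		return cfg, nil
	}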
diff --git a/plugins/reload/config_test.go b/plugins/reload/config_test.go
new file mode 100644
index 00000000..600975d3
--- /dev/null
+++ b/plugins/reload/config_test.go
@@ -0,0 +1,63 @@
+package reload
+
+import (
+ "github.com/stretchr/testify/assert"
+ "testing"
+ "time"
+)
+
+func Test_Config_Valid(t *testing.T) {
+ services := make(map[string]ServiceConfig)
+ services["test"] = ServiceConfig{
+ Recursive: false,
+ Patterns: nil,
+ Dirs: nil,
+ Ignore: nil,
+ service: nil,
+ }
+
+ cfg := &Config{
+ Interval: time.Second,
+ Patterns: nil,
+ Services: services,
+ }
+ assert.NoError(t, cfg.Valid())
+}
+
+func Test_Fake_ServiceConfig(t *testing.T) {
+ services := make(map[string]ServiceConfig)
+ cfg := &Config{
+ Interval: time.Microsecond,
+ Patterns: nil,
+ Services: services,
+ }
+ assert.Error(t, cfg.Valid())
+}
+
+func Test_Interval(t *testing.T) {
+ services := make(map[string]ServiceConfig)
+ services["test"] = ServiceConfig{
+ Enabled: false,
+ Recursive: false,
+ Patterns: nil,
+ Dirs: nil,
+ Ignore: nil,
+ service: nil,
+ }
+
+ cfg := &Config{
+ Interval: time.Millisecond, // should fail validation here
+ Patterns: nil,
+ Services: services,
+ }
+ assert.Error(t, cfg.Valid())
+}
+
+func Test_NoServiceConfig(t *testing.T) {
+ cfg := &Config{
+ Interval: time.Second,
+ Patterns: nil,
+ Services: nil,
+ }
+ assert.Error(t, cfg.Valid())
+}
diff --git a/plugins/reload/samefile.go b/plugins/reload/samefile.go
new file mode 100644
index 00000000..80df0431
--- /dev/null
+++ b/plugins/reload/samefile.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package reload
+
+import "os"
+
+func sameFile(fi1, fi2 os.FileInfo) bool {
+ return os.SameFile(fi1, fi2)
+}
diff --git a/plugins/reload/samefile_windows.go b/plugins/reload/samefile_windows.go
new file mode 100644
index 00000000..5f70d327
--- /dev/null
+++ b/plugins/reload/samefile_windows.go
@@ -0,0 +1,12 @@
+// +build windows
+
+package reload
+
+import "os"
+
+func sameFile(fi1, fi2 os.FileInfo) bool {
+ return fi1.ModTime() == fi2.ModTime() &&
+ fi1.Size() == fi2.Size() &&
+ fi1.Mode() == fi2.Mode() &&
+ fi1.IsDir() == fi2.IsDir()
+}
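The build-tagged pair above gives the watcher a platform-appropriate file identity check; pollEvents in watcher.go later uses it to pair a removed path with a created one and report the pair as a rename. A small, hedged illustration (the file name is only an example):

	// hypothetical check: two FileInfo values taken from the same path compare as equal
	func sameFileExample() bool {
		fi1, err1 := os.Stat("watcher.go") // hypothetical path
		fi2, err2 := os.Stat("watcher.go")
		// pollEvents uses the same comparison to turn a remove+create pair into a rename
		return err1 == nil && err2 == nil && sameFile(fi1, fi2)
	}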
diff --git a/plugins/reload/service.go b/plugins/reload/service.go
new file mode 100644
index 00000000..b2c320f9
--- /dev/null
+++ b/plugins/reload/service.go
@@ -0,0 +1,159 @@
+package reload
+
+import (
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/log"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+// PluginName contains default plugin name.
+const PluginName string = "reload"
+
+type Service struct {
+ cfg *Config
+ log log.Logger
+ watcher *Watcher
+ services map[string]interface{}
+ stopc chan struct{}
+}
+
+// Init initializes the reload plugin
+func (s *Service) Init(cfg config.Configurer, log log.Logger) error {
+ const op = errors.Op("reload plugin init")
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ // disable plugin in case of error
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ s.log = log
+ s.stopc = make(chan struct{})
+ s.services = make(map[string]interface{})
+
+ var configs []WatcherConfig
+
+ for serviceName, serviceConfig := range s.cfg.Services {
+ if s.cfg.Services[serviceName].service == nil {
+ continue
+ }
+ ignored, err := ConvertIgnored(serviceConfig.Ignore)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ configs = append(configs, WatcherConfig{
+ serviceName: serviceName,
+ recursive: serviceConfig.Recursive,
+ directories: serviceConfig.Dirs,
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: ignored,
+ filePatterns: append(serviceConfig.Patterns, s.cfg.Patterns...),
+ })
+ }
+
+ s.watcher, err = NewWatcher(configs)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func (s *Service) Serve() chan error {
+ const op = errors.Op("reload plugin serve")
+ errCh := make(chan error, 1)
+ if s.cfg.Interval < time.Second {
+ errCh <- errors.E(op, errors.Str("reload interval is too fast"))
+ return errCh
+ }
+
+ // collect events and deduplicate them per service:
+ // even if we receive 100 events from the http service,
+ // the map below will contain only one key with its config
+ thresholdc := make(chan struct {
+ serviceConfig ServiceConfig
+ service string
+ }, 100)
+
+ // use the same interval
+ ticker := time.NewTicker(s.cfg.Interval)
+
+ // drain the channel in case of leftover messages
+ defer func() {
+ go func() {
+ for range thresholdc {
+ }
+ }()
+ }()
+
+ go func() {
+ for e := range s.watcher.Event {
+ thresholdc <- struct {
+ serviceConfig ServiceConfig
+ service string
+ }{serviceConfig: s.cfg.Services[e.service], service: e.service}
+ }
+ }()
+
+ // map with the latest configs keyed by service name
+ updated := make(map[string]ServiceConfig, 100)
+
+ go func() {
+ for {
+ select {
+ case cfg := <-thresholdc:
+ // replace the previous value in the map with the more recent one instead of adding a new entry
+ updated[cfg.service] = cfg.serviceConfig
+ // stop ticker
+ ticker.Stop()
+ // restart logic is the following:
+ // if we are getting a lot of events, we shouldn't restart a particular service on each of them
+ // (the user might be doing a bulk move or typing very fast);
+ // instead, we reset the ticker and wait for Interval time.
+ // If there are no more events, the service is restarted only once
+ ticker = time.NewTicker(s.cfg.Interval)
+ case <-ticker.C:
+ if len(updated) > 0 {
+ for k, v := range updated {
+ sv := *v.service
+ err := sv.Server().Reset()
+ if err != nil {
+ s.log.Error(err)
+ }
+ s.log.Debugf("[%s] found %v file change(s), reloading", k, len(updated))
+ }
+ // zero map
+ updated = make(map[string]ServiceConfig, 100)
+ }
+ case <-s.stopc:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+
+ err := s.watcher.StartPolling(s.cfg.Interval)
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+
+ return errCh
+}
+
+func (s *Service) Stop() {
+ s.watcher.Stop()
+ s.stopc <- struct{}{}
+}
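The Serve loop above is a debounce: every incoming event resets the ticker, and only when the ticker finally fires are the accumulated services reset, once per burst. A minimal standalone sketch of the same pattern; the names events, interval, and restart are illustrative and not part of the plugin:

	// debounce restarts: collect event bursts and act once per quiet period
	func debounce(events <-chan string, interval time.Duration, restart func(name string)) {
		updated := make(map[string]struct{})
		ticker := time.NewTicker(interval)
		defer func() { ticker.Stop() }()
		for {
			select {
			case name, ok := <-events:
				if !ok {
					return
				}
				updated[name] = struct{}{}
				// a new event arrived: postpone the restart by resetting the ticker
				ticker.Stop()
				ticker = time.NewTicker(interval)
			case <-ticker.C:
				// the quiet period elapsed: restart each affected service exactly once
				for name := range updated {
					restart(name)
				}
				updated = make(map[string]struct{})
			}
		}
	}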
diff --git a/plugins/reload/service_test.go b/plugins/reload/service_test.go
new file mode 100644
index 00000000..7cad4a5d
--- /dev/null
+++ b/plugins/reload/service_test.go
@@ -0,0 +1 @@
+package reload
diff --git a/plugins/reload/watcher.go b/plugins/reload/watcher.go
new file mode 100644
index 00000000..721495c3
--- /dev/null
+++ b/plugins/reload/watcher.go
@@ -0,0 +1,400 @@
+package reload
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+var ErrorSkip = errors.New("file is skipped")
+var NoWalkerConfig = errors.New("should add at least one walker config, when reload is set to true")
+
+// SimpleHook is used to filter by simple criteria, CONTAINS
+type SimpleHook func(filename string, pattern []string) error
+
+// An Event describes an event that is received when file or directory
+// changes occur. It includes the os.FileInfo of the changed file or
+// directory, the type of event that occurred, and the full path of the file.
+type Event struct {
+ path string
+ info os.FileInfo
+
+ service string // type of service, http, grpc, etc...
+}
+
+type WatcherConfig struct {
+ // service name
+ serviceName string
+
+ // recursive watch or just a single directory
+ recursive bool
+
+ // directories used per-service
+ directories []string
+
+ // simple hook, just CONTAINS
+ filterHooks func(filename string, pattern []string) error
+
+ // map of watched files keyed by path
+ files map[string]os.FileInfo
+
+ // ignored directories; a map is used for O(1) amortized lookups
+ ignored map[string]struct{}
+
+ // filePatterns to watch (files that match none of them are skipped)
+ filePatterns []string
+}
+
+type Watcher struct {
+ // main event channel
+ Event chan Event
+ close chan struct{}
+
+ //=============================
+ mu *sync.Mutex
+
+ // indicates whether the walker has started
+ started bool
+
+ // config for each service
+ // need pointer here to assign files
+ watcherConfigs map[string]WatcherConfig
+}
+
+// Options is used to set Watcher Options
+type Options func(*Watcher)
+
+// NewWatcher returns a new instance of the file Watcher
+func NewWatcher(configs []WatcherConfig, options ...Options) (*Watcher, error) {
+ w := &Watcher{
+ Event: make(chan Event),
+ mu: &sync.Mutex{},
+
+ close: make(chan struct{}),
+
+ //workingDir: workDir,
+ watcherConfigs: make(map[string]WatcherConfig),
+ }
+
+ // add watcherConfigs by service names
+ for _, v := range configs {
+ w.watcherConfigs[v.serviceName] = v
+ }
+
+ // apply options
+ for _, option := range options {
+ option(w)
+ }
+ err := w.initFs()
+ if err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+// initFs builds the initial map of files
+func (w *Watcher) initFs() error {
+ for srvName, config := range w.watcherConfigs {
+ fileList, err := w.retrieveFileList(srvName, config)
+ if err != nil {
+ return err
+ }
+ // workaround: in Go you can't assign to a field of a struct stored in a map
+ tmp := w.watcherConfigs[srvName]
+ tmp.files = fileList
+ w.watcherConfigs[srvName] = tmp
+ }
+ return nil
+}
+
+// ConvertIgnored is used to convert slice to map with ignored files
+func ConvertIgnored(ignored []string) (map[string]struct{}, error) {
+ if len(ignored) == 0 {
+ return nil, nil
+ }
+
+ ign := make(map[string]struct{}, len(ignored))
+ for i := 0; i < len(ignored); i++ {
+ abs, err := filepath.Abs(ignored[i])
+ if err != nil {
+ return nil, err
+ }
+ ign[abs] = struct{}{}
+ }
+
+ return ign, nil
+}
+
+// GetAllFiles returns all files initialized for a particular service
+func (w *Watcher) GetAllFiles(serviceName string) []os.FileInfo {
+ var ret []os.FileInfo
+
+ for _, v := range w.watcherConfigs[serviceName].files {
+ ret = append(ret, v)
+ }
+
+ return ret
+}
+
+// https://en.wikipedia.org/wiki/Inotify
+// SetMaxFileEvents sets max file notify events for Watcher
+// In case of file watch errors, this value can be increased system-wide
+// For linux: set --> fs.inotify.max_user_watches = 600000 (under /etc/<choose_name_here>.conf)
+// And apply: sudo sysctl -p --system
+//func SetMaxFileEvents(events int) Options {
+// return func(watcher *Watcher) {
+// watcher.maxFileWatchEvents = events
+// }
+//
+//}
+
+// retrieveFilesSingle builds the file list for a single (non-recursive) directory
+func (w *Watcher) retrieveFilesSingle(serviceName, path string) (map[string]os.FileInfo, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ filesList := make(map[string]os.FileInfo, 10)
+ filesList[path] = stat
+
+ // if it's not a dir, return
+ if !stat.IsDir() {
+ return filesList, nil
+ }
+
+ fileInfoList, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+
+ // recursive calls are slow compared to iteration,
+ // so files are added using a labeled-continue pattern
+outer:
+ for i := 0; i < len(fileInfoList); i++ {
+ // if file in ignored --> continue
+ if _, ignored := w.watcherConfigs[serviceName].ignored[path]; ignored {
+ continue
+ }
+
+ // if filename does not contain pattern --> ignore that file
+ if w.watcherConfigs[serviceName].filePatterns != nil && w.watcherConfigs[serviceName].filterHooks != nil {
+ err = w.watcherConfigs[serviceName].filterHooks(fileInfoList[i].Name(), w.watcherConfigs[serviceName].filePatterns)
+ if err == ErrorSkip {
+ continue outer
+ }
+ }
+
+ filesList[fileInfoList[i].Name()] = fileInfoList[i]
+ }
+
+ return filesList, nil
+}
+
+func (w *Watcher) StartPolling(duration time.Duration) error {
+ w.mu.Lock()
+ if w.started {
+ w.mu.Unlock()
+ return errors.New("already started")
+ }
+
+ w.started = true
+ w.mu.Unlock()
+
+ return w.waitEvent(duration)
+}
+
+// this is a blocking operation
+func (w *Watcher) waitEvent(d time.Duration) error {
+ ticker := time.NewTicker(d)
+ for {
+ select {
+ case <-w.close:
+ ticker.Stop()
+ // just exit;
+ // pollEvents doesn't matter at this point
+ return nil
+ case <-ticker.C:
+ // this is not a very efficient way,
+ // because we have to wait on the Lock;
+ // it would be better to watch files in parallel, but since this is used in debug... TODO
+ for serviceName, config := range w.watcherConfigs {
+ go func(sn string, c WatcherConfig) {
+ fileList, _ := w.retrieveFileList(sn, c)
+ w.pollEvents(c.serviceName, fileList)
+ }(serviceName, config)
+ }
+ }
+ }
+}
+
+// retrieveFileList gets the file list for a service
+func (w *Watcher) retrieveFileList(serviceName string, config WatcherConfig) (map[string]os.FileInfo, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ fileList := make(map[string]os.FileInfo)
+ if config.recursive {
+ // walk through directories recursively
+ for _, dir := range config.directories {
+ // full path is workdir/relative_path
+ fullPath, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ list, err := w.retrieveFilesRecursive(serviceName, fullPath)
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range list {
+ fileList[k] = v
+ }
+ }
+ return fileList, nil
+ }
+
+ for _, dir := range config.directories {
+ // full path is workdir/relative_path
+ fullPath, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ // list maps file paths to their FileInfo
+ list, err := w.retrieveFilesSingle(serviceName, fullPath)
+ if err != nil {
+ return nil, err
+ }
+
+ for pathToFile, file := range list {
+ fileList[pathToFile] = file
+ }
+ }
+
+ return fileList, nil
+}
+
+func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]os.FileInfo, error) {
+ fileList := make(map[string]os.FileInfo)
+
+ return fileList, filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // If path is ignored and it's a directory, skip the directory. If it's
+ // ignored and it's a single file, skip the file.
+ _, ignored := w.watcherConfigs[serviceName].ignored[path]
+ if ignored {
+ if info.IsDir() {
+ // if it's dir, ignore whole
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ // if filename does not contain pattern --> ignore that file
+ err = w.watcherConfigs[serviceName].filterHooks(info.Name(), w.watcherConfigs[serviceName].filePatterns)
+ if err == ErrorSkip {
+ return nil
+ }
+
+ // Add the path and its info to the file list.
+ fileList[path] = info
+ return nil
+ })
+}
+
+func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // Store create and remove events to use when checking for rename events.
+ creates := make(map[string]os.FileInfo)
+ removes := make(map[string]os.FileInfo)
+
+ // Check for removed files.
+ for pth, info := range w.watcherConfigs[serviceName].files {
+ if _, found := files[pth]; !found {
+ removes[pth] = info
+ }
+ }
+
+ // Check for created files, writes and chmods.
+ for pth, info := range files {
+ if info.IsDir() {
+ continue
+ }
+ oldInfo, found := w.watcherConfigs[serviceName].files[pth]
+ if !found {
+ // A file was created.
+ creates[pth] = info
+ continue
+ }
+ if oldInfo.ModTime() != info.ModTime() {
+ w.watcherConfigs[serviceName].files[pth] = info
+ w.Event <- Event{
+ path: pth,
+ info: info,
+ service: serviceName,
+ }
+ }
+ if oldInfo.Mode() != info.Mode() {
+ w.watcherConfigs[serviceName].files[pth] = info
+ w.Event <- Event{
+ path: pth,
+ info: info,
+ service: serviceName,
+ }
+ }
+ }
+
+ // Check for renames and moves.
+ for path1, info1 := range removes {
+ for path2, info2 := range creates {
+ if sameFile(info1, info2) {
+ e := Event{
+ path: path2,
+ info: info2,
+ service: serviceName,
+ }
+
+ // remove initial path
+ delete(w.watcherConfigs[serviceName].files, path1)
+ // update with new
+ w.watcherConfigs[serviceName].files[path2] = info2
+
+ w.Event <- e
+ }
+ }
+ }
+
+ // Send all the remaining create and remove events.
+ for pth, info := range creates {
+ w.watcherConfigs[serviceName].files[pth] = info
+ w.Event <- Event{
+ path: pth,
+ info: info,
+ service: serviceName,
+ }
+ }
+ for pth, info := range removes {
+ delete(w.watcherConfigs[serviceName].files, pth)
+ w.Event <- Event{
+ path: pth,
+ info: info,
+ service: serviceName,
+ }
+ }
+}
+
+func (w *Watcher) Stop() {
+ w.close <- struct{}{}
+}
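Putting the pieces together, a hedged usage sketch of the watcher on its own; it only compiles inside the reload package because the WatcherConfig fields are unexported, and the "http" name, "./app" directory, and ".php" pattern are illustrative:

	// hypothetical helper inside the reload package
	func watchExample() error {
		wc := WatcherConfig{
			serviceName: "http",            // illustrative service name
			recursive:   true,
			directories: []string{"./app"}, // illustrative directory
			filterHooks: func(filename string, patterns []string) error {
				for i := 0; i < len(patterns); i++ {
					if strings.Contains(filename, patterns[i]) {
						return nil
					}
				}
				return ErrorSkip
			},
			files:        make(map[string]os.FileInfo),
			ignored:      nil,
			filePatterns: []string{".php"},
		}

		w, err := NewWatcher([]WatcherConfig{wc})
		if err != nil {
			return err
		}

		go func() {
			for e := range w.Event {
				_ = e // react to the change, e.g. reset the affected service
			}
		}()

		// StartPolling blocks until Stop is called
		return w.StartPolling(time.Second)
	}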
diff --git a/plugins/reload/watcher_test.go b/plugins/reload/watcher_test.go
new file mode 100644
index 00000000..9683d2de
--- /dev/null
+++ b/plugins/reload/watcher_test.go
@@ -0,0 +1,673 @@
+package reload
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+var testServiceName = "test"
+
+// scenario
+// Create walker instance, init with default config, check that Watcher found all files from config
+func Test_Correct_Watcher_Init(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ defer func() {
+ err = freeResources(tempDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: false,
+ directories: []string{tempDir},
+ filterHooks: nil,
+ files: make(map[string]os.FileInfo),
+ ignored: nil,
+ filePatterns: nil,
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(w.GetAllFiles(testServiceName)) != 2 {
+ t.Fatal("incorrect directories len")
+ }
+}
+
+// scenario
+// create 3 files, create walker instance
+// Start poll events
+// change a file and check that the event reaches the handler
+func Test_Get_FileEvent(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ c := make(chan struct{})
+ defer func(name string) {
+ err = freeResources(name)
+ if err != nil {
+ c <- struct{}{}
+ t.Fatal(err)
+ }
+ c <- struct{}{}
+ }(tempDir)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file1.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file2.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: false,
+ directories: []string{tempDir},
+ filterHooks: nil,
+ files: make(map[string]os.FileInfo),
+ ignored: nil,
+ filePatterns: []string{"aaa", "txt"},
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // should be 3 files plus the directory itself
+ if len(w.GetAllFiles(testServiceName)) != 4 {
+ t.Fatal("incorrect directories len")
+ }
+
+ go limitTime(time.Second * 10, t.Name(), c)
+
+ go func() {
+ go func() {
+ time.Sleep(time.Second)
+ err2 := ioutil.WriteFile(filepath.Join(tempDir, "file2.txt"),
+ []byte{1, 1, 1}, 0755)
+ if err2 != nil {
+ panic(err2)
+ }
+ runtime.Goexit()
+ }()
+
+ go func() {
+ for e := range w.Event {
+ if e.path != "file2.txt" {
+ panic("didn't handle event when write file2")
+ }
+ w.Stop()
+ }
+ }()
+ }()
+
+ err = w.StartPolling(time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// scenario
+// create 3 files with different extensions, create walker instance
+// Start poll events
+// change a file with the txt extension and check that no event reaches the handler because it was filtered out
+func Test_FileExtensionFilter(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ c := make(chan struct{})
+ defer func(name string) {
+ err = freeResources(name)
+ if err != nil {
+ c <- struct{}{}
+ t.Fatal(err)
+ }
+ c <- struct{}{}
+ }(tempDir)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: false,
+ directories: []string{tempDir},
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: nil,
+ filePatterns: []string{"aaa", "bbb"},
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dirLen := len(w.GetAllFiles(testServiceName))
+ // should be 2 files (one was filtered out) plus the directory
+ if dirLen != 3 {
+ t.Fatalf("incorrect directories len, len is: %d", dirLen)
+ }
+
+ go limitTime(time.Second * 5, t.Name(), c)
+
+ go func() {
+ go func() {
+ err2 := ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
+ []byte{1, 1, 1}, 0755)
+ if err2 != nil {
+ panic(err2)
+ }
+
+ runtime.Goexit()
+ }()
+
+ go func() {
+ for e := range w.Event {
+ fmt.Println(e.info.Name())
+ panic("handled event from filtered file")
+ }
+ }()
+ w.Stop()
+ runtime.Goexit()
+ }()
+
+ err = w.StartPolling(time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// nested
+// scenario
+// create dir and nested dir
+// make files with aaa, bbb and txt extensions, filter txt
+// change a non-filtered file and handle the event
+func Test_Recursive_Support(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ defer func() {
+ err = freeResources(tempDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ nestedDir, err := ioutil.TempDir(tempDir, "nested")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: true,
+ directories: []string{tempDir},
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: nil,
+ filePatterns: []string{"aaa", "bbb"},
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dirLen := len(w.GetAllFiles(testServiceName))
+ // should be 3 files (2 from the root dir and 1 from the nested dir); the txt file is filtered out
+ if dirLen != 3 {
+ t.Fatalf("incorrect directories len, len is: %d", dirLen)
+ }
+
+ go func() {
+ // time.Sleep is used here because StartPolling is a blocking operation
+ time.Sleep(time.Second * 5)
+ // change file in nested directory
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
+ []byte{1, 1, 1}, 0755)
+ if err != nil {
+ panic(err)
+ }
+ go func() {
+ for e := range w.Event {
+ if e.info.Name() != "file4.aaa" {
+ panic("wrong handled event from watcher in nested dir")
+ }
+ w.Stop()
+ }
+ }()
+ }()
+
+ err = w.StartPolling(time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func Test_Wrong_Dir(t *testing.T) {
+ // no such file or directory
+ wrongDir := "askdjfhaksdlfksdf"
+
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: true,
+ directories: []string{wrongDir},
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: nil,
+ filePatterns: []string{"aaa", "bbb"},
+ }
+
+ _, err := NewWatcher([]WatcherConfig{wc})
+ if err == nil {
+ t.Fatal("expected an error for a non-existent directory")
+ }
+}
+
+func Test_Filter_Directory(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ c := make(chan struct{})
+ defer func(name string) {
+ err = freeResources(name)
+ if err != nil {
+ c <- struct{}{}
+ t.Fatal(err)
+ }
+ c <- struct{}{}
+ }(tempDir)
+
+ go limitTime(time.Second*10, t.Name(), c)
+
+ nestedDir, err := ioutil.TempDir(tempDir, "nested")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ignored, err := ConvertIgnored([]string{nestedDir})
+ if err != nil {
+ t.Fatal(err)
+ }
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: true,
+ directories: []string{tempDir},
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: ignored,
+ filePatterns: []string{"aaa", "bbb", "txt"},
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dirLen := len(w.GetAllFiles(testServiceName))
+ // should be 2 files (both from the root dir); the nested dir is ignored
+ if dirLen != 2 {
+ t.Fatalf("incorrect directories len, len is: %d", dirLen)
+ }
+
+ go func() {
+ go func() {
+ err2 := ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
+ []byte{1, 1, 1}, 0755)
+ if err2 != nil {
+ panic(err2)
+ }
+ }()
+
+ go func() {
+ for e := range w.Event {
+ fmt.Println("file: " + e.info.Name())
+ panic("handled event from watcher in nested dir")
+ }
+ }()
+
+ // time.Sleep is used here because StartPolling is a blocking operation
+ time.Sleep(time.Second * 5)
+ w.Stop()
+
+ }()
+
+ err = w.StartPolling(time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// copy files from the nested (ignored) dir to a non-ignored location
+// should fire an event
+func Test_Copy_Directory(t *testing.T) {
+ tempDir, err := ioutil.TempDir(".", "")
+ c := make(chan struct{})
+ defer func() {
+ err = freeResources(tempDir)
+ if err != nil {
+ c <- struct{}{}
+ t.Fatal(err)
+ }
+ c <- struct{}{}
+ }()
+
+ nestedDir, err := ioutil.TempDir(tempDir, "nested")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
+ []byte{}, 0755)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ignored, err := ConvertIgnored([]string{nestedDir})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wc := WatcherConfig{
+ serviceName: testServiceName,
+ recursive: true,
+ directories: []string{tempDir},
+ filterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return ErrorSkip
+ },
+ files: make(map[string]os.FileInfo),
+ ignored: ignored,
+ filePatterns: []string{"aaa", "bbb", "txt"},
+ }
+
+ w, err := NewWatcher([]WatcherConfig{wc})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ dirLen := len(w.GetAllFiles(testServiceName))
+ // should be 2 files (both from the root dir); the nested dir is ignored
+ if dirLen != 2 {
+ t.Fatalf("incorrect directories len, len is: %d", dirLen)
+ }
+
+ go limitTime(time.Second*10, t.Name(), c)
+
+ go func() {
+ go func() {
+ err2 := copyDir(nestedDir, filepath.Join(tempDir, "copyTo"))
+ if err2 != nil {
+ panic(err2)
+ }
+
+ // exit from current goroutine
+ runtime.Goexit()
+ }()
+
+ go func() {
+ for range w.Event {
+ // an event should arrive here, otherwise we won't stop
+ w.Stop()
+ }
+ }()
+ }()
+
+ err = w.StartPolling(time.Second)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func limitTime(d time.Duration, name string, free chan struct{}) {
+ go func() {
+ ticker := time.NewTicker(d)
+ for {
+ select {
+ case <-ticker.C:
+ ticker.Stop()
+ panic("timeout exceeded, test: " + name)
+ case <-free:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+}
+
+func copyFile(src, dst string) (err error) {
+ in, err := os.Open(src)
+ if err != nil {
+ return
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if e := out.Close(); e != nil {
+ err = e
+ }
+ }()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return
+ }
+
+ err = out.Sync()
+ if err != nil {
+ return
+ }
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return
+ }
+ err = os.Chmod(dst, si.Mode())
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+func copyDir(src string, dst string) (err error) {
+ src = filepath.Clean(src)
+ dst = filepath.Clean(dst)
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !si.IsDir() {
+ return fmt.Errorf("source is not a directory")
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return
+ }
+ if err == nil {
+ return fmt.Errorf("destination already exists")
+ }
+
+ err = os.MkdirAll(dst, si.Mode())
+ if err != nil {
+ return
+ }
+
+ entries, err := ioutil.ReadDir(src)
+ if err != nil {
+ return
+ }
+
+ for _, entry := range entries {
+ srcPath := filepath.Join(src, entry.Name())
+ dstPath := filepath.Join(dst, entry.Name())
+
+ if entry.IsDir() {
+ err = copyDir(srcPath, dstPath)
+ if err != nil {
+ return
+ }
+ } else {
+ // Skip symlinks.
+ if entry.Mode()&os.ModeSymlink != 0 {
+ continue
+ }
+
+ err = copyFile(srcPath, dstPath)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func freeResources(path string) error {
+ return os.RemoveAll(path)
+}