author | Wolfy-J <[email protected]> | 2020-10-25 16:50:49 +0300
---|---|---
committer | Wolfy-J <[email protected]> | 2020-10-25 16:50:49 +0300
commit | 96c11f2b1830833c7e190f2476334380cf8697ce (patch) |
tree | 3a343603db4c36eada47b4b62bdc50d08ddd8895 /supervisor_pool.go |
parent | 6d0cea83e2a253c3baf4564a37eebaf54fc124c0 (diff) |
- massive update in roadrunner 2.0 abstractions
Diffstat (limited to 'supervisor_pool.go')
-rw-r--r-- | supervisor_pool.go | 139
1 file changed, 139 insertions, 0 deletions
diff --git a/supervisor_pool.go b/supervisor_pool.go
new file mode 100644
index 00000000..e63b4a59
--- /dev/null
+++ b/supervisor_pool.go
@@ -0,0 +1,139 @@
+package roadrunner
+
+import (
+	"context"
+	"time"
+
+	"github.com/spiral/roadrunner/v2/util"
+)
+
+const MB = 1024 * 1024
+
+type SupervisedPool interface {
+	Pool
+
+	// ExecWithContext executes the payload with a time deadline. Attention:
+	// the worker will be destroyed if the context deadline is reached.
+	ExecWithContext(ctx context.Context, rqs Payload) (Payload, error)
+}
+
+type supervisedPool struct {
+	cfg    SupervisorConfig
+	events *util.EventHandler
+	pool   Pool
+	stopCh chan struct{}
+}
+
+func newPoolWatcher(pool *StaticPool, events *util.EventHandler, cfg SupervisorConfig) *supervisedPool {
+	return &supervisedPool{
+		cfg:    cfg,
+		events: events,
+		pool:   pool,
+		stopCh: make(chan struct{}),
+	}
+}
+
+// Start spawns the watch goroutine; control() runs on every WatchTick.
+func (sp *supervisedPool) Start() {
+	go func() {
+		watchTout := time.NewTicker(sp.cfg.WatchTick)
+		defer watchTout.Stop()
+		for {
+			select {
+			case <-sp.stopCh:
+				return
+			case <-watchTout.C:
+				sp.control()
+			}
+		}
+	}()
+}
+
+// Stop blocks until the watch goroutine receives the signal.
+func (sp *supervisedPool) Stop() {
+	sp.stopCh <- struct{}{}
+}
+
+func (sp *supervisedPool) control() {
+	now := time.Now()
+	ctx := context.TODO()
+
+	// Workers() returns a copy of the workers slice, so it is safe to iterate.
+	workers := sp.pool.Workers()
+
+	for i := 0; i < len(workers); i++ {
+		if workers[i].State().Value() == StateInvalid {
+			continue
+		}
+
+		s, err := WorkerProcessState(workers[i])
+		if err != nil {
+			// worker is no longer valid for supervision
+			continue
+		}
+
+		// TTL: remove workers that exceeded their total allowed lifetime.
+		if sp.cfg.TTL != 0 && now.Sub(workers[i].Created()).Seconds() >= float64(sp.cfg.TTL) {
+			err = sp.pool.RemoveWorker(ctx, workers[i])
+			if err != nil {
+				sp.events.Push(PoolEvent{Event: EventSupervisorError, Payload: err})
+				return
+			}
+			sp.events.Push(PoolEvent{Event: EventTTL, Payload: workers[i]})
+			continue
+		}
+
+		// Memory: remove workers that exceeded the per-worker memory limit.
+		// TODO: emit a dedicated max-memory event; EventTTL is reused for now.
+		if sp.cfg.MaxWorkerMemory != 0 && s.MemoryUsage >= sp.cfg.MaxWorkerMemory*MB {
+			err = sp.pool.RemoveWorker(ctx, workers[i])
+			if err != nil {
+				sp.events.Push(PoolEvent{Event: EventSupervisorError, Payload: err})
+				return
+			}
+			sp.events.Push(PoolEvent{Event: EventTTL, Payload: workers[i]})
+			continue
+		}
+
+		// IdleTTL: remove workers that sat idle in StateReady for too long.
+		if sp.cfg.IdleTTL != 0 {
+			// only a ready (idle) worker can expire by idle time
+			if workers[i].State().Value() != StateReady {
+				continue
+			}
+
+			// LastUsed is stored as a UnixNano uint64 timestamp. If, for
+			// example, IdleTTL is 5s and (time.Now - LastUsed) exceeds it,
+			// the worker has been idle for too long and has to be removed.
+			lu := workers[i].State().LastUsed()
+
+			// elapsed idle time in nanoseconds (LastUsed lies in the past,
+			// so subtract it from now, not the other way around)
+			res := now.UnixNano() - int64(lu)
+
+			// IdleTTL is configured in seconds, like TTL above; convert it
+			// to nanoseconds before comparing
+			if res >= sp.cfg.IdleTTL*int64(time.Second) {
+				err = sp.pool.RemoveWorker(ctx, workers[i])
+				if err != nil {
+					sp.events.Push(PoolEvent{Event: EventSupervisorError, Payload: err})
+					return
+				}
+				sp.events.Push(PoolEvent{Event: EventIdleTTL, Payload: workers[i]})
+			}
+		}
+
+		// the very last step would be to accumulate pool memory usage here
+		// (except for the workers removed above):
+		// totalUsedMemory += s.MemoryUsage
+	}
+
+	// if the current usage exceeds the max allowed pool memory, destroy the pool:
+	// if totalUsedMemory > sp.maxPoolMemory {
+	//	sp.pool.Destroy(ctx)
+	// }
+}
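For context on the Start/Stop pair in this diff: it follows the standard Go pattern of a ticker-driven loop in a goroutine plus an unbuffered stop channel. Below is a minimal, self-contained sketch of that pattern; the `watcher` type and `control` callback are illustrative only and not part of this diff or the roadrunner API.

```go
package main

import (
	"fmt"
	"time"
)

// watcher mirrors the supervisedPool loop: a ticker drives periodic control
// passes in a goroutine, and an unbuffered stop channel shuts the loop down.
type watcher struct {
	tick   time.Duration
	stopCh chan struct{}
}

func (w *watcher) Start(control func()) {
	go func() {
		t := time.NewTicker(w.tick)
		defer t.Stop() // ticker is released when the loop exits
		for {
			select {
			case <-w.stopCh:
				return
			case <-t.C:
				control()
			}
		}
	}()
}

// Stop blocks until the loop receives the signal, like supervisedPool.Stop.
func (w *watcher) Stop() {
	w.stopCh <- struct{}{}
}

func main() {
	w := &watcher{tick: 100 * time.Millisecond, stopCh: make(chan struct{})}
	w.Start(func() { fmt.Println("control pass") })
	time.Sleep(350 * time.Millisecond)
	w.Stop()
}
```

Because `stopCh` is unbuffered, `Stop` blocks until the watch goroutine has actually received the signal; the flip side, in the sketch and in `supervisedPool` alike, is that calling `Stop` before `Start` would block forever.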