author     Valery Piashchynski <[email protected]>  2020-12-22 23:02:25 +0300
committer  Valery Piashchynski <[email protected]>  2020-12-22 23:02:25 +0300
commit     fd1e98bc6339abfa66523bf9d2208d00df8ee4bc (patch)
tree       b679441276717e687a5b460ebeba7ad0eee69be9 /pkg
parent     40b6c3169931a3fef62b649db19ff01dc685b7d4 (diff)
events listeners refactor, CLI initial commit
Diffstat (limited to 'pkg')
-rwxr-xr-x  pkg/pipe/pipe_factory.go                15
-rwxr-xr-x  pkg/pipe/pipe_factory_test.go           15
-rwxr-xr-x  pkg/pool/static_pool.go                102
-rwxr-xr-x  pkg/pool/static_pool_test.go           137
-rwxr-xr-x  pkg/pool/supervisor_pool.go             11
-rw-r--r--  pkg/pool/supervisor_test.go             19
-rwxr-xr-x  pkg/socket/socket_factory.go            19
-rwxr-xr-x  pkg/worker/sync_worker.go                7
-rwxr-xr-x  pkg/worker/worker.go                    19
-rwxr-xr-x  pkg/worker_watcher/worker_watcher.go     1
10 files changed, 169 insertions(+), 176 deletions(-)
diff --git a/pkg/pipe/pipe_factory.go b/pkg/pipe/pipe_factory.go
index a0e0c258..f6211ab9 100755
--- a/pkg/pipe/pipe_factory.go
+++ b/pkg/pipe/pipe_factory.go
@@ -6,6 +6,7 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/pkg/pipe"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
"github.com/spiral/roadrunner/v2/interfaces/worker"
"github.com/spiral/roadrunner/v2/internal"
workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
@@ -14,14 +15,18 @@ import (
// Factory connects to stack using standard
// streams (STDIN, STDOUT pipes).
-type Factory struct{}
+type Factory struct {
+ listener []events.EventListener
+}
// NewPipeFactory returns new factory instance and starts
// listening
// todo: review tests
-func NewPipeFactory() worker.Factory {
- return &Factory{}
+func NewPipeFactory(listeners ...events.EventListener) worker.Factory {
+ return &Factory{
+ listener: listeners,
+ }
}
type SpawnResult struct {
@@ -35,7 +40,7 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd) (wo
c := make(chan SpawnResult)
const op = errors.Op("spawn worker with context")
go func() {
- w, err := workerImpl.InitBaseWorker(cmd)
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(f.listener...))
if err != nil {
c <- SpawnResult{
w: nil,
@@ -116,7 +121,7 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd) (wo
func (f *Factory) SpawnWorker(cmd *exec.Cmd) (worker.BaseProcess, error) {
const op = errors.Op("spawn worker")
- w, err := workerImpl.InitBaseWorker(cmd)
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(f.listener...))
if err != nil {
return nil, errors.E(op, err)
}
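
The pipe factory change above removes the need to call AddListener on each spawned worker: listeners passed to NewPipeFactory are handed to InitBaseWorker for every worker the factory creates. A minimal usage sketch follows (Go, not part of the diff); it assumes events.EventListener has the func(event interface{}) shape used by the tests below, and the PHP script path and log output are placeholders.

package example

import (
	"context"
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/interfaces/worker"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
)

// spawnWithLogging builds a pipe factory whose workers all inherit the
// logging listener, then spawns a single worker over STDIN/STDOUT pipes.
func spawnWithLogging(ctx context.Context) (worker.BaseProcess, error) {
	f := pipe.NewPipeFactory(func(event interface{}) {
		log.Printf("worker event: %+v", event) // placeholder handling
	})

	cmd := exec.Command("php", "worker.php", "pipes") // placeholder script
	return f.SpawnWorkerWithTimeout(ctx, cmd)
}
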
diff --git a/pkg/pipe/pipe_factory_test.go b/pkg/pipe/pipe_factory_test.go
index 0d548b7a..3cef0646 100755
--- a/pkg/pipe/pipe_factory_test.go
+++ b/pkg/pipe/pipe_factory_test.go
@@ -405,21 +405,20 @@ func Test_Echo_Slow(t *testing.T) {
func Test_Broken(t *testing.T) {
ctx := context.Background()
cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
-
- w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
- if err != nil {
- t.Fatal(err)
- }
-
data := ""
mu := &sync.Mutex{}
- w.AddListener(func(event interface{}) {
+ listener := func(event interface{}) {
if wev, ok := event.(events.WorkerEvent); ok {
mu.Lock()
data = string(wev.Payload.([]byte))
mu.Unlock()
}
- })
+ }
+
+ w, err := NewPipeFactory(listener).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
syncWorker, err := workerImpl.From(w)
if err != nil {
diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go
index 2a06b255..b181a805 100755
--- a/pkg/pool/static_pool.go
+++ b/pkg/pool/static_pool.go
@@ -22,20 +22,16 @@ const StopRequest = "{\"stop\":true}"
// ErrorEncoder encode error or make a decision based on the error type
type ErrorEncoder func(err error, w worker.BaseProcess) (payload.Payload, error)
-// Before is set of functions that executes BEFORE Exec
-type Before func(req payload.Payload) payload.Payload
-
-// After is set of functions that executes AFTER Exec
-type After func(req payload.Payload, resp payload.Payload) payload.Payload
-
type Options func(p *StaticPool)
+type Command func() *exec.Cmd
+
// StaticPool controls worker creation, destruction and task routing. Pool uses fixed amount of stack.
type StaticPool struct {
cfg Config
// worker command creator
- cmd func() *exec.Cmd
+ cmd Command
// creates and connects to stack
factory worker.Factory
@@ -49,14 +45,13 @@ type StaticPool struct {
// allocate new worker
allocator worker.Allocator
+ // errEncoder is the default Exec error encoder
errEncoder ErrorEncoder
- before []Before
- after []After
}
-// NewPool creates new worker pool and task multiplexer. StaticPool will initiate with one worker.
-func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory, cfg Config, options ...Options) (pool.Pool, error) {
- const op = errors.Op("NewPool")
+// Initialize creates new worker pool and task multiplexer. StaticPool will initiate with one worker.
+func Initialize(ctx context.Context, cmd Command, factory worker.Factory, cfg Config, options ...Options) (pool.Pool, error) {
+ const op = errors.Op("Initialize")
if factory == nil {
return nil, errors.E(op, errors.Str("no factory initialized"))
}
@@ -72,11 +67,14 @@ func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory,
cmd: cmd,
factory: factory,
events: eventsPkg.NewEventsHandler(),
- after: make([]After, 0, 0),
- before: make([]Before, 0, 0),
}
- p.allocator = newPoolAllocator(ctx, p.cfg.AllocateTimeout, factory, cmd)
+ // add pool options
+ for i := 0; i < len(options); i++ {
+ options[i](p)
+ }
+
+ p.allocator = p.newPoolAllocator(ctx, p.cfg.AllocateTimeout, factory, cmd)
p.ww = workerWatcher.NewWorkerWatcher(p.allocator, p.cfg.NumWorkers, p.events)
workers, err := p.allocateWorkers(p.cfg.NumWorkers)
@@ -92,14 +90,9 @@ func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory,
p.errEncoder = defaultErrEncoder(p)
- // add pool options
- for i := 0; i < len(options); i++ {
- options[i](p)
- }
-
// if supervised config not nil, guess, that pool wanted to be supervised
if cfg.Supervisor != nil {
- sp := newPoolWatcher(p, p.events, p.cfg.Supervisor)
+ sp := supervisorWrapper(p, p.events, p.cfg.Supervisor)
// start watcher timer
sp.Start()
return sp, nil
@@ -108,20 +101,16 @@ func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory,
return p, nil
}
-func ExecBefore(before ...Before) Options {
- return func(p *StaticPool) {
- p.before = append(p.before, before...)
- }
-}
-
-func ExecAfter(after ...After) Options {
+func AddListeners(listeners ...events.EventListener) Options {
return func(p *StaticPool) {
- p.after = append(p.after, after...)
+ for i := 0; i < len(listeners); i++ {
+ p.addListener(listeners[i])
+ }
}
}
// AddListener connects event listener to the pool.
-func (sp *StaticPool) AddListener(listener events.EventListener) {
+func (sp *StaticPool) addListener(listener events.EventListener) {
sp.events.AddListener(listener)
}
@@ -151,44 +140,30 @@ func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) {
return payload.Payload{}, errors.E(op, err)
}
- sw := w.(worker.SyncWorker)
-
- if len(sp.before) > 0 {
- for i := 0; i < len(sp.before); i++ {
- p = sp.before[i](p)
- }
- }
-
- rsp, err := sw.Exec(p)
+ rsp, err := w.Exec(p)
if err != nil {
- return sp.errEncoder(err, sw)
+ return sp.errEncoder(err, w)
}
// worker want's to be terminated
// TODO careful with string(rsp.Context)
if len(rsp.Body) == 0 && string(rsp.Context) == StopRequest {
- sw.State().Set(internal.StateInvalid)
- err = sw.Stop()
+ w.State().Set(internal.StateInvalid)
+ err = w.Stop()
if err != nil {
- sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: sw, Payload: errors.E(op, err)})
+ sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)})
}
return sp.Exec(p)
}
- if sp.cfg.MaxJobs != 0 && sw.State().NumExecs() >= sp.cfg.MaxJobs {
+ if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs {
err = sp.ww.AllocateNew()
if err != nil {
return payload.Payload{}, errors.E(op, err)
}
} else {
- sp.ww.PushWorker(sw)
- }
-
- if len(sp.after) > 0 {
- for i := 0; i < len(sp.after); i++ {
- rsp = sp.after[i](p, rsp)
- }
+ sp.ww.PushWorker(w)
}
return rsp, nil
@@ -196,20 +171,13 @@ func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) {
func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error) {
const op = errors.Op("exec with context")
- ctxGetFree, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout)
+ ctxGetFree, cancel := context.WithTimeout(ctx, sp.cfg.AllocateTimeout)
defer cancel()
w, err := sp.getWorker(ctxGetFree, op)
if err != nil {
return payload.Payload{}, errors.E(op, err)
}
- // apply all before function
- if len(sp.before) > 0 {
- for i := 0; i < len(sp.before); i++ {
- rqs = sp.before[i](rqs)
- }
- }
-
rsp, err := w.ExecWithTimeout(ctx, rqs)
if err != nil {
return sp.errEncoder(err, w)
@@ -223,7 +191,7 @@ func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs payload.Payload)
sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)})
}
- return sp.Exec(rqs)
+ return sp.ExecWithContext(ctx, rqs)
}
if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs {
@@ -235,13 +203,6 @@ func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs payload.Payload)
sp.ww.PushWorker(w)
}
- // apply all after functions
- if len(sp.after) > 0 {
- for i := 0; i < len(sp.after); i++ {
- rsp = sp.after[i](rqs, rsp)
- }
- }
-
return rsp, nil
}
@@ -300,7 +261,7 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder {
}
}
-func newPoolAllocator(ctx context.Context, timeout time.Duration, factory worker.Factory, cmd func() *exec.Cmd) worker.Allocator {
+func (sp *StaticPool) newPoolAllocator(ctx context.Context, timeout time.Duration, factory worker.Factory, cmd func() *exec.Cmd) worker.Allocator {
return func() (worker.BaseProcess, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
@@ -313,6 +274,11 @@ func newPoolAllocator(ctx context.Context, timeout time.Duration, factory worker
if err != nil {
return nil, err
}
+
+ sp.events.Push(events.PoolEvent{
+ Event: events.EventWorkerConstruct,
+ Payload: sw,
+ })
return sw, nil
}
}
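
The static_pool.go changes replace NewPool, ExecBefore and ExecAfter with Initialize plus an AddListeners option, apply options before workers are allocated (so listeners see the initial EventWorkerConstruct events), and drop the SyncWorker cast in Exec. A hedged sketch of the new construction path follows (Go, not part of the diff); it is written as if it sat next to static_pool_test.go, assumes pkg/pool declares package pool (the package clause is not shown here), and uses placeholder script names.

package pool

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
)

// exampleEcho wires one listener into both layers: the pipe factory forwards
// per-worker events, while AddListeners registers it for pool-level events
// such as EventWorkerConstruct pushed by the new allocator.
func exampleEcho() error {
	ctx := context.Background()

	listener := func(event interface{}) {
		log.Printf("event: %+v", event) // placeholder handling
	}

	p, err := Initialize(
		ctx,
		func() *exec.Cmd { return exec.Command("php", "client.php", "echo", "pipes") }, // placeholder script
		pipe.NewPipeFactory(listener),
		Config{
			NumWorkers:      1,
			AllocateTimeout: time.Second,
			DestroyTimeout:  time.Second,
		},
		AddListeners(listener),
	)
	if err != nil {
		return err
	}
	defer p.Destroy(ctx)

	res, err := p.Exec(payload.Payload{Body: []byte("hello")})
	if err != nil {
		return err
	}
	log.Printf("response: %s", res.Body)
	return nil
}
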
diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go
index 30345aee..dcc930f6 100755
--- a/pkg/pool/static_pool_test.go
+++ b/pkg/pool/static_pool_test.go
@@ -27,10 +27,10 @@ var cfg = Config{
func Test_NewPool(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
assert.NoError(t, err)
@@ -41,10 +41,10 @@ func Test_NewPool(t *testing.T) {
}
func Test_StaticPool_Invalid(t *testing.T) {
- p, err := NewPool(
+ p, err := Initialize(
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/invalid.php") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
@@ -53,10 +53,10 @@ func Test_StaticPool_Invalid(t *testing.T) {
}
func Test_ConfigNoErrorInitDefaults(t *testing.T) {
- p, err := NewPool(
+ p, err := Initialize(
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -69,10 +69,10 @@ func Test_ConfigNoErrorInitDefaults(t *testing.T) {
func Test_StaticPool_Echo(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
assert.NoError(t, err)
@@ -93,10 +93,10 @@ func Test_StaticPool_Echo(t *testing.T) {
func Test_StaticPool_Echo_NilContext(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
assert.NoError(t, err)
@@ -117,10 +117,10 @@ func Test_StaticPool_Echo_NilContext(t *testing.T) {
func Test_StaticPool_Echo_Context(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "head", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
assert.NoError(t, err)
@@ -141,10 +141,10 @@ func Test_StaticPool_Echo_Context(t *testing.T) {
func Test_StaticPool_JobError(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "error", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
assert.NoError(t, err)
@@ -167,18 +167,9 @@ func Test_StaticPool_JobError(t *testing.T) {
func Test_StaticPool_Broken_Replace(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
- ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "broken", "pipes") },
- pipe.NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
- assert.NotNil(t, p)
-
block := make(chan struct{}, 1)
- p.AddListener(func(event interface{}) {
+ listener := func(event interface{}) {
if wev, ok := event.(events.WorkerEvent); ok {
if wev.Event == events.EventWorkerLog {
e := string(wev.Payload.([]byte))
@@ -188,7 +179,17 @@ func Test_StaticPool_Broken_Replace(t *testing.T) {
}
}
}
- })
+ }
+
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "broken", "pipes") },
+ pipe.NewPipeFactory(listener),
+ cfg,
+ AddListeners(listener),
+ )
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
time.Sleep(time.Second)
res, err := p.ExecWithContext(ctx, payload.Payload{Body: []byte("hello")})
@@ -203,11 +204,28 @@ func Test_StaticPool_Broken_Replace(t *testing.T) {
func Test_StaticPool_Broken_FromOutside(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ // Consume pool events
+ ev := make(chan struct{}, 1)
+ listener := func(event interface{}) {
+ if pe, ok := event.(events.PoolEvent); ok {
+ if pe.Event == events.EventWorkerConstruct {
+ ev <- struct{}{}
+ }
+ }
+ }
+
+ var cfg = Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 5,
+ DestroyTimeout: time.Second * 5,
+ }
+
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
+ AddListeners(listener),
)
assert.NoError(t, err)
defer p.Destroy(ctx)
@@ -222,39 +240,30 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) {
assert.Empty(t, res.Context)
assert.Equal(t, "hello", res.String())
- assert.Equal(t, runtime.NumCPU(), len(p.Workers()))
-
- // Consume pool events
- wg := sync.WaitGroup{}
- wg.Add(1)
- p.AddListener(func(event interface{}) {
- if pe, ok := event.(events.PoolEvent); ok {
- if pe.Event == events.EventWorkerConstruct {
- wg.Done()
- }
- }
- })
+ assert.Equal(t, 1, len(p.Workers()))
+ // first creation
+ <-ev
// killing random worker and expecting pool to replace it
err = p.Workers()[0].Kill()
if err != nil {
t.Errorf("error killing the process: error %v", err)
}
- wg.Wait()
+ // re-creation
+ <-ev
list := p.Workers()
for _, w := range list {
assert.Equal(t, internal.StateReady, w.State().Value())
}
- wg.Wait()
}
func Test_StaticPool_AllocateTimeout(t *testing.T) {
- p, err := NewPool(
+ p, err := Initialize(
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
AllocateTimeout: time.Nanosecond * 1,
@@ -270,10 +279,10 @@ func Test_StaticPool_AllocateTimeout(t *testing.T) {
func Test_StaticPool_Replace_Worker(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
MaxJobs: 1,
@@ -307,10 +316,10 @@ func Test_StaticPool_Replace_Worker(t *testing.T) {
func Test_StaticPool_Debug_Worker(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
Debug: true,
AllocateTimeout: time.Second,
@@ -347,10 +356,10 @@ func Test_StaticPool_Debug_Worker(t *testing.T) {
// identical to replace but controlled on worker side
func Test_StaticPool_Stop_Worker(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
@@ -387,10 +396,10 @@ func Test_StaticPool_Stop_Worker(t *testing.T) {
// identical to replace but controlled on worker side
func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
@@ -409,10 +418,10 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
// identical to replace but controlled on worker side
func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
@@ -439,10 +448,10 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
// identical to replace but controlled on worker side
func Test_Static_Pool_Handle_Dead(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 5,
AllocateTimeout: time.Second,
@@ -464,10 +473,10 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) {
// identical to replace but controlled on worker side
func Test_Static_Pool_Slow_Destroy(t *testing.T) {
- p, err := NewPool(
+ p, err := Initialize(
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 5,
AllocateTimeout: time.Second,
@@ -483,10 +492,10 @@ func Test_Static_Pool_Slow_Destroy(t *testing.T) {
func Benchmark_Pool_Echo(b *testing.B) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfg,
)
if err != nil {
@@ -505,16 +514,17 @@ func Benchmark_Pool_Echo(b *testing.B) {
//
func Benchmark_Pool_Echo_Batched(b *testing.B) {
ctx := context.Background()
- p, _ := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: int64(runtime.NumCPU()),
AllocateTimeout: time.Second * 100,
DestroyTimeout: time.Second,
},
)
+ assert.NoError(b, err)
defer p.Destroy(ctx)
var wg sync.WaitGroup
@@ -535,10 +545,10 @@ func Benchmark_Pool_Echo_Batched(b *testing.B) {
//
func Benchmark_Pool_Echo_Replaced(b *testing.B) {
ctx := context.Background()
- p, _ := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
Config{
NumWorkers: 1,
MaxJobs: 1,
@@ -546,6 +556,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) {
DestroyTimeout: time.Second,
},
)
+ assert.NoError(b, err)
defer p.Destroy(ctx)
b.ResetTimer()
b.ReportAllocs()
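
The test updates above replace the WaitGroup-plus-AddListener pattern with a listener registered at construction time that signals a buffered channel. A distilled version of that synchronization idiom (Go, not part of the diff; same pkg/pool package-name assumption as above, placeholder script path):

package pool

import (
	"context"
	"os/exec"
	"testing"
	"time"

	"github.com/spiral/roadrunner/v2/interfaces/events"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
	"github.com/stretchr/testify/assert"
)

// Test_Respawn_Sketch blocks on the channel once for the initial allocation
// and once more for the replacement the worker watcher creates after Kill.
func Test_Respawn_Sketch(t *testing.T) {
	ctx := context.Background()

	ev := make(chan struct{}, 1)
	listener := func(event interface{}) {
		if pe, ok := event.(events.PoolEvent); ok && pe.Event == events.EventWorkerConstruct {
			ev <- struct{}{}
		}
	}

	p, err := Initialize(
		ctx,
		func() *exec.Cmd { return exec.Command("php", "client.php", "echo", "pipes") }, // placeholder script
		pipe.NewPipeFactory(nil),
		Config{NumWorkers: 1, AllocateTimeout: time.Second * 5, DestroyTimeout: time.Second * 5},
		AddListeners(listener),
	)
	assert.NoError(t, err)
	defer p.Destroy(ctx)

	<-ev // initial worker constructed
	assert.NoError(t, p.Workers()[0].Kill())
	<-ev // worker watcher re-created it
}
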
diff --git a/pkg/pool/supervisor_pool.go b/pkg/pool/supervisor_pool.go
index 6faa609c..378be7dd 100755
--- a/pkg/pool/supervisor_pool.go
+++ b/pkg/pool/supervisor_pool.go
@@ -6,12 +6,12 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2"
"github.com/spiral/roadrunner/v2/interfaces/events"
"github.com/spiral/roadrunner/v2/interfaces/pool"
"github.com/spiral/roadrunner/v2/interfaces/worker"
"github.com/spiral/roadrunner/v2/internal"
"github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/tools"
)
const MB = 1024 * 1024
@@ -30,7 +30,7 @@ type supervised struct {
mu *sync.RWMutex
}
-func newPoolWatcher(pool pool.Pool, events events.Handler, cfg *SupervisorConfig) Supervised {
+func supervisorWrapper(pool pool.Pool, events events.Handler, cfg *SupervisorConfig) Supervised {
sp := &supervised{
cfg: cfg,
events: events,
@@ -38,6 +38,7 @@ func newPoolWatcher(pool pool.Pool, events events.Handler, cfg *SupervisorConfig
mu: &sync.RWMutex{},
stopCh: make(chan struct{}),
}
+
return sp
}
@@ -93,10 +94,6 @@ func (sp *supervised) Exec(p payload.Payload) (payload.Payload, error) {
return rsp, nil
}
-func (sp *supervised) AddListener(listener events.EventListener) {
- sp.pool.AddListener(listener)
-}
-
func (sp *supervised) GetConfig() interface{} {
return sp.pool.GetConfig()
}
@@ -149,7 +146,7 @@ func (sp *supervised) control() {
continue
}
- s, err := roadrunner.WorkerProcessState(workers[i])
+ s, err := tools.WorkerProcessState(workers[i])
if err != nil {
// worker not longer valid for supervision
continue
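
Besides renaming newPoolWatcher to supervisorWrapper and dropping AddListener, the supervisor now reads process state through the tools package rather than the root roadrunner package. A hedged sketch of that call from outside the pool (Go, not part of the diff): the fields of the returned state are not visible in this diff, so the sketch only logs it, and the package name example is arbitrary.

package example

import (
	"log"

	"github.com/spiral/roadrunner/v2/interfaces/pool"
	"github.com/spiral/roadrunner/v2/tools"
)

// dumpWorkerStates inspects each worker of a pool the same way the supervisor
// control loop does, skipping workers that are no longer valid for inspection.
func dumpWorkerStates(p pool.Pool) {
	for _, w := range p.Workers() {
		s, err := tools.WorkerProcessState(w)
		if err != nil {
			continue // worker no longer valid, same as the control loop above
		}
		log.Printf("worker state: %+v", s)
	}
}
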
diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go
index 7dd423b8..bdb64a3b 100644
--- a/pkg/pool/supervisor_test.go
+++ b/pkg/pool/supervisor_test.go
@@ -6,9 +6,9 @@ import (
"testing"
"time"
- "github.com/spiral/roadrunner/v2"
"github.com/spiral/roadrunner/v2/pkg/payload"
"github.com/spiral/roadrunner/v2/pkg/pipe"
+ "github.com/spiral/roadrunner/v2/tools"
"github.com/stretchr/testify/assert"
)
@@ -27,10 +27,10 @@ var cfgSupervised = Config{
func TestSupervisedPool_Exec(t *testing.T) {
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfgSupervised,
)
@@ -47,7 +47,7 @@ func TestSupervisedPool_Exec(t *testing.T) {
default:
workers := p.Workers()
if len(workers) > 0 {
- s, err := roadrunner.WorkerProcessState(workers[0])
+ s, err := tools.WorkerProcessState(workers[0])
assert.NoError(t, err)
assert.NotNil(t, s)
// since this is soft limit, double max memory limit watch
@@ -85,10 +85,10 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
},
}
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfgExecTTL,
)
@@ -104,7 +104,8 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
})
assert.Error(t, err)
- assert.Empty(t, resp)
+ assert.Empty(t, resp.Body)
+ assert.Empty(t, resp.Context)
time.Sleep(time.Second * 1)
// should be new worker with new pid
@@ -125,10 +126,10 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) {
},
}
ctx := context.Background()
- p, err := NewPool(
+ p, err := Initialize(
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
- pipe.NewPipeFactory(),
+ pipe.NewPipeFactory(nil),
cfgExecTTL,
)
diff --git a/pkg/socket/socket_factory.go b/pkg/socket/socket_factory.go
index 49456bd9..a3a0bf18 100755
--- a/pkg/socket/socket_factory.go
+++ b/pkg/socket/socket_factory.go
@@ -11,6 +11,7 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/interfaces/relay"
"github.com/spiral/goridge/v3/pkg/socket"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
"github.com/spiral/roadrunner/v2/interfaces/worker"
"github.com/spiral/roadrunner/v2/internal"
workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
@@ -24,6 +25,9 @@ type Factory struct {
// listens for incoming connections from underlying processes
ls net.Listener
+ // events listener
+ listeners []events.EventListener
+
// relay connection timeout
tout time.Duration
@@ -38,12 +42,13 @@ type Factory struct {
// NewSocketServer returns Factory attached to a given socket listener.
// tout specifies for how long factory should serve for incoming relay connection
-func NewSocketServer(ls net.Listener, tout time.Duration) worker.Factory {
+func NewSocketServer(ls net.Listener, tout time.Duration, listeners ...events.EventListener) worker.Factory {
f := &Factory{
- ls: ls,
- tout: tout,
- relays: sync.Map{},
- ErrCh: make(chan error, 10),
+ ls: ls,
+ tout: tout,
+ relays: sync.Map{},
+ listeners: listeners,
+ ErrCh: make(chan error, 10),
}
// Be careful
@@ -91,7 +96,7 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd) (wo
go func() {
ctx, cancel := context.WithTimeout(ctx, f.tout)
defer cancel()
- w, err := workerImpl.InitBaseWorker(cmd)
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(f.listeners...))
if err != nil {
c <- socketSpawn{
w: nil,
@@ -147,7 +152,7 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd) (wo
func (f *Factory) SpawnWorker(cmd *exec.Cmd) (worker.BaseProcess, error) {
const op = errors.Op("spawn_worker")
- w, err := workerImpl.InitBaseWorker(cmd)
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(f.listeners...))
if err != nil {
return nil, err
}
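
socket_factory.go mirrors the pipe factory: NewSocketServer now takes variadic listeners that are attached to every worker it spawns. A usage sketch (Go, not part of the diff), assuming pkg/socket declares package socket; the TCP address and listener body are placeholders.

package example

import (
	"log"
	"net"
	"time"

	"github.com/spiral/roadrunner/v2/interfaces/worker"
	"github.com/spiral/roadrunner/v2/pkg/socket"
)

// newTCPFactory serves worker relays on a TCP socket; every worker spawned by
// the returned factory inherits the logging listener.
func newTCPFactory() (worker.Factory, error) {
	ls, err := net.Listen("tcp", "127.0.0.1:9007") // placeholder address
	if err != nil {
		return nil, err
	}

	return socket.NewSocketServer(ls, time.Minute, func(event interface{}) {
		log.Printf("worker event: %+v", event) // placeholder handling
	}), nil
}
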
diff --git a/pkg/worker/sync_worker.go b/pkg/worker/sync_worker.go
index 11992f22..13212cc6 100755
--- a/pkg/worker/sync_worker.go
+++ b/pkg/worker/sync_worker.go
@@ -8,7 +8,6 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/interfaces/relay"
"github.com/spiral/goridge/v3/pkg/frame"
- "github.com/spiral/roadrunner/v2/interfaces/events"
"github.com/spiral/roadrunner/v2/interfaces/worker"
"github.com/spiral/roadrunner/v2/internal"
"github.com/spiral/roadrunner/v2/pkg/payload"
@@ -19,7 +18,7 @@ type syncWorker struct {
w worker.BaseProcess
}
-// From creates SyncWorker from WorkerBasa
+// From creates SyncWorker from BaseProcess
func From(w worker.BaseProcess) (worker.SyncWorker, error) {
return &syncWorker{
w: w,
@@ -196,10 +195,6 @@ func (tw *syncWorker) Created() time.Time {
return tw.w.Created()
}
-func (tw *syncWorker) AddListener(listener events.EventListener) {
- tw.w.AddListener(listener)
-}
-
func (tw *syncWorker) State() internal.State {
return tw.w.State()
}
diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go
index 456f4bea..6e9141c9 100755
--- a/pkg/worker/worker.go
+++ b/pkg/worker/worker.go
@@ -29,6 +29,8 @@ const (
ReadBufSize = 10240 // Kb
)
+type Options func(p *Process)
+
// Process - supervised process with api over goridge.Relay.
type Process struct {
// created indicates at what time Process has been created.
@@ -76,7 +78,7 @@ type Process struct {
}
// InitBaseWorker creates new Process over given exec.cmd.
-func InitBaseWorker(cmd *exec.Cmd) (worker.BaseProcess, error) {
+func InitBaseWorker(cmd *exec.Cmd, options ...Options) (worker.BaseProcess, error) {
if cmd.Process != nil {
return nil, fmt.Errorf("can't attach to running process")
}
@@ -103,6 +105,11 @@ func InitBaseWorker(cmd *exec.Cmd) (worker.BaseProcess, error) {
// at this point we know, that stderr will contain huge messages
w.stderr.Grow(ReadBufSize)
+ // add options
+ for i := 0; i < len(options); i++ {
+ options[i](w)
+ }
+
go func() {
w.watch()
}()
@@ -110,6 +117,14 @@ func InitBaseWorker(cmd *exec.Cmd) (worker.BaseProcess, error) {
return w, nil
}
+func AddListeners(listeners ...events.EventListener) Options {
+ return func(p *Process) {
+ for i := 0; i < len(listeners); i++ {
+ p.addListener(listeners[i])
+ }
+ }
+}
+
// Pid returns worker pid.
func (w *Process) Pid() int64 {
return int64(w.pid)
@@ -121,7 +136,7 @@ func (w *Process) Created() time.Time {
}
// AddListener registers new worker event listener.
-func (w *Process) AddListener(listener events.EventListener) {
+func (w *Process) addListener(listener events.EventListener) {
w.events.AddListener(listener)
}
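
With AddListener unexported on Process, listeners for a bare worker are supplied through the new AddListeners option, which InitBaseWorker applies before starting the watch goroutine. A sketch (Go, not part of the diff), using the workerImpl alias seen in the factories above and a placeholder script:

package example

import (
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/interfaces/worker"
	workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
)

// newBaseWorker wraps the command in a base worker with a listener already
// registered before the watch goroutine starts.
func newBaseWorker() (worker.BaseProcess, error) {
	cmd := exec.Command("php", "worker.php", "pipes") // placeholder script

	return workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(func(event interface{}) {
		log.Printf("worker event: %+v", event) // placeholder handling
	}))
}
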
diff --git a/pkg/worker_watcher/worker_watcher.go b/pkg/worker_watcher/worker_watcher.go
index 918145e5..55191ce6 100755
--- a/pkg/worker_watcher/worker_watcher.go
+++ b/pkg/worker_watcher/worker_watcher.go
@@ -163,7 +163,6 @@ type workerWatcher struct {
func (ww *workerWatcher) AddToWatch(workers []worker.BaseProcess) error {
for i := 0; i < len(workers); i++ {
ww.stack.Push(workers[i])
- workers[i].AddListener(ww.events.Push)
go func(swc worker.BaseProcess) {
ww.wait(swc)