Diffstat (limited to 'pkg')
-rwxr-xr-x   pkg/payload/payload.go             |  16
-rwxr-xr-x   pkg/pipe/pipe_factory.go           |   6
-rwxr-xr-x   pkg/pipe/pipe_factory_test.go      |  53
-rwxr-xr-x   pkg/pool/static_pool.go            |  47
-rwxr-xr-x   pkg/pool/static_pool_test.go       |  39
-rwxr-xr-x   pkg/pool/supervisor_pool.go        |  15
-rw-r--r--   pkg/pool/supervisor_test.go        |   8
-rwxr-xr-x   pkg/socket/socket_factory.go       |  15
-rwxr-xr-x   pkg/socket/socket_factory_test.go  |  14
-rwxr-xr-x   pkg/worker/sync_worker.go          |  75
-rwxr-xr-x   pkg/worker/sync_worker_test.go     |   4
-rwxr-xr-x   pkg/worker/worker.go               |  12
12 files changed, 160 insertions(+), 144 deletions(-)
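At a glance, this change moves the request/response struct out of the internal package into the new exported pkg/payload package and switches factories, pools, and workers from the monolithic github.com/spiral/goridge/v3 import to its split subpackages (pkg/pipe, pkg/socket, pkg/frame, interfaces/relay). As a rough illustration of what calling code looks like after the move, the sketch below spawns a pipe worker and executes the new payload.Payload. It is not part of the diff: it assumes pipe.NewPipeFactory is the exported constructor behind the factory edited in these hunks and reuses the tests' ../../tests/client.php worker script.

package main

import (
	"context"
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
	workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
)

func main() {
	// Worker script path borrowed from the tests in this diff; adjust for your layout.
	cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")

	// Spawn a worker over pipes (assumed exported constructor of pkg/pipe).
	w, err := pipe.NewPipeFactory().SpawnWorkerWithContext(context.Background(), cmd)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		// Stop follows the cleanup path the pool uses (w.Stop(bCtx) in static_pool.go).
		if err := w.Stop(context.Background()); err != nil {
			log.Println(err)
		}
	}()

	// Wrap the base process into a synchronous worker, as the tests do.
	sw, err := workerImpl.From(w)
	if err != nil {
		log.Fatal(err)
	}

	// payload.Payload replaces internal.Payload everywhere in this diff;
	// the Context/Body fields are unchanged.
	res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
	if err != nil {
		log.Fatal(err)
	}

	// String() is the small helper added in pkg/payload/payload.go.
	log.Println(res.String())
}

For callers the only visible difference is the import path: internal.Payload{...} becomes payload.Payload{...}.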
diff --git a/pkg/payload/payload.go b/pkg/payload/payload.go new file mode 100755 index 00000000..fac36852 --- /dev/null +++ b/pkg/payload/payload.go @@ -0,0 +1,16 @@ +package payload + +// Payload carries binary header and body to stack and +// back to the server. +type Payload struct { + // Context represent payload context, might be omitted. + Context []byte + + // body contains binary payload to be processed by WorkerProcess. + Body []byte +} + +// String returns payload body as string +func (p *Payload) String() string { + return string(p.Body) +} diff --git a/pkg/pipe/pipe_factory.go b/pkg/pipe/pipe_factory.go index c86d78c4..34735fe6 100755 --- a/pkg/pipe/pipe_factory.go +++ b/pkg/pipe/pipe_factory.go @@ -5,7 +5,7 @@ import ( "os/exec" "github.com/spiral/errors" - "github.com/spiral/goridge/v3" + "github.com/spiral/goridge/v3/pkg/pipe" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" workerImpl "github.com/spiral/roadrunner/v2/pkg/worker" @@ -65,7 +65,7 @@ func (f *Factory) SpawnWorkerWithContext(ctx context.Context, cmd *exec.Cmd) (wo } // Init new PIPE relay - relay := goridge.NewPipeRelay(in, out) + relay := pipe.NewPipeRelay(in, out) w.AttachRelay(relay) // Start the worker @@ -134,7 +134,7 @@ func (f *Factory) SpawnWorker(cmd *exec.Cmd) (worker.BaseProcess, error) { } // Init new PIPE relay - relay := goridge.NewPipeRelay(in, out) + relay := pipe.NewPipeRelay(in, out) w.AttachRelay(relay) // Start the worker diff --git a/pkg/pipe/pipe_factory_test.go b/pkg/pipe/pipe_factory_test.go index 7045b785..40797747 100755 --- a/pkg/pipe/pipe_factory_test.go +++ b/pkg/pipe/pipe_factory_test.go @@ -11,7 +11,8 @@ import ( "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/interfaces/events" "github.com/spiral/roadrunner/v2/internal" - "github.com/spiral/roadrunner/v2/pkg/worker" + "github.com/spiral/roadrunner/v2/pkg/payload" + workerImpl "github.com/spiral/roadrunner/v2/pkg/worker" "github.com/stretchr/testify/assert" ) @@ -146,12 +147,12 @@ func Test_Pipe_Echo(t *testing.T) { } }() - sw, err := worker.From(w) + sw, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -174,12 +175,12 @@ func Test_Pipe_Broken(t *testing.T) { assert.Error(t, err) }() - sw, err := worker.From(w) + sw, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Body) @@ -208,7 +209,7 @@ func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) { cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes") w, _ := NewPipeFactory().SpawnWorkerWithContext(context.Background(), cmd) - sw, err := worker.From(w) + sw, err := workerImpl.From(w) if err != nil { b.Fatal(err) } @@ -228,7 +229,7 @@ func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) { }() for n := 0; n < b.N; n++ { - if _, err := sw.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -249,13 +250,13 @@ func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) { } }() - sw, err := worker.From(w) + sw, err := workerImpl.From(w) if err != nil { b.Fatal(err) } for n := 0; n < b.N; n++ { - if _, err := sw.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := 
sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -276,13 +277,13 @@ func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) { } }() - sw, err := worker.From(w) + sw, err := workerImpl.From(w) if err != nil { b.Fatal(err) } for n := 0; n < b.N; n++ { - if _, err := sw.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -297,7 +298,7 @@ func Test_Echo(t *testing.T) { t.Fatal(err) } - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } @@ -311,7 +312,7 @@ func Test_Echo(t *testing.T) { } }() - res, err := syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -327,7 +328,7 @@ func Test_BadPayload(t *testing.T) { w, _ := NewPipeFactory().SpawnWorkerWithContext(ctx, cmd) - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } @@ -342,7 +343,7 @@ func Test_BadPayload(t *testing.T) { } }() - res, err := syncWorker.Exec(internal.Payload{}) + res, err := syncWorker.Exec(payload.Payload{}) assert.Error(t, err) assert.Nil(t, res.Body) @@ -386,12 +387,12 @@ func Test_Echo_Slow(t *testing.T) { } }() - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - res, err := syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -420,12 +421,12 @@ func Test_Broken(t *testing.T) { } }) - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - res, err := syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) assert.Nil(t, res.Body) assert.Nil(t, res.Context) @@ -455,12 +456,12 @@ func Test_Error(t *testing.T) { } }() - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - res, err := syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) assert.Nil(t, res.Body) assert.Nil(t, res.Context) @@ -468,7 +469,7 @@ func Test_Error(t *testing.T) { if errors.Is(errors.ErrSoftJob, err) == false { t.Fatal("error should be of type errors.ErrSoftJob") } - assert.Contains(t, err.Error(), "exec payload: SoftJobError: hello") + assert.Contains(t, err.Error(), "hello") } func Test_NumExecs(t *testing.T) { @@ -486,24 +487,24 @@ func Test_NumExecs(t *testing.T) { } }() - syncWorker, err := worker.From(w) + syncWorker, err := workerImpl.From(w) if err != nil { t.Fatal(err) } - _, err = syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, int64(1), w.State().NumExecs()) - _, err = syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, int64(2), w.State().NumExecs()) - _, err = syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")}) if err != nil { 
t.Errorf("fail to execute payload: error %v", err) } diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go index 837fd183..9cf79fd4 100755 --- a/pkg/pool/static_pool.go +++ b/pkg/pool/static_pool.go @@ -9,7 +9,8 @@ import ( "github.com/spiral/roadrunner/v2/interfaces/pool" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" - eventsHandler "github.com/spiral/roadrunner/v2/pkg/events" + eventsPkg "github.com/spiral/roadrunner/v2/pkg/events" + "github.com/spiral/roadrunner/v2/pkg/payload" syncWorker "github.com/spiral/roadrunner/v2/pkg/worker" workerWatcher "github.com/spiral/roadrunner/v2/pkg/worker_watcher" ) @@ -20,13 +21,13 @@ const StopRequest = "{\"stop\":true}" var bCtx = context.Background() // ErrorEncoder encode error or make a decision based on the error type -type ErrorEncoder func(err error, w worker.BaseProcess) (internal.Payload, error) +type ErrorEncoder func(err error, w worker.BaseProcess) (payload.Payload, error) // Before is set of functions that executes BEFORE Exec -type Before func(req internal.Payload) internal.Payload +type Before func(req payload.Payload) payload.Payload // After is set of functions that executes AFTER Exec -type After func(req internal.Payload, resp internal.Payload) internal.Payload +type After func(req payload.Payload, resp payload.Payload) payload.Payload type Options func(p *StaticPool) @@ -71,7 +72,7 @@ func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory, cfg: cfg, cmd: cmd, factory: factory, - events: eventsHandler.NewEventsHandler(), + events: eventsPkg.NewEventsHandler(), after: make([]After, 0, 0), before: make([]Before, 0, 0), } @@ -79,7 +80,7 @@ func NewPool(ctx context.Context, cmd func() *exec.Cmd, factory worker.Factory, p.allocator = newPoolAllocator(factory, cmd) p.ww = workerWatcher.NewWorkerWatcher(p.allocator, p.cfg.NumWorkers, p.events) - workers, err := p.allocateSyncWorkers(ctx, p.cfg.NumWorkers) + workers, err := p.allocateWorkers(ctx, p.cfg.NumWorkers) if err != nil { return nil, errors.E(op, err) } @@ -139,7 +140,7 @@ func (sp *StaticPool) RemoveWorker(wb worker.BaseProcess) error { return sp.ww.RemoveWorker(wb) } -func (sp *StaticPool) Exec(p internal.Payload) (internal.Payload, error) { +func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) { const op = errors.Op("exec") if sp.cfg.Debug { return sp.execDebug(p) @@ -148,7 +149,7 @@ func (sp *StaticPool) Exec(p internal.Payload) (internal.Payload, error) { defer cancel() w, err := sp.getWorker(ctxGetFree, op) if err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } sw := w.(worker.SyncWorker) @@ -179,7 +180,7 @@ func (sp *StaticPool) Exec(p internal.Payload) (internal.Payload, error) { if sp.cfg.MaxJobs != 0 && sw.State().NumExecs() >= sp.cfg.MaxJobs { err = sp.ww.AllocateNew() if err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } } else { sp.ww.PushWorker(sw) @@ -194,13 +195,13 @@ func (sp *StaticPool) Exec(p internal.Payload) (internal.Payload, error) { return rsp, nil } -func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs internal.Payload) (internal.Payload, error) { +func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error) { const op = errors.Op("exec with context") ctxGetFree, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout) defer cancel() w, err := sp.getWorker(ctxGetFree, op) if 
err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } sw := w.(worker.SyncWorker) @@ -231,7 +232,7 @@ func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs internal.Payload) if sp.cfg.MaxJobs != 0 && sw.State().NumExecs() >= sp.cfg.MaxJobs { err = sp.ww.AllocateNew() if err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } } else { sp.ww.PushWorker(sw) @@ -268,7 +269,7 @@ func (sp *StaticPool) Destroy(ctx context.Context) { } func defaultErrEncoder(sp *StaticPool) ErrorEncoder { - return func(err error, w worker.BaseProcess) (internal.Payload, error) { + return func(err error, w worker.BaseProcess) (payload.Payload, error) { const op = errors.Op("error encoder") // soft job errors are allowed if errors.Is(errors.ErrSoftJob, err) { @@ -287,7 +288,7 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder { sp.ww.PushWorker(w) } - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } w.State().Set(internal.StateInvalid) @@ -295,10 +296,10 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder { errS := w.Stop(bCtx) if errS != nil { - return internal.Payload{}, errors.E(op, errors.Errorf("%v, %v", err, errS)) + return payload.Payload{}, errors.E(op, errors.Errorf("%v, %v", err, errS)) } - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } } @@ -317,10 +318,10 @@ func newPoolAllocator(factory worker.Factory, cmd func() *exec.Cmd) worker.Alloc } } -func (sp *StaticPool) execDebug(p internal.Payload) (internal.Payload, error) { +func (sp *StaticPool) execDebug(p payload.Payload) (payload.Payload, error) { sw, err := sp.allocator() if err != nil { - return internal.Payload{}, err + return payload.Payload{}, err } r, err := sw.(worker.SyncWorker).Exec(p) @@ -333,12 +334,11 @@ func (sp *StaticPool) execDebug(p internal.Payload) (internal.Payload, error) { } // allocate required number of stack -func (sp *StaticPool) allocateSyncWorkers(ctx context.Context, numWorkers int64) ([]worker.BaseProcess, error) { +func (sp *StaticPool) allocateWorkers(ctx context.Context, numWorkers int64) ([]worker.BaseProcess, error) { const op = errors.Op("allocate workers") var workers []worker.BaseProcess // constant number of stack simplify logic - // TODO do not allocate context on every loop cycle?? 
for i := int64(0); i < numWorkers; i++ { ctx, cancel := context.WithTimeout(ctx, sp.cfg.AllocateTimeout) w, err := sp.factory.SpawnWorkerWithContext(ctx, sp.cmd()) @@ -346,12 +346,7 @@ func (sp *StaticPool) allocateSyncWorkers(ctx context.Context, numWorkers int64) cancel() return nil, errors.E(op, errors.WorkerAllocate, err) } - sw, err := syncWorker.From(w) - if err != nil { - cancel() - return nil, errors.E(op, err) - } - workers = append(workers, sw) + workers = append(workers, w) cancel() } return workers, nil diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go index dd33a1a6..b96e9214 100755 --- a/pkg/pool/static_pool_test.go +++ b/pkg/pool/static_pool_test.go @@ -14,6 +14,7 @@ import ( "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/interfaces/events" "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" "github.com/spiral/roadrunner/v2/pkg/pipe" "github.com/stretchr/testify/assert" ) @@ -80,7 +81,7 @@ func Test_StaticPool_Echo(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -104,7 +105,7 @@ func Test_StaticPool_Echo_NilContext(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(internal.Payload{Body: []byte("hello"), Context: nil}) + res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: nil}) assert.NoError(t, err) assert.NotNil(t, res) @@ -128,7 +129,7 @@ func Test_StaticPool_Echo_Context(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(internal.Payload{Body: []byte("hello"), Context: []byte("world")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: []byte("world")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -151,7 +152,7 @@ func Test_StaticPool_JobError(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Body) @@ -189,7 +190,7 @@ func Test_StaticPool_Broken_Replace(t *testing.T) { } }) - res, err := p.ExecWithContext(ctx, internal.Payload{Body: []byte("hello")}) + res, err := p.ExecWithContext(ctx, payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Context) assert.Nil(t, res.Body) @@ -212,7 +213,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -287,11 +288,11 @@ func Test_StaticPool_Replace_Worker(t *testing.T) { var lastPID string lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - res, _ := p.Exec(internal.Payload{Body: []byte("hello")}) + res, _ := p.Exec(payload.Payload{Body: []byte("hello")}) assert.Equal(t, lastPID, string(res.Body)) for i := 0; i < 10; i++ { - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -323,14 +324,14 @@ func Test_StaticPool_Debug_Worker(t *testing.T) { assert.Len(t, p.Workers(), 0) var lastPID string - res, _ := p.Exec(internal.Payload{Body: []byte("hello")}) + res, _ := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NotEqual(t, lastPID, string(res.Body)) assert.Len(t, p.Workers(), 0) for i := 0; i < 10; i++ { assert.Len(t, p.Workers(), 0) - res, err := 
p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -363,14 +364,14 @@ func Test_StaticPool_Stop_Worker(t *testing.T) { var lastPID string lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) if err != nil { t.Fatal(err) } assert.Equal(t, lastPID, string(res.Body)) for i := 0; i < 10; i++ { - res, err := p.Exec(internal.Payload{Body: []byte("hello")}) + res, err := p.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -400,7 +401,7 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) { assert.NoError(t, err) p.Destroy(ctx) - _, err = p.Exec(internal.Payload{Body: []byte("100")}) + _, err = p.Exec(payload.Payload{Body: []byte("100")}) assert.Error(t, err) } @@ -422,7 +423,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { assert.NoError(t, err) go func() { - _, err := p.Exec(internal.Payload{Body: []byte("100")}) + _, err := p.Exec(payload.Payload{Body: []byte("100")}) if err != nil { t.Errorf("error executing payload: error %v", err) } @@ -430,7 +431,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { time.Sleep(time.Millisecond * 10) p.Destroy(ctx) - _, err = p.Exec(internal.Payload{Body: []byte("100")}) + _, err = p.Exec(payload.Payload{Body: []byte("100")}) assert.Error(t, err) } @@ -456,7 +457,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) { w.State().Set(internal.StateErrored) } - _, err = p.Exec(internal.Payload{Body: []byte("hello")}) + _, err = p.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) } @@ -494,7 +495,7 @@ func Benchmark_Pool_Echo(b *testing.B) { b.ResetTimer() b.ReportAllocs() for n := 0; n < b.N; n++ { - if _, err := p.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -520,7 +521,7 @@ func Benchmark_Pool_Echo_Batched(b *testing.B) { wg.Add(1) go func() { defer wg.Done() - if _, err := p.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() log.Println(err) } @@ -549,7 +550,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) { b.ReportAllocs() for n := 0; n < b.N; n++ { - if _, err := p.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() log.Println(err) } diff --git a/pkg/pool/supervisor_pool.go b/pkg/pool/supervisor_pool.go index 6d1f0c58..6faa609c 100755 --- a/pkg/pool/supervisor_pool.go +++ b/pkg/pool/supervisor_pool.go @@ -11,6 +11,7 @@ import ( "github.com/spiral/roadrunner/v2/interfaces/pool" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" ) const MB = 1024 * 1024 @@ -42,10 +43,10 @@ func newPoolWatcher(pool pool.Pool, events events.Handler, cfg *SupervisorConfig type ttlExec struct { err error - p internal.Payload + p payload.Payload } -func (sp *supervised) ExecWithContext(ctx context.Context, rqs internal.Payload) (internal.Payload, error) { +func (sp *supervised) ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error) { const op = errors.Op("exec_supervised") if sp.cfg.ExecTTL == 0 { return sp.pool.Exec(rqs) @@ -59,7 +60,7 @@ func 
(sp *supervised) ExecWithContext(ctx context.Context, rqs internal.Payload) if err != nil { c <- ttlExec{ err: errors.E(op, err), - p: internal.Payload{}, + p: payload.Payload{}, } } @@ -72,10 +73,10 @@ func (sp *supervised) ExecWithContext(ctx context.Context, rqs internal.Payload) for { select { case <-ctx.Done(): - return internal.Payload{}, errors.E(op, errors.TimeOut, ctx.Err()) + return payload.Payload{}, errors.E(op, errors.TimeOut, ctx.Err()) case res := <-c: if res.err != nil { - return internal.Payload{}, res.err + return payload.Payload{}, res.err } return res.p, nil @@ -83,11 +84,11 @@ func (sp *supervised) ExecWithContext(ctx context.Context, rqs internal.Payload) } } -func (sp *supervised) Exec(p internal.Payload) (internal.Payload, error) { +func (sp *supervised) Exec(p payload.Payload) (payload.Payload, error) { const op = errors.Op("supervised exec") rsp, err := sp.pool.Exec(p) if err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } return rsp, nil } diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go index 2e3e7fd2..7dd423b8 100644 --- a/pkg/pool/supervisor_test.go +++ b/pkg/pool/supervisor_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/spiral/roadrunner/v2" - "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" "github.com/spiral/roadrunner/v2/pkg/pipe" "github.com/stretchr/testify/assert" ) @@ -61,7 +61,7 @@ func TestSupervisedPool_Exec(t *testing.T) { for i := 0; i < 100; i++ { time.Sleep(time.Millisecond * 50) - _, err = p.Exec(internal.Payload{ + _, err = p.Exec(payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -98,7 +98,7 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) { pid := p.Workers()[0].Pid() - resp, err := p.ExecWithContext(context.Background(), internal.Payload{ + resp, err := p.ExecWithContext(context.Background(), payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -139,7 +139,7 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) { pid := p.Workers()[0].Pid() time.Sleep(time.Millisecond * 100) - resp, err := p.Exec(internal.Payload{ + resp, err := p.Exec(payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) diff --git a/pkg/socket/socket_factory.go b/pkg/socket/socket_factory.go index f721ad66..b08d24e4 100755 --- a/pkg/socket/socket_factory.go +++ b/pkg/socket/socket_factory.go @@ -9,11 +9,12 @@ import ( "github.com/shirou/gopsutil/process" "github.com/spiral/errors" + "github.com/spiral/goridge/v3/interfaces/relay" + "github.com/spiral/goridge/v3/pkg/socket" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" workerImpl "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/spiral/goridge/v3" "go.uber.org/multierr" "golang.org/x/sync/errgroup" ) @@ -65,7 +66,7 @@ func (f *Factory) listen() error { return err } - rl := goridge.NewSocketRelay(conn) + rl := socket.NewSocketRelay(conn) pid, err := internal.FetchPID(rl) if err != nil { return err @@ -178,7 +179,7 @@ func (f *Factory) Close(ctx context.Context) error { } // waits for Process to connect over socket and returns associated relay of timeout -func (f *Factory) findRelayWithContext(ctx context.Context, w worker.BaseProcess) (*goridge.SocketRelay, error) { +func (f *Factory) findRelayWithContext(ctx context.Context, w worker.BaseProcess) (*socket.Relay, error) { ticker := time.NewTicker(time.Millisecond * 100) for { select { @@ -194,12 +195,12 @@ func (f *Factory) findRelayWithContext(ctx 
context.Context, w worker.BaseProcess if !ok { continue } - return tmp.(*goridge.SocketRelay), nil + return tmp.(*socket.Relay), nil } } } -func (f *Factory) findRelay(w worker.BaseProcess) (*goridge.SocketRelay, error) { +func (f *Factory) findRelay(w worker.BaseProcess) (*socket.Relay, error) { const op = errors.Op("find_relay") // poll every 1ms for the relay pollDone := time.NewTimer(f.tout) @@ -212,13 +213,13 @@ func (f *Factory) findRelay(w worker.BaseProcess) (*goridge.SocketRelay, error) if !ok { continue } - return tmp.(*goridge.SocketRelay), nil + return tmp.(*socket.Relay), nil } } } // chan to store relay associated with specific pid -func (f *Factory) attachRelayToPid(pid int64, relay goridge.Relay) { +func (f *Factory) attachRelayToPid(pid int64, relay relay.Relay) { f.relays.Store(pid, relay) } diff --git a/pkg/socket/socket_factory_test.go b/pkg/socket/socket_factory_test.go index f1a7d637..6a88713a 100755 --- a/pkg/socket/socket_factory_test.go +++ b/pkg/socket/socket_factory_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" "github.com/spiral/roadrunner/v2/pkg/worker" "github.com/stretchr/testify/assert" ) @@ -211,7 +211,7 @@ func Test_Tcp_Broken(t *testing.T) { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Body) assert.Nil(t, res.Context) @@ -251,7 +251,7 @@ func Test_Tcp_Echo(t *testing.T) { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -396,7 +396,7 @@ func Test_Unix_Broken(t *testing.T) { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Context) @@ -439,7 +439,7 @@ func Test_Unix_Echo(t *testing.T) { t.Fatal(err) } - res, err := sw.Exec(internal.Payload{Body: []byte("hello")}) + res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -515,7 +515,7 @@ func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) { } for n := 0; n < b.N; n++ { - if _, err := sw.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -583,7 +583,7 @@ func Benchmark_Unix_Worker_ExecEcho(b *testing.B) { } for n := 0; n < b.N; n++ { - if _, err := sw.Exec(internal.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } diff --git a/pkg/worker/sync_worker.go b/pkg/worker/sync_worker.go index 1eb1396e..eacb8a8a 100755 --- a/pkg/worker/sync_worker.go +++ b/pkg/worker/sync_worker.go @@ -6,12 +6,13 @@ import ( "time" "github.com/spiral/errors" + "github.com/spiral/goridge/v3/interfaces/relay" + "github.com/spiral/goridge/v3/pkg/frame" "github.com/spiral/roadrunner/v2/interfaces/events" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" "go.uber.org/multierr" - - "github.com/spiral/goridge/v3" ) type syncWorker struct { @@ -26,14 +27,14 @@ func From(w worker.BaseProcess) (worker.SyncWorker, error) { } // Exec payload without TTL timeout. 
-func (tw *syncWorker) Exec(p internal.Payload) (internal.Payload, error) { +func (tw *syncWorker) Exec(p payload.Payload) (payload.Payload, error) { const op = errors.Op("sync worker Exec") if len(p.Body) == 0 && len(p.Context) == 0 { - return internal.Payload{}, errors.E(op, errors.Str("payload can not be empty")) + return payload.Payload{}, errors.E(op, errors.Str("payload can not be empty")) } if tw.w.State().Value() != internal.StateReady { - return internal.Payload{}, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.w.State().String())) + return payload.Payload{}, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.w.State().String())) } // set last used time @@ -47,7 +48,7 @@ func (tw *syncWorker) Exec(p internal.Payload) (internal.Payload, error) { tw.w.State().Set(internal.StateErrored) tw.w.State().RegisterExec() } - return internal.Payload{}, err + return payload.Payload{}, err } tw.w.State().Set(internal.StateReady) @@ -57,18 +58,18 @@ func (tw *syncWorker) Exec(p internal.Payload) (internal.Payload, error) { } type wexec struct { - payload internal.Payload + payload payload.Payload err error } // Exec payload without TTL timeout. -func (tw *syncWorker) ExecWithContext(ctx context.Context, p internal.Payload) (internal.Payload, error) { +func (tw *syncWorker) ExecWithContext(ctx context.Context, p payload.Payload) (payload.Payload, error) { const op = errors.Op("ExecWithContext") c := make(chan wexec, 1) go func() { if len(p.Body) == 0 && len(p.Context) == 0 { c <- wexec{ - payload: internal.Payload{}, + payload: payload.Payload{}, err: errors.E(op, errors.Str("payload can not be empty")), } return @@ -76,7 +77,7 @@ func (tw *syncWorker) ExecWithContext(ctx context.Context, p internal.Payload) ( if tw.w.State().Value() != internal.StateReady { c <- wexec{ - payload: internal.Payload{}, + payload: payload.Payload{}, err: errors.E(op, errors.Errorf("Process is not ready (%s)", tw.w.State().String())), } return @@ -94,7 +95,7 @@ func (tw *syncWorker) ExecWithContext(ctx context.Context, p internal.Payload) ( tw.w.State().RegisterExec() } c <- wexec{ - payload: internal.Payload{}, + payload: payload.Payload{}, err: errors.E(op, err), } return @@ -113,22 +114,22 @@ func (tw *syncWorker) ExecWithContext(ctx context.Context, p internal.Payload) ( case <-ctx.Done(): err := multierr.Combine(tw.Kill()) if err != nil { - return internal.Payload{}, multierr.Append(err, ctx.Err()) + return payload.Payload{}, multierr.Append(err, ctx.Err()) } - return internal.Payload{}, ctx.Err() + return payload.Payload{}, ctx.Err() case res := <-c: if res.err != nil { - return internal.Payload{}, res.err + return payload.Payload{}, res.err } return res.payload, nil } } -func (tw *syncWorker) execPayload(p internal.Payload) (internal.Payload, error) { - const op = errors.Op("exec payload") +func (tw *syncWorker) execPayload(p payload.Payload) (payload.Payload, error) { + const op = errors.Op("exec pl") - frame := goridge.NewFrame() - frame.WriteVersion(goridge.VERSION_1) + fr := frame.NewFrame() + fr.WriteVersion(frame.VERSION_1) // can be 0 here buf := new(bytes.Buffer) @@ -136,50 +137,50 @@ func (tw *syncWorker) execPayload(p internal.Payload) (internal.Payload, error) buf.Write(p.Body) // Context offset - frame.WriteOptions(uint32(len(p.Context))) - frame.WritePayloadLen(uint32(buf.Len())) - frame.WritePayload(buf.Bytes()) + fr.WriteOptions(uint32(len(p.Context))) + fr.WritePayloadLen(uint32(buf.Len())) + fr.WritePayload(buf.Bytes()) - frame.WriteCRC() + fr.WriteCRC() // empty and free 
the buffer buf.Truncate(0) - err := tw.Relay().Send(frame) + err := tw.Relay().Send(fr) if err != nil { - return internal.Payload{}, err + return payload.Payload{}, err } - frameR := goridge.NewFrame() + frameR := frame.NewFrame() err = tw.w.Relay().Receive(frameR) if err != nil { - return internal.Payload{}, errors.E(op, err) + return payload.Payload{}, errors.E(op, err) } if frameR == nil { - return internal.Payload{}, errors.E(op, errors.Str("nil frame received")) + return payload.Payload{}, errors.E(op, errors.Str("nil fr received")) } if !frameR.VerifyCRC() { - return internal.Payload{}, errors.E(op, errors.Str("failed to verify CRC")) + return payload.Payload{}, errors.E(op, errors.Str("failed to verify CRC")) } flags := frameR.ReadFlags() - if flags&byte(goridge.ERROR) != byte(0) { - return internal.Payload{}, errors.E(op, errors.ErrSoftJob, errors.Str(string(frameR.Payload()))) + if flags&byte(frame.ERROR) != byte(0) { + return payload.Payload{}, errors.E(op, errors.ErrSoftJob, errors.Str(string(frameR.Payload()))) } options := frameR.ReadOptions() if len(options) != 1 { - return internal.Payload{}, errors.E(op, errors.Str("options length should be equal 1 (body offset)")) + return payload.Payload{}, errors.E(op, errors.Str("options length should be equal 1 (body offset)")) } - payload := internal.Payload{} - payload.Context = frameR.Payload()[:options[0]] - payload.Body = frameR.Payload()[options[0]:] + pl := payload.Payload{} + pl.Context = frameR.Payload()[:options[0]] + pl.Body = frameR.Payload()[options[0]:] - return payload, nil + return pl, nil } func (tw *syncWorker) String() string { @@ -218,10 +219,10 @@ func (tw *syncWorker) Kill() error { return tw.w.Kill() } -func (tw *syncWorker) Relay() goridge.Relay { +func (tw *syncWorker) Relay() relay.Relay { return tw.w.Relay() } -func (tw *syncWorker) AttachRelay(rl goridge.Relay) { +func (tw *syncWorker) AttachRelay(rl relay.Relay) { tw.w.AttachRelay(rl) } diff --git a/pkg/worker/sync_worker_test.go b/pkg/worker/sync_worker_test.go index e224e105..40988b06 100755 --- a/pkg/worker/sync_worker_test.go +++ b/pkg/worker/sync_worker_test.go @@ -4,7 +4,7 @@ import ( "os/exec" "testing" - "github.com/spiral/roadrunner/v2/internal" + "github.com/spiral/roadrunner/v2/pkg/payload" "github.com/stretchr/testify/assert" ) @@ -27,7 +27,7 @@ func Test_NotStarted_Exec(t *testing.T) { t.Fatal(err) } - res, err := syncWorker.Exec(internal.Payload{Body: []byte("hello")}) + res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")}) assert.Error(t, err) assert.Nil(t, res.Body) diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go index 9a2e76b4..e60ab3f4 100755 --- a/pkg/worker/worker.go +++ b/pkg/worker/worker.go @@ -13,11 +13,11 @@ import ( "time" "github.com/spiral/errors" - "github.com/spiral/goridge/v3" + "github.com/spiral/goridge/v3/interfaces/relay" "github.com/spiral/roadrunner/v2/interfaces/events" "github.com/spiral/roadrunner/v2/interfaces/worker" "github.com/spiral/roadrunner/v2/internal" - events2 "github.com/spiral/roadrunner/v2/pkg/events" + eventsPkg "github.com/spiral/roadrunner/v2/pkg/events" "go.uber.org/multierr" ) @@ -67,7 +67,7 @@ type Process struct { mu sync.RWMutex // communication bus with underlying process. 
- relay goridge.Relay + relay relay.Relay // rd in a second part of pipe to read from stderr rd io.Reader // stop signal terminates io.Pipe from reading from stderr @@ -83,7 +83,7 @@ func InitBaseWorker(cmd *exec.Cmd) (worker.BaseProcess, error) { } w := &Process{ created: time.Now(), - events: events2.NewEventsHandler(), + events: eventsPkg.NewEventsHandler(), cmd: cmd, state: internal.NewWorkerState(internal.StateInactive), stderr: new(bytes.Buffer), @@ -134,13 +134,13 @@ func (w *Process) State() internal.State { // State return receive-only Process state object, state can be used to safely access // Process status, time when status changed and number of Process executions. -func (w *Process) AttachRelay(rl goridge.Relay) { +func (w *Process) AttachRelay(rl relay.Relay) { w.relay = rl } // State return receive-only Process state object, state can be used to safely access // Process status, time when status changed and number of Process executions. -func (w *Process) Relay() goridge.Relay { +func (w *Process) Relay() relay.Relay { return w.relay } |
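The largest behavioral hunk is execPayload in pkg/worker/sync_worker.go, which now builds goridge v3 frames through the split pkg/frame package: the request frame carries the protocol version, a single option holding the context length (the body offset), the concatenated context+body bytes, and a CRC; the response frame is split back at that offset, and a set ERROR flag is surfaced as errors.ErrSoftJob. The standalone helpers below sketch that wire layout for reference only. They reuse just the frame calls visible in the hunk and assume frame.NewFrame returns a *frame.Frame, as its usage here suggests; the package name frameutil is made up for the example.

package frameutil

import (
	"bytes"
	"errors"

	"github.com/spiral/goridge/v3/pkg/frame"
	"github.com/spiral/roadrunner/v2/pkg/payload"
)

// encode mirrors the request half of syncWorker.execPayload: context and body
// are concatenated and the context length is stored as the only frame option.
func encode(p payload.Payload) *frame.Frame {
	fr := frame.NewFrame()
	fr.WriteVersion(frame.VERSION_1)

	buf := new(bytes.Buffer)
	buf.Write(p.Context)
	buf.Write(p.Body)

	fr.WriteOptions(uint32(len(p.Context))) // body offset
	fr.WritePayloadLen(uint32(buf.Len()))
	fr.WritePayload(buf.Bytes())
	fr.WriteCRC()
	return fr
}

// decode reverses the split performed after Relay().Receive in the diff.
func decode(fr *frame.Frame) (payload.Payload, error) {
	if !fr.VerifyCRC() {
		return payload.Payload{}, errors.New("failed to verify CRC")
	}
	if fr.ReadFlags()&byte(frame.ERROR) != 0 {
		// sync_worker.go wraps this case as errors.ErrSoftJob.
		return payload.Payload{}, errors.New(string(fr.Payload()))
	}
	opts := fr.ReadOptions()
	if len(opts) != 1 {
		return payload.Payload{}, errors.New("options length should be equal 1 (body offset)")
	}
	return payload.Payload{
		Context: fr.Payload()[:opts[0]],
		Body:    fr.Payload()[opts[0]:],
	}, nil
}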