Diffstat (limited to 'pkg')
-rwxr-xr-x  pkg/events/general.go              |  2
-rw-r--r--  pkg/events/interface.go            |  4
-rw-r--r--  pkg/events/jobs_events.go          | 84
-rw-r--r--  pkg/events/pool_events.go          |  2
-rw-r--r--  pkg/events/worker_events.go        |  2
-rw-r--r--  pkg/pool/config.go                 |  2
-rwxr-xr-x  pkg/pool/static_pool.go            |  4
-rwxr-xr-x  pkg/pool/static_pool_test.go       | 32
-rw-r--r--  pkg/pool/supervisor_test.go        | 10
-rw-r--r--  pkg/priority_queue/binary_heap.go  | 12
-rw-r--r--  pkg/priority_queue/interface.go    |  7
-rw-r--r--  pkg/priority_queue/queue.go        | 21
-rw-r--r--  pkg/pubsub/interface.go            | 54
-rw-r--r--  pkg/pubsub/psmessage.go            | 15
14 files changed, 154 insertions, 97 deletions
diff --git a/pkg/events/general.go b/pkg/events/general.go
index a09a8759..5cf13e10 100755
--- a/pkg/events/general.go
+++ b/pkg/events/general.go
@@ -4,6 +4,8 @@ import (
"sync"
)
+const UnknownEventType string = "Unknown event type"
+
// HandlerImpl helps to broadcast events to multiple listeners.
type HandlerImpl struct {
listeners []Listener
diff --git a/pkg/events/interface.go b/pkg/events/interface.go
index ac6c15a4..7d57e4d0 100644
--- a/pkg/events/interface.go
+++ b/pkg/events/interface.go
@@ -2,7 +2,7 @@ package events
// Handler interface
type Handler interface {
- // Return number of active listeners
+ // NumListeners returns the number of active listeners
NumListeners() int
// AddListener adds lister to the publisher
AddListener(listener Listener)
@@ -10,5 +10,5 @@ type Handler interface {
Push(e interface{})
}
-// Event listener listens for the events produced by worker, worker pool or other service.
+// Listener is a type alias for an event listener that handles events produced by a worker, worker pool or other service.
type Listener func(event interface{})
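For reference, a minimal sketch of how a Listener attaches to the events Handler; the NewEventsHandler constructor and the roadrunner v2 import path are assumptions, not part of this diff:

package main

import (
	"log"

	"github.com/spiral/roadrunner/v2/pkg/events"
)

func main() {
	// assumed constructor returning the HandlerImpl from general.go
	eh := events.NewEventsHandler()

	// Listener is a plain func(event interface{}); type-switch on the payload as needed.
	eh.AddListener(func(event interface{}) {
		log.Printf("received event: %+v", event)
	})

	log.Printf("active listeners: %d", eh.NumListeners())
	eh.Push("hello") // any value can be pushed to the listeners
}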
diff --git a/pkg/events/jobs_events.go b/pkg/events/jobs_events.go
new file mode 100644
index 00000000..ed07c7da
--- /dev/null
+++ b/pkg/events/jobs_events.go
@@ -0,0 +1,84 @@
+package events
+
+import (
+ "time"
+)
+
+const (
+ // EventPushOK thrown when new job has been added. JobEvent is passed as context.
+ EventPushOK = iota + 12000
+
+ // EventPushError caused when job can not be registered.
+ EventPushError
+
+ // EventJobStart thrown when new job received.
+ EventJobStart
+
+ // EventJobOK thrown when job execution is successfully completed. JobEvent is passed as context.
+ EventJobOK
+
+ // EventJobError thrown on all job related errors. See JobError as context.
+ EventJobError
+
+ // EventPipeConsume when pipeline consuming has been requested.
+ EventPipeConsume
+
+ // EventPipeActive when pipeline has started.
+ EventPipeActive
+
+ // EventPipeStop when pipeline has begun stopping.
+ EventPipeStop
+
+ // EventPipeStopped when pipeline has been stopped.
+ EventPipeStopped
+
+ // EventPipeError when pipeline specific error happen.
+ EventPipeError
+
+ // EventBrokerReady thrown when the broker is ready to accept/serve tasks.
+ EventBrokerReady
+)
+
+type J int64
+
+func (ev J) String() string {
+ switch ev {
+ case EventPushOK:
+ return "EventPushOK"
+ case EventPushError:
+ return "EventPushError"
+ case EventJobStart:
+ return "EventJobStart"
+ case EventJobOK:
+ return "EventJobOK"
+ case EventJobError:
+ return "EventJobError"
+ case EventPipeConsume:
+ return "EventPipeConsume"
+ case EventPipeActive:
+ return "EventPipeActive"
+ case EventPipeStop:
+ return "EventPipeStop"
+ case EventPipeStopped:
+ return "EventPipeStopped"
+ case EventPipeError:
+ return "EventPipeError"
+ case EventBrokerReady:
+ return "EventBrokerReady"
+ }
+ return UnknownEventType
+}
+
+// JobEvent represents a job event.
+type JobEvent struct {
+ Event J
+ // ID is the job id.
+ ID string
+
+ // Job is the failed job.
+ Job interface{} // this is *jobs.Job, but interface used to avoid package import
+
+ // event timings
+ Start time.Time
+ Elapsed time.Duration
+}
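A short, hedged example of how these constants and JobEvent might be emitted through the events Handler; the NewEventsHandler constructor, import path, and job id are assumed for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/events"
)

func main() {
	eh := events.NewEventsHandler() // assumed constructor

	eh.AddListener(func(event interface{}) {
		if je, ok := event.(events.JobEvent); ok {
			// J.String() maps the numeric constant back to its name
			fmt.Printf("%s: job %s finished in %s\n", je.Event.String(), je.ID, je.Elapsed)
		}
	})

	start := time.Now()
	eh.Push(events.JobEvent{
		Event:   events.EventJobOK,
		ID:      "job-1", // hypothetical job id
		Start:   start,
		Elapsed: time.Since(start),
	})
}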
diff --git a/pkg/events/pool_events.go b/pkg/events/pool_events.go
index e7b451e0..4d4cae5d 100644
--- a/pkg/events/pool_events.go
+++ b/pkg/events/pool_events.go
@@ -57,7 +57,7 @@ func (ev P) String() string {
case EventPoolRestart:
return "EventPoolRestart"
}
- return "Unknown event type"
+ return UnknownEventType
}
// PoolEvent triggered by pool on different events. Pool as also trigger WorkerEvent in case of log.
diff --git a/pkg/events/worker_events.go b/pkg/events/worker_events.go
index 11bd6ab7..39c38e57 100644
--- a/pkg/events/worker_events.go
+++ b/pkg/events/worker_events.go
@@ -20,7 +20,7 @@ func (ev W) String() string {
case EventWorkerStderr:
return "EventWorkerStderr"
}
- return "Unknown event type"
+ return UnknownEventType
}
// WorkerEvent wraps worker events.
diff --git a/pkg/pool/config.go b/pkg/pool/config.go
index 2a3dabe4..3a058956 100644
--- a/pkg/pool/config.go
+++ b/pkg/pool/config.go
@@ -5,7 +5,7 @@ import (
"time"
)
-// Configures the pool behavior.
+// Config configures the pool behavior.
type Config struct {
// Debug flag creates new fresh worker before every request.
Debug bool
diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go
index ab025fa1..74e06b81 100755
--- a/pkg/pool/static_pool.go
+++ b/pkg/pool/static_pool.go
@@ -26,7 +26,7 @@ type Command func() *exec.Cmd
// StaticPool controls worker creation, destruction and task routing. Pool uses fixed amount of stack.
type StaticPool struct {
- cfg Config
+ cfg *Config
// worker command creator
cmd Command
@@ -51,7 +51,7 @@ type StaticPool struct {
}
// Initialize creates new worker pool and task multiplexer. StaticPool will initiate with one worker.
-func Initialize(ctx context.Context, cmd Command, factory transport.Factory, cfg Config, options ...Options) (Pool, error) {
+func Initialize(ctx context.Context, cmd Command, factory transport.Factory, cfg *Config, options ...Options) (Pool, error) {
const op = errors.Op("static_pool_initialize")
if factory == nil {
return nil, errors.E(op, errors.Str("no factory initialized"))
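Since cfg is now a *Config, callers pass the address of the configuration. A minimal sketch of the new call shape (the import paths and the PHP worker path mirror the tests below and are assumptions outside this diff):

package main

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/pool"
	"github.com/spiral/roadrunner/v2/pkg/transport/pipe"
)

func main() {
	ctx := context.Background()

	p, err := pool.Initialize(
		ctx,
		func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
		pipe.NewPipeFactory(),
		&pool.Config{ // note the pointer: Initialize now takes *Config
			NumWorkers:      4,
			AllocateTimeout: time.Second,
			DestroyTimeout:  time.Second,
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Destroy(ctx)
}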
diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go
index 6f875072..f264c6dc 100755
--- a/pkg/pool/static_pool_test.go
+++ b/pkg/pool/static_pool_test.go
@@ -20,7 +20,7 @@ import (
"github.com/stretchr/testify/assert"
)
-var cfg = Config{
+var cfg = &Config{
NumWorkers: uint64(runtime.NumCPU()),
AllocateTimeout: time.Second * 5,
DestroyTimeout: time.Second * 5,
@@ -58,7 +58,7 @@ func Test_ConfigNoErrorInitDefaults(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
},
@@ -214,7 +214,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) {
}
}
- var cfg2 = Config{
+ var cfg2 = &Config{
NumWorkers: 1,
AllocateTimeout: time.Second * 5,
DestroyTimeout: time.Second * 5,
@@ -264,7 +264,7 @@ func Test_StaticPool_AllocateTimeout(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
AllocateTimeout: time.Nanosecond * 1,
DestroyTimeout: time.Second * 2,
@@ -283,7 +283,7 @@ func Test_StaticPool_Replace_Worker(t *testing.T) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
MaxJobs: 1,
AllocateTimeout: time.Second,
@@ -320,7 +320,7 @@ func Test_StaticPool_Debug_Worker(t *testing.T) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
Debug: true,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -360,7 +360,7 @@ func Test_StaticPool_Stop_Worker(t *testing.T) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -400,7 +400,7 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -422,7 +422,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -452,7 +452,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 5,
AllocateTimeout: time.Second * 100,
DestroyTimeout: time.Second,
@@ -476,7 +476,7 @@ func Test_Static_Pool_Slow_Destroy(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 5,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -506,7 +506,7 @@ func Test_StaticPool_NoFreeWorkers(t *testing.T) {
// sleep for the 3 seconds
func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
Debug: false,
NumWorkers: 1,
AllocateTimeout: time.Second,
@@ -539,7 +539,7 @@ func Test_Static_Pool_WrongCommand1(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("phg", "../../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 5,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -556,7 +556,7 @@ func Test_Static_Pool_WrongCommand2(t *testing.T) {
context.Background(),
func() *exec.Cmd { return exec.Command("php", "", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 5,
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -595,7 +595,7 @@ func Benchmark_Pool_Echo_Batched(b *testing.B) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: uint64(runtime.NumCPU()),
AllocateTimeout: time.Second * 100,
DestroyTimeout: time.Second,
@@ -626,7 +626,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) {
ctx,
func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
- Config{
+ &Config{
NumWorkers: 1,
MaxJobs: 1,
AllocateTimeout: time.Second,
diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go
index dc307c33..348622c7 100644
--- a/pkg/pool/supervisor_test.go
+++ b/pkg/pool/supervisor_test.go
@@ -12,7 +12,7 @@ import (
"github.com/stretchr/testify/assert"
)
-var cfgSupervised = Config{
+var cfgSupervised = &Config{
NumWorkers: uint64(1),
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -82,7 +82,7 @@ func TestSupervisedPool_ExecWithDebugMode(t *testing.T) {
}
func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
- var cfgExecTTL = Config{
+ var cfgExecTTL = &Config{
NumWorkers: uint64(1),
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -123,7 +123,7 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
}
func TestSupervisedPool_Idle(t *testing.T) {
- var cfgExecTTL = Config{
+ var cfgExecTTL = &Config{
NumWorkers: uint64(1),
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -171,7 +171,7 @@ func TestSupervisedPool_Idle(t *testing.T) {
}
func TestSupervisedPool_ExecTTL_OK(t *testing.T) {
- var cfgExecTTL = Config{
+ var cfgExecTTL = &Config{
NumWorkers: uint64(1),
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
@@ -213,7 +213,7 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) {
}
func TestSupervisedPool_MaxMemoryReached(t *testing.T) {
- var cfgExecTTL = Config{
+ var cfgExecTTL = &Config{
NumWorkers: uint64(1),
AllocateTimeout: time.Second,
DestroyTimeout: time.Second,
diff --git a/pkg/priority_queue/binary_heap.go b/pkg/priority_queue/binary_heap.go
new file mode 100644
index 00000000..c660ddb6
--- /dev/null
+++ b/pkg/priority_queue/binary_heap.go
@@ -0,0 +1,12 @@
+/*
+binary heap (min-heap) algorithm used as a core for the priority queue
+*/
+
+package priorityqueue
+
+type BinHeap struct {
+}
+
+func NewBinHeap() *BinHeap {
+ return &BinHeap{}
+}
diff --git a/pkg/priority_queue/interface.go b/pkg/priority_queue/interface.go
new file mode 100644
index 00000000..d1c3229f
--- /dev/null
+++ b/pkg/priority_queue/interface.go
@@ -0,0 +1,7 @@
+package priorityqueue
+
+type Queue interface {
+ Push()
+ Pop()
+ BLPop()
+}
diff --git a/pkg/priority_queue/queue.go b/pkg/priority_queue/queue.go
new file mode 100644
index 00000000..88d18acb
--- /dev/null
+++ b/pkg/priority_queue/queue.go
@@ -0,0 +1,21 @@
+package priorityqueue
+
+type QueueImpl struct {
+}
+
+func NewPriorityQueue() *QueueImpl {
+ return nil
+}
+
+// Push the task
+func (q *QueueImpl) Push() {
+
+}
+
+func (q *QueueImpl) Pop() {
+
+}
+
+func (q *QueueImpl) BLPop() {
+
+}
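The new priority_queue package is still a scaffold: BinHeap and QueueImpl carry no state, NewPriorityQueue returns nil, and the Queue methods take no arguments yet. As a direction only, a blocking min-heap core along these lines could back the interface; the item type, priority field, and blocking semantics are assumptions, not part of this commit:

package priorityqueue

import (
	"container/heap"
	"sync"
)

// item is a hypothetical queue entry; this commit does not define one yet.
type item struct {
	payload  interface{}
	priority uint64
}

// items implements heap.Interface as a min-heap ordered by priority.
type items []item

func (it items) Len() int            { return len(it) }
func (it items) Less(i, j int) bool  { return it[i].priority < it[j].priority }
func (it items) Swap(i, j int)       { it[i], it[j] = it[j], it[i] }
func (it *items) Push(x interface{}) { *it = append(*it, x.(item)) }
func (it *items) Pop() interface{} {
	old := *it
	n := len(old)
	last := old[n-1]
	*it = old[:n-1]
	return last
}

// minQueue sketches the blocking min-heap that the BinHeap stub hints at.
type minQueue struct {
	mu   sync.Mutex
	cond *sync.Cond
	heap items
}

func newMinQueue() *minQueue {
	q := &minQueue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// push inserts an item and wakes one waiting consumer.
func (q *minQueue) push(it item) {
	q.mu.Lock()
	heap.Push(&q.heap, it)
	q.mu.Unlock()
	q.cond.Signal()
}

// blpop blocks until an item is available, then returns the lowest-priority item.
func (q *minQueue) blpop() item {
	q.mu.Lock()
	defer q.mu.Unlock()
	for q.heap.Len() == 0 {
		q.cond.Wait()
	}
	return heap.Pop(&q.heap).(item)
}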
diff --git a/pkg/pubsub/interface.go b/pkg/pubsub/interface.go
deleted file mode 100644
index 06252d70..00000000
--- a/pkg/pubsub/interface.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package pubsub
-
-/*
-This interface is in BETA. It might be changed.
-*/
-
-// PubSub interface designed to implement on any storage type to provide pub-sub abilities
-// Publisher used to receive messages from the PHP app via RPC
-// Subscriber should be implemented to subscribe to a topics and provide a connections list per topic
-// Reader return next message from the channel
-type PubSub interface {
- Publisher
- Subscriber
- Reader
-}
-
-type SubReader interface {
- Subscriber
- Reader
-}
-
-// Subscriber defines the ability to operate as message passing broker.
-// BETA interface
-type Subscriber interface {
- // Subscribe broker to one or multiple topics.
- Subscribe(connectionID string, topics ...string) error
-
- // Unsubscribe from one or multiply topics
- Unsubscribe(connectionID string, topics ...string) error
-
- // Connections returns all connections associated with the particular topic
- Connections(topic string, ret map[string]struct{})
-}
-
-// Publisher publish one or more messages
-// BETA interface
-type Publisher interface {
- // Publish one or multiple Channel.
- Publish(message *Message) error
-
- // PublishAsync publish message and return immediately
- // If error occurred it will be printed into the logger
- PublishAsync(message *Message)
-}
-
-// Reader interface should return next message
-type Reader interface {
- Next() (*Message, error)
-}
-
-// Constructor is a special pub-sub interface made to return a constructed PubSub type
-type Constructor interface {
- PSConstruct(key string) (PubSub, error)
-}
diff --git a/pkg/pubsub/psmessage.go b/pkg/pubsub/psmessage.go
deleted file mode 100644
index e33d9284..00000000
--- a/pkg/pubsub/psmessage.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pubsub
-
-import json "github.com/json-iterator/go"
-
-// Message represents a single message with payload bound to a particular topic
-type Message struct {
- // Topic (channel in terms of redis)
- Topic string `json:"topic"`
- // Payload (on some decode stages might be represented as base64 string)
- Payload []byte `json:"payload"`
-}
-
-func (m *Message) MarshalBinary() (data []byte, err error) {
- return json.Marshal(m)
-}