209 files changed, 10302 insertions, 897 deletions
diff --git a/.dockerignore b/.dockerignore
index bfa82a3d..b817b3c8 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,4 +7,4 @@
 /tests
 /bin
 composer.json
-vendor_php
\ No newline at end of file +vendor_php diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2c561205..dac8ce13 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,4 +9,8 @@ updates: directory: "/" # Location of package manifests schedule: interval: daily - + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: daily diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml index cee7085c..24d839e5 100644 --- a/.github/workflows/linters.yml +++ b/.github/workflows/linters.yml @@ -13,6 +13,6 @@ jobs: - name: Run linter uses: golangci/golangci-lint-action@v2 # Action page: <https://github.com/golangci/golangci-lint-action> with: - version: v1.40 # without patch version + version: v1.41 # without patch version only-new-issues: false # show only new issues if it's a pull request args: --timeout=10m diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 62987771..de1fcba3 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -24,7 +24,7 @@ jobs: matrix: php: [ "7.4", "8.0" ] go: [ "1.16" ] - os: [ ubuntu-20.04 ] + os: [ ubuntu-latest ] steps: - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v2 # action page: <https://github.com/actions/setup-go> @@ -64,40 +64,10 @@ jobs: - name: Install Go dependencies run: go mod download - - name: Run golang tests on Linux - run: | - docker-compose -f ./tests/docker-compose.yaml up -d - mkdir ./coverage-ci - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipe.txt -covermode=atomic ./pkg/transport/pipe - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/socket.txt -covermode=atomic ./pkg/transport/socket - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pool.txt -covermode=atomic ./pkg/pool - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker.txt -covermode=atomic ./pkg/worker - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/bst.txt -covermode=atomic ./pkg/bst - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker_stack.txt -covermode=atomic ./pkg/worker_watcher - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http_config.txt -covermode=atomic ./plugins/http/config - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server_cmd.txt -covermode=atomic ./plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http.txt -covermode=atomic ./tests/plugins/http - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/informer.txt -covermode=atomic ./tests/plugins/informer - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/reload.txt -covermode=atomic ./tests/plugins/reload - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server.txt -covermode=atomic ./tests/plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/service.txt -covermode=atomic ./tests/plugins/service - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/status.txt -covermode=atomic ./tests/plugins/status - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/config.txt -covermode=atomic ./tests/plugins/config - go test -v -race -cover -tags=debug -coverpkg=./... 
-coverprofile=./coverage-ci/gzip.txt -covermode=atomic ./tests/plugins/gzip - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/headers.txt -covermode=atomic ./tests/plugins/headers - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/logger.txt -covermode=atomic ./tests/plugins/logger - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/metrics.txt -covermode=atomic ./tests/plugins/metrics - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/redis.txt -covermode=atomic ./tests/plugins/redis - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/resetter.txt -covermode=atomic ./tests/plugins/resetter - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/rpc.txt -covermode=atomic ./tests/plugins/rpc - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/kv_plugin.txt -covermode=atomic ./tests/plugins/kv - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/broadcast_plugin.txt -covermode=atomic ./tests/plugins/broadcast - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/websockets.txt -covermode=atomic ./tests/plugins/websockets - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/ws_origin.txt -covermode=atomic ./plugins/websockets - docker-compose -f ./tests/docker-compose.yaml down - cat ./coverage-ci/*.txt > ./coverage-ci/summary.txt + - name: Run golang tests with coverage + run: make test_coverage - - uses: codecov/codecov-action@v1 # Docs: <https://github.com/codecov/codecov-action> + - uses: codecov/codecov-action@v2 # Docs: <https://github.com/codecov/codecov-action> with: file: ./coverage-ci/summary.txt fail_ci_if_error: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fa1abfd..8000a622 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,28 @@ CHANGELOG ========= +v2.4.0 (_.08.2021) +------------------- + +## 💔 Internal BC: + +- 🔨 Pool, worker interfaces: payload now passed and returned by pointer. + +## 👀 New: + +- ✏️ Long awaited, reworked `Jobs` plugin with pluggable drivers. Now you can allocate/destroy pipelines in the runtime. + Drivers included in the initial release: `RabbitMQ (0-9-1)`, `SQS v2`, `beanstalk`, `ephemeral`. All jobs can be prioritized now --> [PR](https://github.com/spiral/roadrunner/pull/726) + +## 🩹 Fixes: + +- 🐛 Fix: + +## 📈 Summary: + +- RR Milestone [2.4.0]() + +--- + v2.3.2 (14.07.2021) ------------------- diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ae0b283a..49aeb3c8 100755 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -43,4 +43,4 @@ Project maintainers who do not follow or enforce the Code of Conduct in good fai This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ +[version]: https://www.contributor-covenant.org/version/2/0/code_of_conduct/ @@ -5,49 +5,56 @@ SHELL = /bin/sh test_coverage: - docker-compose -f tests/docker-compose.yaml up -d --remove-orphans - rm -rf coverage - mkdir coverage - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/pipe.out -covermode=atomic ./pkg/transport/pipe - go test -v -race -cover -tags=debug -coverpkg=./... 
-coverprofile=./coverage/socket.out -covermode=atomic ./pkg/transport/socket - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/pool.out -covermode=atomic ./pkg/pool - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/worker.out -covermode=atomic ./pkg/worker - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/worker_stack.out -covermode=atomic ./pkg/worker_watcher - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/bst.out -covermode=atomic ./pkg/bst - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/http.out -covermode=atomic ./tests/plugins/http - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/http_config.out -covermode=atomic ./plugins/http/config - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/server_cmd.out -covermode=atomic ./plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/informer.out -covermode=atomic ./tests/plugins/informer - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/reload.out -covermode=atomic ./tests/plugins/reload - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/server.out -covermode=atomic ./tests/plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/service.out -covermode=atomic ./tests/plugins/service - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/status.out -covermode=atomic ./tests/plugins/status - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/config.out -covermode=atomic ./tests/plugins/config - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/gzip.out -covermode=atomic ./tests/plugins/gzip - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/headers.out -covermode=atomic ./tests/plugins/headers - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/logger.out -covermode=atomic ./tests/plugins/logger - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/metrics.out -covermode=atomic ./tests/plugins/metrics - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/redis.out -covermode=atomic ./tests/plugins/redis - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/resetter.out -covermode=atomic ./tests/plugins/resetter - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/rpc.out -covermode=atomic ./tests/plugins/rpc - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/kv_plugin.out -covermode=atomic ./tests/plugins/kv - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/broadcast_plugin.out -covermode=atomic ./tests/plugins/broadcast - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/ws_plugin.out -covermode=atomic ./tests/plugins/websockets - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage/ws_origin.out -covermode=atomic ./plugins/websockets - cat ./coverage/*.out > ./coverage/summary.out - docker-compose -f tests/docker-compose.yaml down + docker-compose -f tests/env/docker-compose.yaml up -d --remove-orphans + rm -rf coverage-ci + mkdir ./coverage-ci + go test -v -race -cover -tags=debug -coverpkg=./... 
-coverprofile=./coverage-ci/pipe.txt -covermode=atomic ./pkg/transport/pipe + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/socket.txt -covermode=atomic ./pkg/transport/socket + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pool.txt -covermode=atomic ./pkg/pool + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker.txt -covermode=atomic ./pkg/worker + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/bst.txt -covermode=atomic ./pkg/bst + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pq.txt -covermode=atomic ./pkg/priority_queue + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker_stack.txt -covermode=atomic ./pkg/worker_watcher + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http_config.txt -covermode=atomic ./plugins/http/config + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server_cmd.txt -covermode=atomic ./plugins/server + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/struct_jobs.txt -covermode=atomic ./plugins/jobs/job + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipeline_jobs.txt -covermode=atomic ./plugins/jobs/pipeline + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http.txt -covermode=atomic ./tests/plugins/http + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/informer.txt -covermode=atomic ./tests/plugins/informer + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/reload.txt -covermode=atomic ./tests/plugins/reload + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server.txt -covermode=atomic ./tests/plugins/server + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/service.txt -covermode=atomic ./tests/plugins/service + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/status.txt -covermode=atomic ./tests/plugins/status + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/config.txt -covermode=atomic ./tests/plugins/config + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/gzip.txt -covermode=atomic ./tests/plugins/gzip + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/headers.txt -covermode=atomic ./tests/plugins/headers + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/logger.txt -covermode=atomic ./tests/plugins/logger + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/metrics.txt -covermode=atomic ./tests/plugins/metrics + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/redis.txt -covermode=atomic ./tests/plugins/redis + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/resetter.txt -covermode=atomic ./tests/plugins/resetter + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/rpc.txt -covermode=atomic ./tests/plugins/rpc + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/kv_plugin.txt -covermode=atomic ./tests/plugins/kv + go test -v -race -cover -tags=debug -coverpkg=./... 
-coverprofile=./coverage-ci/broadcast_plugin.txt -covermode=atomic ./tests/plugins/broadcast + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/websockets.txt -covermode=atomic ./tests/plugins/websockets + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/ws_origin.txt -covermode=atomic ./plugins/websockets + go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/jobs_core.txt -covermode=atomic ./tests/plugins/jobs + cat ./coverage-ci/*.txt > ./coverage-ci/summary.txt + docker-compose -f tests/env/docker-compose.yaml down test: ## Run application tests - docker-compose -f tests/docker-compose.yaml up -d + docker-compose -f tests/env/docker-compose.yaml up -d go test -v -race -tags=debug ./pkg/transport/pipe go test -v -race -tags=debug ./pkg/transport/socket go test -v -race -tags=debug ./pkg/pool go test -v -race -tags=debug ./pkg/worker go test -v -race -tags=debug ./pkg/worker_watcher go test -v -race -tags=debug ./pkg/bst - go test -v -race -tags=debug ./tests/plugins/http + go test -v -race -tags=debug ./pkg/priority_queue + go test -v -race -tags=debug ./plugins/jobs/job + go test -v -race -tags=debug ./plugins/jobs/pipeline go test -v -race -tags=debug ./plugins/http/config go test -v -race -tags=debug ./plugins/server + go test -v -race -tags=debug ./tests/plugins/http go test -v -race -tags=debug ./tests/plugins/informer go test -v -race -tags=debug ./tests/plugins/reload go test -v -race -tags=debug ./tests/plugins/server @@ -65,34 +72,39 @@ test: ## Run application tests go test -v -race -tags=debug ./tests/plugins/broadcast go test -v -race -tags=debug ./tests/plugins/websockets go test -v -race -tags=debug ./plugins/websockets - docker-compose -f tests/docker-compose.yaml down + go test -v -race -tags=debug ./tests/plugins/jobs + docker-compose -f tests/env/docker-compose.yaml down -testGo1.17beta1: ## Run application tests - docker-compose -f tests/docker-compose.yaml up -d - go1.17beta1 test -v -race -tags=debug ./pkg/transport/pipe - go1.17beta1 test -v -race -tags=debug ./pkg/transport/socket - go1.17beta1 test -v -race -tags=debug ./pkg/pool - go1.17beta1 test -v -race -tags=debug ./pkg/worker - go1.17beta1 test -v -race -tags=debug ./pkg/worker_watcher - go1.17beta1 test -v -race -tags=debug ./pkg/bst - go1.17beta1 test -v -race -tags=debug ./tests/plugins/http - go1.17beta1 test -v -race -tags=debug ./plugins/http/config - go1.17beta1 test -v -race -tags=debug ./plugins/server - go1.17beta1 test -v -race -tags=debug ./tests/plugins/informer - go1.17beta1 test -v -race -tags=debug ./tests/plugins/reload - go1.17beta1 test -v -race -tags=debug ./tests/plugins/server - go1.17beta1 test -v -race -tags=debug ./tests/plugins/service - go1.17beta1 test -v -race -tags=debug ./tests/plugins/status - go1.17beta1 test -v -race -tags=debug ./tests/plugins/config - go1.17beta1 test -v -race -tags=debug ./tests/plugins/gzip - go1.17beta1 test -v -race -tags=debug ./tests/plugins/headers - go1.17beta1 test -v -race -tags=debug ./tests/plugins/logger - go1.17beta1 test -v -race -tags=debug ./tests/plugins/metrics - go1.17beta1 test -v -race -tags=debug ./tests/plugins/redis - go1.17beta1 test -v -race -tags=debug ./tests/plugins/resetter - go1.17beta1 test -v -race -tags=debug ./tests/plugins/rpc - go1.17beta1 test -v -race -tags=debug ./tests/plugins/kv - go1.17beta1 test -v -race -tags=debug ./tests/plugins/websockets - go1.17beta1 test -v -race -tags=debug ./tests/plugins/broadcast - go1.17beta1 
test -v -race -tags=debug ./plugins/websockets - docker-compose -f tests/docker-compose.yaml down +testGo1.17rc1: ## Run application tests + docker-compose -f tests/env/docker-compose.yaml up -d + go1.17rc1 test -v -race -tags=debug ./pkg/transport/pipe + go1.17rc1 test -v -race -tags=debug ./pkg/transport/socket + go1.17rc1 test -v -race -tags=debug ./pkg/pool + go1.17rc1 test -v -race -tags=debug ./pkg/worker + go1.17rc1 test -v -race -tags=debug ./pkg/worker_watcher + go1.17rc1 test -v -race -tags=debug ./pkg/bst + go1.17rc1 test -v -race -tags=debug ./pkg/priority_queue + go1.17rc1 test -v -race -tags=debug ./plugins/jobs/job + go1.17rc1 test -v -race -tags=debug ./plugins/jobs/pipeline + go1.17rc1 test -v -race -tags=debug ./plugins/http/config + go1.17rc1 test -v -race -tags=debug ./plugins/server + go1.17rc1 test -v -race -tags=debug ./plugins/websockets + go1.17rc1 test -v -race -tags=debug ./tests/plugins/http + go1.17rc1 test -v -race -tags=debug ./tests/plugins/informer + go1.17rc1 test -v -race -tags=debug ./tests/plugins/reload + go1.17rc1 test -v -race -tags=debug ./tests/plugins/server + go1.17rc1 test -v -race -tags=debug ./tests/plugins/service + go1.17rc1 test -v -race -tags=debug ./tests/plugins/status + go1.17rc1 test -v -race -tags=debug ./tests/plugins/config + go1.17rc1 test -v -race -tags=debug ./tests/plugins/gzip + go1.17rc1 test -v -race -tags=debug ./tests/plugins/headers + go1.17rc1 test -v -race -tags=debug ./tests/plugins/logger + go1.17rc1 test -v -race -tags=debug ./tests/plugins/metrics + go1.17rc1 test -v -race -tags=debug ./tests/plugins/redis + go1.17rc1 test -v -race -tags=debug ./tests/plugins/resetter + go1.17rc1 test -v -race -tags=debug ./tests/plugins/rpc + go1.17rc1 test -v -race -tags=debug ./tests/plugins/kv + go1.17rc1 test -v -race -tags=debug ./tests/plugins/websockets + go1.17rc1 test -v -race -tags=debug ./tests/plugins/broadcast + go1.17rc1 test -v -race -tags=debug ./tests/plugins/jobs + docker-compose -f tests/env/docker-compose.yaml down @@ -3,10 +3,6 @@ status = [ 'Linux / Build (Go 1.15, PHP 7.4, OS ubuntu-20.04)', 'Linux / Build (Go 1.16, PHP 8.0, OS ubuntu-20.04)', 'Linux / Build (Go 1.15, PHP 8.0, OS ubuntu-20.04)', - 'macOS / Build (Go 1.16, PHP 7.4, OS macos-latest)', - 'macOS / Build (Go 1.15, PHP 7.4, OS macos-latest)', - 'macOS / Build (Go 1.16, PHP 8.0, OS macos-latest)', - 'macOS / Build (Go 1.15, PHP 8.0, OS macos-latest)', 'Linux / Golang-CI (lint) ', ] required_approvals = 0 diff --git a/common/doc.go b/common/doc.go new file mode 100644 index 00000000..adc03351 --- /dev/null +++ b/common/doc.go @@ -0,0 +1,9 @@ +/* +Package common used to collect common interfaces/structures which might be implemented (or imported) by a different plugins. +For example, 'pubsub' interface might be implemented by memory, redis, websockets and many other plugins. 
+ +Folders: +- kv - contains KV interfaces and structures +- pubsub - contains pub-sub interfaces and structures +*/ +package common diff --git a/common/jobs/interface.go b/common/jobs/interface.go new file mode 100644 index 00000000..c957df2b --- /dev/null +++ b/common/jobs/interface.go @@ -0,0 +1,26 @@ +package jobs + +import ( + "context" + + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" +) + +// Consumer todo naming +type Consumer interface { + Push(ctx context.Context, job *job.Job) error + Register(ctx context.Context, pipeline *pipeline.Pipeline) error + Run(ctx context.Context, pipeline *pipeline.Pipeline) error + Stop(ctx context.Context) error + + Pause(ctx context.Context, pipeline string) + Resume(ctx context.Context, pipeline string) +} + +type Constructor interface { + JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (Consumer, error) + FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (Consumer, error) +} diff --git a/plugins/kv/interface.go b/common/kv/interface.go index 5736a6a7..5736a6a7 100644 --- a/plugins/kv/interface.go +++ b/common/kv/interface.go diff --git a/pkg/pubsub/interface.go b/common/pubsub/interface.go index 06252d70..06252d70 100644 --- a/pkg/pubsub/interface.go +++ b/common/pubsub/interface.go diff --git a/pkg/pubsub/psmessage.go b/common/pubsub/psmessage.go index e33d9284..e33d9284 100644 --- a/pkg/pubsub/psmessage.go +++ b/common/pubsub/psmessage.go @@ -3,9 +3,19 @@ module github.com/spiral/roadrunner/v2 go 1.16 require ( + github.com/Shopify/toxiproxy v2.1.4+incompatible github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect github.com/alicebob/miniredis/v2 v2.14.5 + // ========= AWS SDK v2 + github.com/aws/aws-sdk-go-v2 v1.7.0 + github.com/aws/aws-sdk-go-v2/config v1.4.1 + github.com/aws/aws-sdk-go-v2/credentials v1.3.0 + github.com/aws/aws-sdk-go-v2/service/sqs v1.6.0 + github.com/aws/smithy-go v1.5.0 + // ===================== + github.com/beanstalkd/go-beanstalk v0.1.0 github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b + github.com/cenkalti/backoff/v4 v4.1.1 github.com/fasthttp/websocket v1.4.3 github.com/fatih/color v1.12.0 github.com/go-ole/go-ole v1.2.5 // indirect @@ -16,12 +26,13 @@ require ( github.com/json-iterator/go v1.1.11 github.com/klauspost/compress v1.13.0 github.com/prometheus/client_golang v1.10.0 + github.com/rabbitmq/amqp091-go v0.0.0-20210714180937-de74e8a7d0e0 github.com/shirou/gopsutil v3.21.3+incompatible github.com/spf13/viper v1.7.1 // SPIRAL ==== github.com/spiral/endure v1.0.2 github.com/spiral/errors v1.0.11 - github.com/spiral/goridge/v3 v3.1.4 + github.com/spiral/goridge/v3 v3.2.0 // =========== github.com/stretchr/testify v1.7.0 github.com/tklauser/go-sysconf v0.3.6 // indirect @@ -33,6 +44,6 @@ require ( golang.org/x/net v0.0.0-20210226101413-39120d07d75e golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/sys v0.0.0-20210514084401-e8d321eab015 - google.golang.org/protobuf v1.26.0 + google.golang.org/protobuf v1.27.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -17,6 +17,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY= github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -43,6 +44,28 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.7.0 h1:UYGnoIPIzed+ycmgw8Snb/0HK+KlMD+SndLTneG8ncE= +github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= +github.com/aws/aws-sdk-go-v2/config v1.4.1 h1:PcGp9Kf+1dHJmP3EIDZJmAmWfGABFTU0obuvYQNzWH8= +github.com/aws/aws-sdk-go-v2/config v1.4.1/go.mod h1:HCDWZ/oeY59TPtXslxlbkCqLQBsVu6b09kiG43tdP+I= +github.com/aws/aws-sdk-go-v2/credentials v1.3.0 h1:vXxTINCsHn6LKhR043jwSLd6CsL7KOEU7b1woMr1K1A= +github.com/aws/aws-sdk-go-v2/credentials v1.3.0/go.mod h1:tOcv+qDZ0O+6Jk2beMl5JnZX6N0H7O8fw9UsD3bP7GI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.2.0 h1:ucExzYCoAiL9GpKOsKkQLsa43wTT23tcdP4cDTSbZqY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.2.0/go.mod h1:XvzoGzuS0kKPzCQtJCC22Xh/mMgVAzfGo/0V+mk/Cu0= +github.com/aws/aws-sdk-go-v2/internal/ini v1.1.0 h1:DJq/vXXF+LAFaa/kQX9C6arlf4xX4uaaqGWIyAKOCpM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.1.0/go.mod h1:qGQ/9IfkZonRNSNLE99/yBJ7EPA/h8jlWEqtJCcaj+Q= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.0 h1:g2npzssI/6XsoQaPYCxliMFeC5iNKKvO0aC+/wWOE0A= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.0/go.mod h1:a7XLWNKuVgOxjssEF019IiHPv35k8KHBaWv/wJAfi2A= +github.com/aws/aws-sdk-go-v2/service/sqs v1.6.0 h1:45YlPhQ/U5v8QnzJFD1bWlTT4IA2NQ9tQ2D/AfyIX3Q= +github.com/aws/aws-sdk-go-v2/service/sqs v1.6.0/go.mod h1:8iLn005F6ASRIXmp6U4hfRAk8EHAtRPrx1oHyxxz2xg= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.0 h1:DMi9w+TpUam7eJ8ksL7svfzpqpqem2MkDAJKW8+I2/k= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.0/go.mod h1:qWR+TUuvfji9udM79e4CPe87C5+SjMEb2TFXkZaI0Vc= +github.com/aws/aws-sdk-go-v2/service/sts v1.5.0 h1:Y1K9dHE2CYOWOvaJSIITq4mJfLX43iziThTvqs5FqOg= +github.com/aws/aws-sdk-go-v2/service/sts v1.5.0/go.mod h1:HjDKUmissf6Mlut+WzG2r35r6LeTKmLEDJ6p9NryzLg= +github.com/aws/smithy-go v1.5.0 h1:2grDq7LxZlo8BZUDeqRfQnQWLZpInmh2TLPPkJku3YM= +github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/beanstalkd/go-beanstalk v0.1.0 h1:IiNwYbAoVBDs5xEOmleGoX+DRD3Moz99EpATbl8672w= +github.com/beanstalkd/go-beanstalk v0.1.0/go.mod h1:/G8YTyChOtpOArwLTQPY1CHB+i212+av35bkPXXj56Y= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -208,6 +231,8 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/inconshreveable/mousetrap 
v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -348,6 +373,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rabbitmq/amqp091-go v0.0.0-20210714180937-de74e8a7d0e0 h1:D5EMs8zL77qXFJ60vl7x5xRxtezkXsmr8mwypRk5Pe4= +github.com/rabbitmq/amqp091-go v0.0.0-20210714180937-de74e8a7d0e0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -388,8 +415,8 @@ github.com/spiral/endure v1.0.2/go.mod h1:/mnduq57eBKgKCwpuLgUp8Fn/c3h6JgWybG+0h github.com/spiral/errors v1.0.10/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o= github.com/spiral/errors v1.0.11 h1:TGG+t3mNouLuRW54Ph7nHo4X3u4WhbxqEQmnIybi7Go= github.com/spiral/errors v1.0.11/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o= -github.com/spiral/goridge/v3 v3.1.4 h1:5egVVTfaD1PO4MRgzU0yyog86pAh+JIOk7xhe7BtG40= -github.com/spiral/goridge/v3 v3.1.4/go.mod h1:swcWZW7nP+KU9rgyRf6w7CfNDCWRC/vePE2+AKtoqjk= +github.com/spiral/goridge/v3 v3.2.0 h1:JS0zcOgp1hxMzu2Uc6feKalt78hBLTufGEbtEXdWW2E= +github.com/spiral/goridge/v3 v3.2.0/go.mod h1:ekGaQYwbWOPVDwIrVxIY9Mwq2/+X/xt5sPsuC+t85Oo= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -653,8 +680,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/protocol.go b/internal/protocol.go index 7487b4f3..78174118 100755 --- a/internal/protocol.go +++ b/internal/protocol.go @@ -39,8 +39,8 @@ func SendControl(rl relay.Relay, payload interface{}) error { fr := getFrame() defer putFrame(fr) - fr.WriteVersion(frame.VERSION_1) - fr.WriteFlags(frame.CONTROL) + fr.WriteVersion(fr.Header(), frame.VERSION_1) + fr.WriteFlags(fr.Header(), frame.CONTROL) if data, ok := payload.([]byte); ok { // check if payload no more that 4Gb @@ -48,9 +48,9 @@ func SendControl(rl relay.Relay, payload interface{}) error { return errors.E(op, errors.Str("payload is more that 4gb")) } - fr.WritePayloadLen(uint32(len(data))) + fr.WritePayloadLen(fr.Header(), uint32(len(data))) fr.WritePayload(data) - fr.WriteCRC() + fr.WriteCRC(fr.Header()) err := rl.Send(fr) if err != nil { @@ -64,9 +64,9 @@ func SendControl(rl relay.Relay, payload interface{}) error { return errors.E(op, errors.Errorf("invalid payload: %s", err)) } - fr.WritePayloadLen(uint32(len(data))) + fr.WritePayloadLen(fr.Header(), uint32(len(data))) fr.WritePayload(data) - fr.WriteCRC() + fr.WriteCRC(fr.Header()) // hold a pointer to a frame // Do we need a copy here???? @@ -89,7 +89,7 @@ func FetchPID(rl relay.Relay) (int64, error) { defer putFrame(fr) err = rl.Receive(fr) - if !fr.VerifyCRC() { + if !fr.VerifyCRC(fr.Header()) { return 0, errors.E(op, errors.Str("CRC mismatch")) } if err != nil { diff --git a/pkg/events/general.go b/pkg/events/general.go index a09a8759..5cf13e10 100755 --- a/pkg/events/general.go +++ b/pkg/events/general.go @@ -4,6 +4,8 @@ import ( "sync" ) +const UnknownEventType string = "Unknown event type" + // HandlerImpl helps to broadcast events to multiple listeners. type HandlerImpl struct { listeners []Listener diff --git a/pkg/events/interface.go b/pkg/events/interface.go index ac6c15a4..7d57e4d0 100644 --- a/pkg/events/interface.go +++ b/pkg/events/interface.go @@ -2,7 +2,7 @@ package events // Handler interface type Handler interface { - // Return number of active listeners + // NumListeners return number of active listeners NumListeners() int // AddListener adds lister to the publisher AddListener(listener Listener) @@ -10,5 +10,5 @@ type Handler interface { Push(e interface{}) } -// Event listener listens for the events produced by worker, worker pool or other service. +// Listener .. (type alias) event listener listens for the events produced by worker, worker pool or other service. type Listener func(event interface{}) diff --git a/pkg/events/jobs_events.go b/pkg/events/jobs_events.go new file mode 100644 index 00000000..c0ee733a --- /dev/null +++ b/pkg/events/jobs_events.go @@ -0,0 +1,88 @@ +package events + +import ( + "time" +) + +const ( + // EventPushOK thrown when new job has been added. JobEvent is passed as context. + EventPushOK = iota + 12000 + + // EventPushError caused when job can not be registered. + EventPushError + + // EventJobStart thrown when new job received. + EventJobStart + + // EventJobOK thrown when job execution is successfully completed. JobEvent is passed as context. + EventJobOK + + // EventJobError thrown on all job related errors. See JobError as context. 
+ EventJobError + + // EventInitialized when pipeline has been initialized, but not started + EventInitialized + + // EventPipeActive when pipeline has started. + EventPipeActive + + // EventPipeStopped when pipeline has been stopped. + EventPipeStopped + + // EventPipePaused when pipeline has been paused. + EventPipePaused + + // EventPipeError when pipeline specific error happen. + EventPipeError + + // EventDriverReady thrown when broken is ready to accept/serve tasks. + EventDriverReady +) + +type J int64 + +func (ev J) String() string { + switch ev { + case EventPushOK: + return "EventPushOK" + case EventPushError: + return "EventPushError" + case EventJobStart: + return "EventJobStart" + case EventJobOK: + return "EventJobOK" + case EventJobError: + return "EventJobError" + case EventInitialized: + return "EventInitialized" + case EventPipeActive: + return "EventPipeActive" + case EventPipeStopped: + return "EventPipeStopped" + case EventPipeError: + return "EventPipeError" + case EventDriverReady: + return "EventDriverReady" + } + return UnknownEventType +} + +// JobEvent represent job event. +type JobEvent struct { + Event J + // String is job id. + ID string + + // Pipeline name + Pipeline string + + // Associated driver name (amqp, ephemeral, etc) + Driver string + + // Error for the jobs/pipes errors + Error error + + // event timings + Start time.Time + Elapsed time.Duration +} diff --git a/pkg/events/pool_events.go b/pkg/events/pool_events.go index e7b451e0..4d4cae5d 100644 --- a/pkg/events/pool_events.go +++ b/pkg/events/pool_events.go @@ -57,7 +57,7 @@ func (ev P) String() string { case EventPoolRestart: return "EventPoolRestart" } - return "Unknown event type" + return UnknownEventType } // PoolEvent triggered by pool on different events. Pool as also trigger WorkerEvent in case of log. diff --git a/pkg/events/worker_events.go b/pkg/events/worker_events.go index 11bd6ab7..39c38e57 100644 --- a/pkg/events/worker_events.go +++ b/pkg/events/worker_events.go @@ -20,7 +20,7 @@ func (ev W) String() string { case EventWorkerStderr: return "EventWorkerStderr" } - return "Unknown event type" + return UnknownEventType } // WorkerEvent wraps worker events. diff --git a/pkg/pool/config.go b/pkg/pool/config.go index 2a3dabe4..3a058956 100644 --- a/pkg/pool/config.go +++ b/pkg/pool/config.go @@ -5,7 +5,7 @@ import ( "time" ) -// Configures the pool behavior. +// Config .. Pool config Configures the pool behavior. type Config struct { // Debug flag creates new fresh worker before every request. Debug bool diff --git a/pkg/pool/interface.go b/pkg/pool/interface.go index bbf7653e..4049122c 100644 --- a/pkg/pool/interface.go +++ b/pkg/pool/interface.go @@ -13,7 +13,7 @@ type Pool interface { GetConfig() interface{} // Exec executes task with payload - Exec(rqs payload.Payload) (payload.Payload, error) + Exec(rqs *payload.Payload) (*payload.Payload, error) // Workers returns worker list associated with the pool. 
Workers() (workers []worker.BaseProcess) @@ -25,7 +25,7 @@ type Pool interface { Destroy(ctx context.Context) // ExecWithContext executes task with context which is used with timeout - execWithTTL(ctx context.Context, rqs payload.Payload) (payload.Payload, error) + execWithTTL(ctx context.Context, rqs *payload.Payload) (*payload.Payload, error) } // Watcher is an interface for the Sync workers lifecycle @@ -33,11 +33,11 @@ type Watcher interface { // Watch used to add workers to the container Watch(workers []worker.BaseProcess) error - // Get provide first free worker - Get(ctx context.Context) (worker.BaseProcess, error) + // Take takes the first free worker + Take(ctx context.Context) (worker.BaseProcess, error) - // Push enqueues worker back - Push(w worker.BaseProcess) + // Release releases the worker putting it back to the queue + Release(w worker.BaseProcess) // Allocate - allocates new worker and put it into the WorkerWatcher Allocate() error diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go index 5a6247b5..051e7a8a 100755 --- a/pkg/pool/static_pool.go +++ b/pkg/pool/static_pool.go @@ -18,7 +18,7 @@ import ( const StopRequest = "{\"stop\":true}" // ErrorEncoder encode error or make a decision based on the error type -type ErrorEncoder func(err error, w worker.BaseProcess) (payload.Payload, error) +type ErrorEncoder func(err error, w worker.BaseProcess) (*payload.Payload, error) type Options func(p *StaticPool) @@ -26,7 +26,7 @@ type Command func() *exec.Cmd // StaticPool controls worker creation, destruction and task routing. Pool uses fixed amount of stack. type StaticPool struct { - cfg Config + cfg *Config // worker command creator cmd Command @@ -51,7 +51,7 @@ type StaticPool struct { } // Initialize creates new worker pool and task multiplexer. StaticPool will initiate with one worker. 
-func Initialize(ctx context.Context, cmd Command, factory transport.Factory, cfg Config, options ...Options) (Pool, error) { +func Initialize(ctx context.Context, cmd Command, factory transport.Factory, cfg *Config, options ...Options) (Pool, error) { const op = errors.Op("static_pool_initialize") if factory == nil { return nil, errors.E(op, errors.Str("no factory initialized")) @@ -135,16 +135,16 @@ func (sp *StaticPool) RemoveWorker(wb worker.BaseProcess) error { } // Exec executes provided payload on the worker -func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) { +func (sp *StaticPool) Exec(p *payload.Payload) (*payload.Payload, error) { const op = errors.Op("static_pool_exec") if sp.cfg.Debug { return sp.execDebug(p) } ctxGetFree, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout) defer cancel() - w, err := sp.getWorker(ctxGetFree, op) + w, err := sp.takeWorker(ctxGetFree, op) if err != nil { - return payload.Payload{}, errors.E(op, err) + return nil, errors.E(op, err) } rsp, err := w.(worker.SyncWorker).Exec(p) @@ -163,12 +163,12 @@ func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) { return rsp, nil } // return worker back - sp.ww.Push(w) + sp.ww.Release(w) return rsp, nil } // Be careful, sync with pool.Exec method -func (sp *StaticPool) execWithTTL(ctx context.Context, p payload.Payload) (payload.Payload, error) { +func (sp *StaticPool) execWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { const op = errors.Op("static_pool_exec_with_context") if sp.cfg.Debug { return sp.execDebugWithTTL(ctx, p) @@ -176,9 +176,9 @@ func (sp *StaticPool) execWithTTL(ctx context.Context, p payload.Payload) (paylo ctxAlloc, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout) defer cancel() - w, err := sp.getWorker(ctxAlloc, op) + w, err := sp.takeWorker(ctxAlloc, op) if err != nil { - return payload.Payload{}, errors.E(op, err) + return nil, errors.E(op, err) } rsp, err := w.(worker.SyncWorker).ExecWithTTL(ctx, p) @@ -198,7 +198,7 @@ func (sp *StaticPool) execWithTTL(ctx context.Context, p payload.Payload) (paylo } // return worker back - sp.ww.Push(w) + sp.ww.Release(w) return rsp, nil } @@ -216,16 +216,16 @@ func (sp *StaticPool) stopWorker(w worker.BaseProcess) { func (sp *StaticPool) checkMaxJobs(w worker.BaseProcess) { if w.State().NumExecs() >= sp.cfg.MaxJobs { w.State().Set(worker.StateMaxJobsReached) - sp.ww.Push(w) + sp.ww.Release(w) return } - sp.ww.Push(w) + sp.ww.Release(w) } -func (sp *StaticPool) getWorker(ctxGetFree context.Context, op errors.Op) (worker.BaseProcess, error) { +func (sp *StaticPool) takeWorker(ctxGetFree context.Context, op errors.Op) (worker.BaseProcess, error) { // Get function consumes context with timeout - w, err := sp.ww.Get(ctxGetFree) + w, err := sp.ww.Take(ctxGetFree) if err != nil { // if the error is of kind NoFreeWorkers, it means, that we can't get worker from the stack during the allocate timeout if errors.Is(errors.NoFreeWorkers, err) { @@ -244,7 +244,7 @@ func (sp *StaticPool) Destroy(ctx context.Context) { } func defaultErrEncoder(sp *StaticPool) ErrorEncoder { - return func(err error, w worker.BaseProcess) (payload.Payload, error) { + return func(err error, w worker.BaseProcess) (*payload.Payload, error) { const op = errors.Op("error_encoder") // just push event if on any stage was timeout error switch { @@ -253,6 +253,7 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder { case errors.Is(errors.SoftJob, err): if sp.cfg.MaxJobs != 0 && 
w.State().NumExecs() >= sp.cfg.MaxJobs { + // TODO suspicious logic, redesign err = sp.ww.Allocate() if err != nil { sp.events.Push(events.PoolEvent{Event: events.EventWorkerConstruct, Payload: errors.E(op, err)}) @@ -265,7 +266,7 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder { } } else { sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: err}) - sp.ww.Push(w) + sp.ww.Release(w) } } @@ -273,10 +274,10 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder { sp.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w}) errS := w.Stop() if errS != nil { - return payload.Payload{}, errors.E(op, err, errS) + return nil, errors.E(op, err, errS) } - return payload.Payload{}, errors.E(op, err) + return nil, errors.E(op, err) } } @@ -289,6 +290,7 @@ func (sp *StaticPool) newPoolAllocator(ctx context.Context, timeout time.Duratio return nil, err } + // wrap sync worker sw := worker.From(w) sp.events.Push(events.PoolEvent{ @@ -300,26 +302,33 @@ func (sp *StaticPool) newPoolAllocator(ctx context.Context, timeout time.Duratio } // execDebug used when debug mode was not set and exec_ttl is 0 -func (sp *StaticPool) execDebug(p payload.Payload) (payload.Payload, error) { +func (sp *StaticPool) execDebug(p *payload.Payload) (*payload.Payload, error) { + const op = errors.Op("static_pool_exec_debug") sw, err := sp.allocator() if err != nil { - return payload.Payload{}, err + return nil, err } - // redirect call to the workers exec method (without ttl) + // redirect call to the workers' exec method (without ttl) r, err := sw.Exec(p) - if stopErr := sw.Stop(); stopErr != nil { + if err != nil { + return nil, errors.E(op, err) + } + + err = sw.Stop() + if err != nil { sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: sw, Payload: err}) + return nil, errors.E(op, err) } - return r, err + return r, nil } // execDebugWithTTL used when user set debug mode and exec_ttl -func (sp *StaticPool) execDebugWithTTL(ctx context.Context, p payload.Payload) (payload.Payload, error) { +func (sp *StaticPool) execDebugWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { sw, err := sp.allocator() if err != nil { - return payload.Payload{}, err + return nil, err } // redirect call to the worker with TTL @@ -333,7 +342,7 @@ func (sp *StaticPool) execDebugWithTTL(ctx context.Context, p payload.Payload) ( // allocate required number of stack func (sp *StaticPool) allocateWorkers(numWorkers uint64) ([]worker.BaseProcess, error) { - const op = errors.Op("allocate workers") + const op = errors.Op("static_pool_allocate_workers") workers := make([]worker.BaseProcess, 0, numWorkers) // constant number of stack simplify logic diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go index 6f875072..2ac2093d 100755 --- a/pkg/pool/static_pool_test.go +++ b/pkg/pool/static_pool_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" ) -var cfg = Config{ +var cfg = &Config{ NumWorkers: uint64(runtime.NumCPU()), AllocateTimeout: time.Second * 5, DestroyTimeout: time.Second * 5, @@ -58,7 +58,7 @@ func Test_ConfigNoErrorInitDefaults(t *testing.T) { context.Background(), func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ AllocateTimeout: time.Second, DestroyTimeout: time.Second, }, @@ -82,7 +82,7 @@ func Test_StaticPool_Echo(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + 
res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -106,7 +106,7 @@ func Test_StaticPool_Echo_NilContext(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: nil}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello"), Context: nil}) assert.NoError(t, err) assert.NotNil(t, res) @@ -130,7 +130,7 @@ func Test_StaticPool_Echo_Context(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: []byte("world")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello"), Context: []byte("world")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -151,11 +151,10 @@ func Test_StaticPool_JobError(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, p) - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) if errors.Is(errors.SoftJob, err) == false { t.Fatal("error should be of type errors.Exec") @@ -192,10 +191,9 @@ func Test_StaticPool_Broken_Replace(t *testing.T) { assert.NotNil(t, p) time.Sleep(time.Second) - res, err := p.execWithTTL(ctx, payload.Payload{Body: []byte("hello")}) + res, err := p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Context) - assert.Nil(t, res.Body) + assert.Nil(t, res) <-block @@ -204,7 +202,7 @@ func Test_StaticPool_Broken_Replace(t *testing.T) { func Test_StaticPool_Broken_FromOutside(t *testing.T) { ctx := context.Background() - // Consume pool events + // Run pool events ev := make(chan struct{}, 1) listener := func(event interface{}) { if pe, ok := event.(events.PoolEvent); ok { @@ -214,7 +212,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) { } } - var cfg2 = Config{ + var cfg2 = &Config{ NumWorkers: 1, AllocateTimeout: time.Second * 5, DestroyTimeout: time.Second * 5, @@ -232,7 +230,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) { assert.NotNil(t, p) - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -264,7 +262,7 @@ func Test_StaticPool_AllocateTimeout(t *testing.T) { context.Background(), func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, AllocateTimeout: time.Nanosecond * 1, DestroyTimeout: time.Second * 2, @@ -283,7 +281,7 @@ func Test_StaticPool_Replace_Worker(t *testing.T) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, MaxJobs: 1, AllocateTimeout: time.Second, @@ -298,11 +296,11 @@ func Test_StaticPool_Replace_Worker(t *testing.T) { var lastPID string lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - res, _ := p.Exec(payload.Payload{Body: []byte("hello")}) + res, _ := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.Equal(t, lastPID, string(res.Body)) for i := 0; i < 10; i++ { - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -320,7 +318,7 @@ func Test_StaticPool_Debug_Worker(t *testing.T) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") }, pipe.NewPipeFactory(), - 
Config{ + &Config{ Debug: true, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -334,14 +332,14 @@ func Test_StaticPool_Debug_Worker(t *testing.T) { assert.Len(t, p.Workers(), 0) var lastPID string - res, _ := p.Exec(payload.Payload{Body: []byte("hello")}) + res, _ := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NotEqual(t, lastPID, string(res.Body)) assert.Len(t, p.Workers(), 0) for i := 0; i < 10; i++ { assert.Len(t, p.Workers(), 0) - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -360,7 +358,7 @@ func Test_StaticPool_Stop_Worker(t *testing.T) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -374,14 +372,14 @@ func Test_StaticPool_Stop_Worker(t *testing.T) { var lastPID string lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Fatal(err) } assert.Equal(t, lastPID, string(res.Body)) for i := 0; i < 10; i++ { - res, err := p.Exec(payload.Payload{Body: []byte("hello")}) + res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -400,7 +398,7 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -411,7 +409,7 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) { assert.NoError(t, err) p.Destroy(ctx) - _, err = p.Exec(payload.Payload{Body: []byte("100")}) + _, err = p.Exec(&payload.Payload{Body: []byte("100")}) assert.Error(t, err) } @@ -422,7 +420,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -433,7 +431,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { assert.NoError(t, err) go func() { - _, errP := p.Exec(payload.Payload{Body: []byte("100")}) + _, errP := p.Exec(&payload.Payload{Body: []byte("100")}) if errP != nil { t.Errorf("error executing payload: error %v", err) } @@ -441,7 +439,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { time.Sleep(time.Millisecond * 100) p.Destroy(ctx) - _, err = p.Exec(payload.Payload{Body: []byte("100")}) + _, err = p.Exec(&payload.Payload{Body: []byte("100")}) assert.Error(t, err) } @@ -452,7 +450,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) { context.Background(), func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 5, AllocateTimeout: time.Second * 100, DestroyTimeout: time.Second, @@ -465,7 +463,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) { p.Workers()[i].State().Set(worker.StateErrored) } - _, err = p.Exec(payload.Payload{Body: []byte("hello")}) + _, err = p.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) p.Destroy(ctx) } @@ -476,7 +474,7 @@ func Test_Static_Pool_Slow_Destroy(t *testing.T) { 
context.Background(), func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 5, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -506,7 +504,7 @@ func Test_StaticPool_NoFreeWorkers(t *testing.T) { // sleep for the 3 seconds func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ Debug: false, NumWorkers: 1, AllocateTimeout: time.Second, @@ -519,14 +517,13 @@ func Test_StaticPool_NoFreeWorkers(t *testing.T) { assert.NotNil(t, p) go func() { - _, _ = p.execWithTTL(ctx, payload.Payload{Body: []byte("hello")}) + _, _ = p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) }() time.Sleep(time.Second) - res, err := p.execWithTTL(ctx, payload.Payload{Body: []byte("hello")}) + res, err := p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Context) - assert.Nil(t, res.Body) + assert.Nil(t, res) <-block @@ -539,7 +536,7 @@ func Test_Static_Pool_WrongCommand1(t *testing.T) { context.Background(), func() *exec.Cmd { return exec.Command("phg", "../../tests/slow-destroy.php", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 5, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -556,7 +553,7 @@ func Test_Static_Pool_WrongCommand2(t *testing.T) { context.Background(), func() *exec.Cmd { return exec.Command("php", "", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 5, AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -567,6 +564,24 @@ func Test_Static_Pool_WrongCommand2(t *testing.T) { assert.Nil(t, p) } +/* PTR: +Benchmark_Pool_Echo-32 49076 29926 ns/op 8016 B/op 20 allocs/op +Benchmark_Pool_Echo-32 47257 30779 ns/op 8047 B/op 20 allocs/op +Benchmark_Pool_Echo-32 46737 29440 ns/op 8065 B/op 20 allocs/op +Benchmark_Pool_Echo-32 51177 29074 ns/op 7981 B/op 20 allocs/op +Benchmark_Pool_Echo-32 51764 28319 ns/op 8012 B/op 20 allocs/op +Benchmark_Pool_Echo-32 54054 30714 ns/op 7987 B/op 20 allocs/op +Benchmark_Pool_Echo-32 54391 30689 ns/op 8055 B/op 20 allocs/op + +VAL: +Benchmark_Pool_Echo-32 47936 28679 ns/op 7942 B/op 19 allocs/op +Benchmark_Pool_Echo-32 49010 29830 ns/op 7970 B/op 19 allocs/op +Benchmark_Pool_Echo-32 46771 29031 ns/op 8014 B/op 19 allocs/op +Benchmark_Pool_Echo-32 47760 30517 ns/op 7955 B/op 19 allocs/op +Benchmark_Pool_Echo-32 48148 29816 ns/op 7950 B/op 19 allocs/op +Benchmark_Pool_Echo-32 52705 29809 ns/op 7979 B/op 19 allocs/op +Benchmark_Pool_Echo-32 54374 27776 ns/op 7947 B/op 19 allocs/op +*/ func Benchmark_Pool_Echo(b *testing.B) { ctx := context.Background() p, err := Initialize( @@ -579,23 +594,33 @@ func Benchmark_Pool_Echo(b *testing.B) { b.Fatal(err) } + bd := make([]byte, 1024) + c := make([]byte, 1024) + + pld := &payload.Payload{ + Context: c, + Body: bd, + } + b.ResetTimer() b.ReportAllocs() for n := 0; n < b.N; n++ { - if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(pld); err != nil { b.Fail() } } } // Benchmark_Pool_Echo_Batched-32 366996 2873 ns/op 1233 B/op 24 allocs/op +// PTR -> Benchmark_Pool_Echo_Batched-32 406839 2900 ns/op 1059 B/op 23 allocs/op +// PTR -> Benchmark_Pool_Echo_Batched-32 413312 2904 ns/op 1067 B/op 23 allocs/op func Benchmark_Pool_Echo_Batched(b *testing.B) { ctx := context.Background() p, err := Initialize( ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", 
"echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: uint64(runtime.NumCPU()), AllocateTimeout: time.Second * 100, DestroyTimeout: time.Second, @@ -604,12 +629,23 @@ func Benchmark_Pool_Echo_Batched(b *testing.B) { assert.NoError(b, err) defer p.Destroy(ctx) + bd := make([]byte, 1024) + c := make([]byte, 1024) + + pld := &payload.Payload{ + Context: c, + Body: bd, + } + + b.ResetTimer() + b.ReportAllocs() + var wg sync.WaitGroup for i := 0; i < b.N; i++ { wg.Add(1) go func() { defer wg.Done() - if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(pld); err != nil { b.Fail() log.Println(err) } @@ -626,7 +662,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) { ctx, func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, pipe.NewPipeFactory(), - Config{ + &Config{ NumWorkers: 1, MaxJobs: 1, AllocateTimeout: time.Second, @@ -639,7 +675,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) { b.ReportAllocs() for n := 0; n < b.N; n++ { - if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := p.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() log.Println(err) } diff --git a/pkg/pool/supervisor_pool.go b/pkg/pool/supervisor_pool.go index 4b990dbe..bdaeade1 100755 --- a/pkg/pool/supervisor_pool.go +++ b/pkg/pool/supervisor_pool.go @@ -43,11 +43,11 @@ func supervisorWrapper(pool Pool, events events.Handler, cfg *SupervisorConfig) return sp } -func (sp *supervised) execWithTTL(_ context.Context, _ payload.Payload) (payload.Payload, error) { +func (sp *supervised) execWithTTL(_ context.Context, _ *payload.Payload) (*payload.Payload, error) { panic("used to satisfy pool interface") } -func (sp *supervised) Exec(rqs payload.Payload) (payload.Payload, error) { +func (sp *supervised) Exec(rqs *payload.Payload) (*payload.Payload, error) { const op = errors.Op("supervised_exec_with_context") if sp.cfg.ExecTTL == 0 { return sp.pool.Exec(rqs) @@ -58,7 +58,7 @@ func (sp *supervised) Exec(rqs payload.Payload) (payload.Payload, error) { res, err := sp.pool.execWithTTL(ctx, rqs) if err != nil { - return payload.Payload{}, errors.E(op, err) + return nil, errors.E(op, err) } return res, nil @@ -136,7 +136,7 @@ func (sp *supervised) control() { //nolint:gocognit /* worker at this point might be in the middle of request execution: - ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Push + ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release ^ TTL Reached, state - invalid | -----> Worker Stopped here @@ -156,7 +156,7 @@ func (sp *supervised) control() { //nolint:gocognit /* worker at this point might be in the middle of request execution: - ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Push + ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release ^ TTL Reached, state - invalid | -----> Worker Stopped here @@ -211,7 +211,7 @@ func (sp *supervised) control() { //nolint:gocognit /* worker at this point might be in the middle of request execution: - ---> REQ ---> 
WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Push + ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release ^ TTL Reached, state - invalid | -----> Worker Stopped here @@ -221,7 +221,7 @@ func (sp *supervised) control() { //nolint:gocognit workers[i].State().Set(worker.StateInvalid) _ = workers[i].Stop() } - // just to double check + // just to double-check workers[i].State().Set(worker.StateInvalid) sp.events.Push(events.PoolEvent{Event: events.EventIdleTTL, Payload: workers[i]}) } diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go index 1cd301ba..0702a71f 100644 --- a/pkg/pool/supervisor_test.go +++ b/pkg/pool/supervisor_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -var cfgSupervised = Config{ +var cfgSupervised = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -43,7 +43,7 @@ func TestSupervisedPool_Exec(t *testing.T) { for i := 0; i < 100; i++ { time.Sleep(time.Millisecond * 100) - _, err = p.Exec(payload.Payload{ + _, err = p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -73,7 +73,7 @@ func TestSupervisedPool_ExecWithDebugMode(t *testing.T) { for i := 0; i < 100; i++ { time.Sleep(time.Millisecond * 100) - _, err = p.Exec(payload.Payload{ + _, err = p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -84,7 +84,7 @@ func TestSupervisedPool_ExecWithDebugMode(t *testing.T) { } func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -110,14 +110,13 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) { pid := p.Workers()[0].Pid() - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) assert.Error(t, err) - assert.Empty(t, resp.Body) - assert.Empty(t, resp.Context) + assert.Empty(t, resp) time.Sleep(time.Second * 1) // should be new worker with new pid @@ -125,7 +124,7 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) { } func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), Supervisor: &SupervisorConfig{ WatchTick: 1 * time.Second, @@ -145,7 +144,7 @@ func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) { pid := p.Workers()[0].Pid() - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -159,7 +158,7 @@ func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) { require.Equal(t, p.Workers()[0].State().Value(), worker.StateReady) pid = p.Workers()[0].Pid() - resp, err = p.Exec(payload.Payload{ + resp, err = p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -177,7 +176,7 @@ func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) { } func TestSupervisedPool_Idle(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -202,7 +201,7 @@ func TestSupervisedPool_Idle(t *testing.T) { pid := p.Workers()[0].Pid() - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), 
Body: []byte("foo"), }) @@ -214,7 +213,7 @@ func TestSupervisedPool_Idle(t *testing.T) { time.Sleep(time.Second * 5) // worker should be marked as invalid and reallocated - _, err = p.Exec(payload.Payload{ + _, err = p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -225,7 +224,7 @@ func TestSupervisedPool_Idle(t *testing.T) { } func TestSupervisedPool_IdleTTL_StateAfterTimeout(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -251,7 +250,7 @@ func TestSupervisedPool_IdleTTL_StateAfterTimeout(t *testing.T) { pid := p.Workers()[0].Pid() time.Sleep(time.Millisecond * 100) - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -267,7 +266,7 @@ func TestSupervisedPool_IdleTTL_StateAfterTimeout(t *testing.T) { } func TestSupervisedPool_ExecTTL_OK(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -294,7 +293,7 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) { pid := p.Workers()[0].Pid() time.Sleep(time.Millisecond * 100) - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) @@ -309,7 +308,7 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) { } func TestSupervisedPool_MaxMemoryReached(t *testing.T) { - var cfgExecTTL = Config{ + var cfgExecTTL = &Config{ NumWorkers: uint64(1), AllocateTimeout: time.Second, DestroyTimeout: time.Second, @@ -346,7 +345,7 @@ func TestSupervisedPool_MaxMemoryReached(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, p) - resp, err := p.Exec(payload.Payload{ + resp, err := p.Exec(&payload.Payload{ Context: []byte(""), Body: []byte("foo"), }) diff --git a/pkg/priority_queue/binary_heap.go b/pkg/priority_queue/binary_heap.go new file mode 100644 index 00000000..fc043927 --- /dev/null +++ b/pkg/priority_queue/binary_heap.go @@ -0,0 +1,125 @@ +/* +binary heap (min-heap) algorithm used as a core for the priority queue +*/ + +package priorityqueue + +import ( + "sync" + "sync/atomic" +) + +type BinHeap struct { + items []Item + // find a way to use pointer to the raw data + len uint64 + maxLen uint64 + cond sync.Cond +} + +func NewBinHeap(maxLen uint64) *BinHeap { + return &BinHeap{ + items: make([]Item, 0, 1000), + len: 0, + maxLen: maxLen, + cond: sync.Cond{L: &sync.Mutex{}}, + } +} + +func (bh *BinHeap) fixUp() { + k := bh.len - 1 + p := (k - 1) >> 1 // k-1 / 2 + + for k > 0 { + cur, par := (bh.items)[k], (bh.items)[p] + + if cur.Priority() < par.Priority() { + bh.swap(k, p) + k = p + p = (k - 1) >> 1 + } else { + return + } + } +} + +func (bh *BinHeap) swap(i, j uint64) { + (bh.items)[i], (bh.items)[j] = (bh.items)[j], (bh.items)[i] +} + +func (bh *BinHeap) fixDown(curr, end int) { + cOneIdx := (curr << 1) + 1 + for cOneIdx <= end { + cTwoIdx := -1 + if (curr<<1)+2 <= end { + cTwoIdx = (curr << 1) + 2 + } + + idxToSwap := cOneIdx + if cTwoIdx > -1 && (bh.items)[cTwoIdx].Priority() < (bh.items)[cOneIdx].Priority() { + idxToSwap = cTwoIdx + } + if (bh.items)[idxToSwap].Priority() < (bh.items)[curr].Priority() { + bh.swap(uint64(curr), uint64(idxToSwap)) + curr = idxToSwap + cOneIdx = (curr << 1) + 1 + } else { + return + } + } +} + +func (bh *BinHeap) Len() uint64 { + return atomic.LoadUint64(&bh.len) +} + +func (bh *BinHeap) Insert(item Item) { + bh.cond.L.Lock() + + // check the binary heap 
len before insertion + if bh.Len() > bh.maxLen { + // unlock the mutex to proceed to get-max + bh.cond.L.Unlock() + + // signal waiting goroutines + for bh.Len() > 0 { + // signal waiting goroutines + bh.cond.Signal() + } + // lock mutex to proceed inserting into the empty slice + bh.cond.L.Lock() + } + + bh.items = append(bh.items, item) + + // add len to the slice + atomic.AddUint64(&bh.len, 1) + + // fix binary heap up + bh.fixUp() + bh.cond.L.Unlock() + + // signal the goroutine on wait + bh.cond.Signal() +} + +func (bh *BinHeap) ExtractMin() Item { + bh.cond.L.Lock() + + // if len == 0, wait for the signal + for bh.Len() == 0 { + bh.cond.Wait() + } + + bh.swap(0, bh.len-1) + + item := (bh.items)[int(bh.len)-1] + bh.items = (bh).items[0 : int(bh.len)-1] + bh.fixDown(0, int(bh.len-2)) + + // reduce len + atomic.AddUint64(&bh.len, ^uint64(0)) + + bh.cond.L.Unlock() + return item +} diff --git a/pkg/priority_queue/binary_heap_test.go b/pkg/priority_queue/binary_heap_test.go new file mode 100644 index 00000000..fb5b83de --- /dev/null +++ b/pkg/priority_queue/binary_heap_test.go @@ -0,0 +1,128 @@ +package priorityqueue + +import ( + "fmt" + "math/rand" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type Test int + +func (t Test) Ack() error { + return nil +} + +func (t Test) Nack() error { + return nil +} + +func (t Test) Requeue(_ map[string][]string, _ int64) error { + return nil +} + +func (t Test) Body() []byte { + return nil +} + +func (t Test) Context() ([]byte, error) { + return nil, nil +} + +func (t Test) ID() string { + return "none" +} + +func (t Test) Priority() int64 { + return int64(t) +} + +func TestBinHeap_Init(t *testing.T) { + a := []Item{Test(2), Test(23), Test(33), Test(44), Test(1), Test(2), Test(2), Test(2), Test(4), Test(6), Test(99)} + + bh := NewBinHeap(12) + + for i := 0; i < len(a); i++ { + bh.Insert(a[i]) + } + + expected := []Item{Test(1), Test(2), Test(2), Test(2), Test(2), Test(4), Test(6), Test(23), Test(33), Test(44), Test(99)} + + res := make([]Item, 0, 12) + + for i := 0; i < 11; i++ { + item := bh.ExtractMin() + res = append(res, item) + } + + require.Equal(t, expected, res) +} + +func TestNewPriorityQueue(t *testing.T) { + insertsPerSec := uint64(0) + getPerSec := uint64(0) + stopCh := make(chan struct{}, 1) + pq := NewBinHeap(1000) + + go func() { + tt3 := time.NewTicker(time.Millisecond * 10) + for { + select { + case <-tt3.C: + require.Less(t, pq.Len(), uint64(1002)) + case <-stopCh: + return + } + } + }() + + go func() { + tt := time.NewTicker(time.Second) + + for { + select { + case <-tt.C: + fmt.Println(fmt.Sprintf("Insert per second: %d", atomic.LoadUint64(&insertsPerSec))) + atomic.StoreUint64(&insertsPerSec, 0) + fmt.Println(fmt.Sprintf("ExtractMin per second: %d", atomic.LoadUint64(&getPerSec))) + atomic.StoreUint64(&getPerSec, 0) + case <-stopCh: + tt.Stop() + return + } + } + }() + + go func() { + for { + select { + case <-stopCh: + return + default: + pq.ExtractMin() + atomic.AddUint64(&getPerSec, 1) + } + } + }() + + go func() { + for { + select { + case <-stopCh: + return + default: + pq.Insert(Test(rand.Int())) //nolint:gosec + atomic.AddUint64(&insertsPerSec, 1) + } + } + }() + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + stopCh <- struct{}{} + stopCh <- struct{}{} + stopCh <- struct{}{} +} diff --git a/pkg/priority_queue/interface.go b/pkg/priority_queue/interface.go new file mode 100644 index 00000000..9efa4652 --- /dev/null +++ b/pkg/priority_queue/interface.go @@ -0,0 +1,31 @@ +package 
priorityqueue + +type Queue interface { + Insert(item Item) + ExtractMin() Item + Len() uint64 +} + +// Item represents binary heap item +type Item interface { + // ID is a unique item identifier + ID() string + + // Priority returns the Item's priority to sort + Priority() int64 + + // Body is the Item payload + Body() []byte + + // Context is the Item meta information + Context() ([]byte, error) + + // Ack - acknowledge the Item after processing + Ack() error + + // Nack - discard the Item + Nack() error + + // Requeue - put the message back to the queue with the optional delay + Requeue(headers map[string][]string, delay int64) error +} diff --git a/pkg/process/state.go b/pkg/process/state.go index 652ec77c..bfc3a287 100644 --- a/pkg/process/state.go +++ b/pkg/process/state.go @@ -32,20 +32,20 @@ type State struct { } // WorkerProcessState creates new worker state definition. -func WorkerProcessState(w worker.BaseProcess) (State, error) { +func WorkerProcessState(w worker.BaseProcess) (*State, error) { const op = errors.Op("worker_process_state") p, _ := process.NewProcess(int32(w.Pid())) i, err := p.MemoryInfo() if err != nil { - return State{}, errors.E(op, err) + return nil, errors.E(op, err) } percent, err := p.CPUPercent() if err != nil { - return State{}, err + return nil, err } - return State{ + return &State{ CPUPercent: percent, Pid: int(w.Pid()), Status: w.State().String(), diff --git a/pkg/transport/interface.go b/pkg/transport/interface.go index 7e3e5350..1b072378 100644 --- a/pkg/transport/interface.go +++ b/pkg/transport/interface.go @@ -8,7 +8,7 @@ import ( "github.com/spiral/roadrunner/v2/pkg/worker" ) -// Factory is responsible of wrapping given command into tasks WorkerProcess. +// Factory is responsible for wrapping given command into tasks WorkerProcess. type Factory interface { // SpawnWorkerWithTimeout creates new WorkerProcess process based on given command with context. // Process must not be started. diff --git a/pkg/transport/pipe/pipe_factory.go b/pkg/transport/pipe/pipe_factory.go index 19f4f92d..9433a510 100755 --- a/pkg/transport/pipe/pipe_factory.go +++ b/pkg/transport/pipe/pipe_factory.go @@ -22,42 +22,54 @@ func NewPipeFactory() *Factory { return &Factory{} } -type SpawnResult struct { +type sr struct { w *worker.Process err error } // SpawnWorkerWithTimeout creates new Process and connects it to goridge relay, // method Wait() must be handled on level above. 
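
The new pkg/priority_queue package ties the Item interface to the BinHeap above: anything that can report a Priority() can be queued, Insert wakes one waiting consumer, and ExtractMin blocks on the condition variable while the heap is empty. Below is a minimal consumer sketch under those assumptions; the `job` type is hypothetical and stubs out the Item methods the heap itself never calls (the in-diff test does the same with its `Test` type):

```go
package main

import (
	"fmt"

	priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
)

// job is a hypothetical Item implementation; only Priority() affects
// heap ordering, the remaining methods are stubs for the interface.
type job struct {
	id       string
	priority int64
	body     []byte
}

func (j *job) ID() string                                   { return j.id }
func (j *job) Priority() int64                              { return j.priority }
func (j *job) Body() []byte                                 { return j.body }
func (j *job) Context() ([]byte, error)                     { return nil, nil }
func (j *job) Ack() error                                   { return nil }
func (j *job) Nack() error                                  { return nil }
func (j *job) Requeue(_ map[string][]string, _ int64) error { return nil }

func main() {
	// max length before Insert starts draining via Signal
	q := priorityqueue.NewBinHeap(100)

	q.Insert(&job{id: "b", priority: 10})
	q.Insert(&job{id: "a", priority: 1})

	// ExtractMin returns the lowest priority first and blocks when the
	// heap is empty, so a consumer goroutine can loop on it indefinitely.
	fmt.Println(q.ExtractMin().ID()) // "a"
	fmt.Println(q.ExtractMin().ID()) // "b"
}
```
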
-func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { - c := make(chan SpawnResult) +func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { //nolint:gocognit + spCh := make(chan sr) const op = errors.Op("factory_spawn_worker_with_timeout") go func() { w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) if err != nil { - c <- SpawnResult{ + select { + case spCh <- sr{ w: nil, err: errors.E(op, err), + }: + return + default: + return } - return } in, err := cmd.StdoutPipe() if err != nil { - c <- SpawnResult{ + select { + case spCh <- sr{ w: nil, err: errors.E(op, err), + }: + return + default: + return } - return } out, err := cmd.StdinPipe() if err != nil { - c <- SpawnResult{ + select { + case spCh <- sr{ w: nil, err: errors.E(op, err), + }: + return + default: + return } - return } // Init new PIPE relay @@ -67,42 +79,69 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, lis // Start the worker err = w.Start() if err != nil { - c <- SpawnResult{ + select { + case spCh <- sr{ w: nil, err: errors.E(op, err), + }: + return + default: + return } - return } - // errors bundle pid, err := internal.FetchPID(relay) - if pid != w.Pid() || err != nil { + if err != nil { err = multierr.Combine( err, w.Kill(), w.Wait(), ) - c <- SpawnResult{ + select { + case spCh <- sr{ w: nil, err: errors.E(op, err), + }: + return + default: + _ = w.Kill() + return } - return } - // everything ok, set ready state - w.State().Set(worker.StateReady) + if pid != w.Pid() { + select { + case spCh <- sr{ + w: nil, + err: errors.E(op, errors.Errorf("pid mismatches, get: %d, want: %d", pid, w.Pid())), + }: + return + default: + _ = w.Kill() + return + } + } + select { + case // return worker - c <- SpawnResult{ + spCh <- sr{ w: w, err: nil, + }: + // everything ok, set ready state + w.State().Set(worker.StateReady) + return + default: + _ = w.Kill() + return } }() select { case <-ctx.Done(): return nil, ctx.Err() - case res := <-c: + case res := <-spCh: if res.err != nil { return nil, res.err } diff --git a/pkg/transport/pipe/pipe_factory_spawn_test.go b/pkg/transport/pipe/pipe_factory_spawn_test.go index 51befb1e..f5e9669b 100644 --- a/pkg/transport/pipe/pipe_factory_spawn_test.go +++ b/pkg/transport/pipe/pipe_factory_spawn_test.go @@ -144,7 +144,7 @@ func Test_Pipe_Echo2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -168,11 +168,10 @@ func Test_Pipe_Broken2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) } func Benchmark_Pipe_SpawnWorker_Stop2(b *testing.B) { @@ -215,7 +214,7 @@ func Benchmark_Pipe_Worker_ExecEcho2(b *testing.B) { }() for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -238,7 +237,7 @@ func Benchmark_Pipe_Worker_ExecEcho4(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := 
sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -261,7 +260,7 @@ func Benchmark_Pipe_Worker_ExecEchoWithoutContext2(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -287,7 +286,7 @@ func Test_Echo2(t *testing.T) { } }() - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -314,11 +313,10 @@ func Test_BadPayload2(t *testing.T) { } }() - res, err := sw.Exec(payload.Payload{}) + res, err := sw.Exec(&payload.Payload{}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) assert.Contains(t, err.Error(), "payload can not be empty") } @@ -358,7 +356,7 @@ func Test_Echo_Slow2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -387,10 +385,9 @@ func Test_Broken2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) time.Sleep(time.Second * 3) mu.Lock() @@ -418,10 +415,9 @@ func Test_Error2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) if errors.Is(errors.SoftJob, err) == false { t.Fatal("error should be of type errors.ErrSoftJob") @@ -445,19 +441,19 @@ func Test_NumExecs2(t *testing.T) { sw := worker.From(w) - _, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, uint64(1), w.State().NumExecs()) - _, err = sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, uint64(2), w.State().NumExecs()) - _, err = sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } diff --git a/pkg/transport/pipe/pipe_factory_test.go b/pkg/transport/pipe/pipe_factory_test.go index 3ef65be8..d243a93f 100755 --- a/pkg/transport/pipe/pipe_factory_test.go +++ b/pkg/transport/pipe/pipe_factory_test.go @@ -102,6 +102,7 @@ func Test_Pipe_PipeError(t *testing.T) { func Test_Pipe_PipeError2(t *testing.T) { cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") + // error cause _, err := cmd.StdinPipe() if err != nil { t.Errorf("error creating the STDIN pipe: error %v", err) @@ -159,7 +160,7 @@ func Test_Pipe_Echo(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -184,11 +185,10 @@ func Test_Pipe_Broken(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := 
sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) } func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) { @@ -231,7 +231,7 @@ func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) { }() for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -255,7 +255,7 @@ func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -279,7 +279,7 @@ func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -305,7 +305,7 @@ func Test_Echo(t *testing.T) { } }() - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -333,11 +333,10 @@ func Test_BadPayload(t *testing.T) { } }() - res, err := sw.Exec(payload.Payload{}) + res, err := sw.Exec(&payload.Payload{}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) assert.Contains(t, err.Error(), "payload can not be empty") } @@ -379,7 +378,7 @@ func Test_Echo_Slow(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Nil(t, err) assert.NotNil(t, res) @@ -409,10 +408,9 @@ func Test_Broken(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) time.Sleep(time.Second * 3) mu.Lock() @@ -441,10 +439,9 @@ func Test_Error(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NotNil(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) if errors.Is(errors.SoftJob, err) == false { t.Fatal("error should be of type errors.ErrSoftJob") @@ -469,19 +466,19 @@ func Test_NumExecs(t *testing.T) { sw := worker.From(w) - _, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, uint64(1), w.State().NumExecs()) - _, err = sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } assert.Equal(t, uint64(2), w.State().NumExecs()) - _, err = sw.Exec(payload.Payload{Body: []byte("hello")}) + _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) if err != nil { t.Errorf("fail to execute payload: error %v", err) } diff --git a/pkg/transport/socket/socket_factory.go b/pkg/transport/socket/socket_factory.go index 965a0f30..dc2b75cf 100755 --- a/pkg/transport/socket/socket_factory.go +++ b/pkg/transport/socket/socket_factory.go @@ -2,6 +2,7 @@ package socket 
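
The pipe factory rewrite above swaps every bare `c <- SpawnResult{...}` send for a `select` with a `default` branch. The reason: once the outer select has already returned on `ctx.Done()`, nothing ever reads the unbuffered channel, so a plain send would park the spawn goroutine forever; the default branch is also where the now-orphaned worker gets killed. A stripped-down, self-contained sketch of the same pattern with hypothetical names, not the factory code itself:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type result struct {
	val string
	err error
}

// spawn mimics the factory goroutine: it reports through an unbuffered
// channel but never blocks if the caller has already given up.
func spawn(ctx context.Context) (string, error) {
	ch := make(chan result)

	go func() {
		time.Sleep(50 * time.Millisecond) // slow allocation

		select {
		case ch <- result{val: "worker-1"}:
			// the caller is still waiting: hand the result over
		default:
			// the caller timed out and went away; clean up here
			// (the real code calls w.Kill()) instead of leaking
			fmt.Println("caller gone, releasing resources")
		}
	}()

	select {
	case <-ctx.Done():
		return "", ctx.Err()
	case res := <-ch:
		return res.val, res.err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	if _, err := spawn(ctx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("spawn timed out without leaking the goroutine")
	}
	time.Sleep(100 * time.Millisecond) // let the cleanup branch print
}
```
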
import ( "context" + "fmt" "net" "os/exec" "sync" @@ -29,8 +30,6 @@ type Factory struct { // sockets which are waiting for process association relays sync.Map - - ErrCh chan error } // NewSocketServer returns Factory attached to a given socket listener. @@ -40,14 +39,17 @@ func NewSocketServer(ls net.Listener, tout time.Duration) *Factory { ls: ls, tout: tout, relays: sync.Map{}, - ErrCh: make(chan error, 10), } // Be careful // https://github.com/go101/go101/wiki/About-memory-ordering-guarantees-made-by-atomic-operations-in-Go // https://github.com/golang/go/issues/5045 go func() { - f.ErrCh <- f.listen() + err := f.listen() + // there is no logger here, use fmt + if err != nil { + fmt.Printf("[WARN]: socket server listen, error: %v\n", err) + } }() return f @@ -90,20 +92,28 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, lis defer cancel() w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) if err != nil { - c <- socketSpawn{ + select { + case c <- socketSpawn{ w: nil, - err: err, + err: errors.E(op, err), + }: + return + default: + return } - return } err = w.Start() if err != nil { - c <- socketSpawn{ + select { + case c <- socketSpawn{ w: nil, err: errors.E(op, err), + }: + return + default: + return } - return } rl, err := f.findRelayWithContext(ctxT, w) @@ -114,19 +124,31 @@ func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, lis w.Wait(), ) - c <- socketSpawn{ + select { + // try to write result + case c <- socketSpawn{ w: nil, err: errors.E(op, err), + }: + return + // if no receivers - return + default: + return } - return } w.AttachRelay(rl) w.State().Set(worker.StateReady) - c <- socketSpawn{ + select { + case c <- socketSpawn{ w: w, err: nil, + }: + return + default: + _ = w.Kill() + return } }() @@ -165,6 +187,17 @@ func (f *Factory) SpawnWorker(cmd *exec.Cmd, listeners ...events.Listener) (*wor } w.AttachRelay(rl) + + // errors bundle + if pid, err := internal.FetchPID(rl); pid != w.Pid() { + err = multierr.Combine( + err, + w.Kill(), + w.Wait(), + ) + return nil, errors.E(op, err) + } + w.State().Set(worker.StateReady) return w, nil diff --git a/pkg/transport/socket/socket_factory_spawn_test.go b/pkg/transport/socket/socket_factory_spawn_test.go index b875e2c8..905a3b6b 100644 --- a/pkg/transport/socket/socket_factory_spawn_test.go +++ b/pkg/transport/socket/socket_factory_spawn_test.go @@ -16,7 +16,7 @@ import ( ) func Test_Tcp_Start2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -45,7 +45,7 @@ func Test_Tcp_Start2(t *testing.T) { } func Test_Tcp_StartCloseFactory2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { } else { t.Skip("socket is busy") @@ -72,7 +72,7 @@ func Test_Tcp_StartCloseFactory2(t *testing.T) { } func Test_Tcp_StartError2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -96,7 +96,7 @@ func Test_Tcp_StartError2(t *testing.T) { } func Test_Tcp_Failboot2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err3 := ls.Close() @@ -128,7 +128,7 @@ func Test_Tcp_Failboot2(t *testing.T) { } func Test_Tcp_Invalid2(t *testing.T) { - ls, 
err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -148,7 +148,7 @@ func Test_Tcp_Invalid2(t *testing.T) { } func Test_Tcp_Broken2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -194,16 +194,15 @@ func Test_Tcp_Broken2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) wg.Wait() <-finish } func Test_Tcp_Echo2(t *testing.T) { - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -230,7 +229,7 @@ func Test_Tcp_Echo2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -363,11 +362,10 @@ func Test_Unix_Broken2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Context) - assert.Nil(t, res.Body) + assert.Nil(t, res) wg.Wait() <-finish } @@ -398,7 +396,7 @@ func Test_Unix_Echo2(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -459,7 +457,7 @@ func Benchmark_Tcp_Worker_ExecEcho2(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -528,7 +526,7 @@ func Benchmark_Unix_Worker_ExecEcho2(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } diff --git a/pkg/transport/socket/socket_factory_test.go b/pkg/transport/socket/socket_factory_test.go index 34fe088b..f9bb2178 100755 --- a/pkg/transport/socket/socket_factory_test.go +++ b/pkg/transport/socket/socket_factory_test.go @@ -19,7 +19,7 @@ func Test_Tcp_Start(t *testing.T) { ctx := context.Background() time.Sleep(time.Millisecond * 10) // to ensure free socket - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err = ls.Close() @@ -50,7 +50,7 @@ func Test_Tcp_Start(t *testing.T) { func Test_Tcp_StartCloseFactory(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { } else { t.Skip("socket is busy") @@ -79,7 +79,7 @@ func Test_Tcp_StartCloseFactory(t *testing.T) { func Test_Tcp_StartError(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err = 
ls.Close() @@ -106,7 +106,7 @@ func Test_Tcp_Failboot(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err3 := ls.Close() @@ -140,7 +140,7 @@ func Test_Tcp_Failboot(t *testing.T) { func Test_Tcp_Timeout(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err = ls.Close() @@ -163,7 +163,7 @@ func Test_Tcp_Timeout(t *testing.T) { func Test_Tcp_Invalid(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err = ls.Close() @@ -185,7 +185,7 @@ func Test_Tcp_Invalid(t *testing.T) { func Test_Tcp_Broken(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { errC := ls.Close() @@ -231,10 +231,9 @@ func Test_Tcp_Broken(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) wg.Wait() <-finish } @@ -242,7 +241,7 @@ func Test_Tcp_Broken(t *testing.T) { func Test_Tcp_Echo(t *testing.T) { time.Sleep(time.Millisecond * 10) // to ensure free socket ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if assert.NoError(t, err) { defer func() { err = ls.Close() @@ -269,7 +268,7 @@ func Test_Tcp_Echo(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -434,11 +433,10 @@ func Test_Unix_Broken(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Context) - assert.Nil(t, res.Body) + assert.Nil(t, res) <-block wg.Wait() } @@ -475,7 +473,7 @@ func Test_Unix_Echo(t *testing.T) { sw := worker.From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.NoError(t, err) assert.NotNil(t, res) @@ -487,7 +485,7 @@ func Test_Unix_Echo(t *testing.T) { func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) { ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if err == nil { defer func() { err = ls.Close() @@ -520,7 +518,7 @@ func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) { func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) { ctx := context.Background() - ls, err := net.Listen("tcp", "localhost:9007") + ls, err := net.Listen("tcp", "127.0.0.1:9007") if err == nil { defer func() { err = ls.Close() @@ -548,7 +546,7 @@ func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := 
sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } @@ -613,7 +611,7 @@ func Benchmark_Unix_Worker_ExecEcho(b *testing.B) { sw := worker.From(w) for n := 0; n < b.N; n++ { - if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil { + if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { b.Fail() } } diff --git a/pkg/worker/interface.go b/pkg/worker/interface.go index d2cfe2cd..ed8704bb 100644 --- a/pkg/worker/interface.go +++ b/pkg/worker/interface.go @@ -68,7 +68,7 @@ type SyncWorker interface { // BaseProcess provides basic functionality for the SyncWorker BaseProcess // Exec used to execute payload on the SyncWorker, there is no TIMEOUTS - Exec(rqs payload.Payload) (payload.Payload, error) + Exec(rqs *payload.Payload) (*payload.Payload, error) // ExecWithTTL used to handle Exec with TTL - ExecWithTTL(ctx context.Context, p payload.Payload) (payload.Payload, error) + ExecWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) } diff --git a/pkg/worker/sync_worker.go b/pkg/worker/sync_worker.go index 02f11d0b..74e29b71 100755 --- a/pkg/worker/sync_worker.go +++ b/pkg/worker/sync_worker.go @@ -23,7 +23,7 @@ type SyncWorkerImpl struct { } // From creates SyncWorker from BaseProcess -func From(process *Process) SyncWorker { +func From(process *Process) *SyncWorkerImpl { return &SyncWorkerImpl{ process: process, fPool: sync.Pool{New: func() interface{} { @@ -36,14 +36,14 @@ func From(process *Process) SyncWorker { } // Exec payload without TTL timeout. -func (tw *SyncWorkerImpl) Exec(p payload.Payload) (payload.Payload, error) { +func (tw *SyncWorkerImpl) Exec(p *payload.Payload) (*payload.Payload, error) { const op = errors.Op("sync_worker_exec") if len(p.Body) == 0 && len(p.Context) == 0 { - return payload.Payload{}, errors.E(op, errors.Str("payload can not be empty")) + return nil, errors.E(op, errors.Str("payload can not be empty")) } if tw.process.State().Value() != StateReady { - return payload.Payload{}, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())) + return nil, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())) } // set last used time @@ -57,7 +57,7 @@ func (tw *SyncWorkerImpl) Exec(p payload.Payload) (payload.Payload, error) { tw.process.State().Set(StateErrored) tw.process.State().RegisterExec() } - return payload.Payload{}, errors.E(op, err) + return nil, errors.E(op, err) } // supervisor may set state of the worker during the work @@ -74,28 +74,26 @@ func (tw *SyncWorkerImpl) Exec(p payload.Payload) (payload.Payload, error) { } type wexec struct { - payload payload.Payload + payload *payload.Payload err error } // ExecWithTTL executes payload without TTL timeout. 
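
With Exec and ExecWithTTL now taking and returning `*payload.Payload`, every error path hands back a plain `nil` instead of an empty struct, which is why the tests above collapse the `assert.Nil(t, res.Body)` / `assert.Nil(t, res.Context)` pairs into a single `assert.Nil(t, res)`. The caller-side contract becomes: check the error before touching the payload. A hedged sketch that mirrors how the tests spawn a worker (the PHP script path is the one the tests use):

```go
package main

import (
	"context"
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/transport/pipe"
	"github.com/spiral/roadrunner/v2/pkg/worker"
)

func main() {
	ctx := context.Background()
	cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")

	w, err := pipe.NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = w.Stop() }()

	sw := worker.From(w) // From now returns the concrete *SyncWorkerImpl

	res, err := sw.Exec(&payload.Payload{Body: []byte("hello")})
	if err != nil {
		// res is plain nil on every error path under the new API, so
		// touching res.Body or res.Context here would panic
		log.Fatal(err)
	}
	log.Printf("echo: %s", res.Body)
}
```
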
-func (tw *SyncWorkerImpl) ExecWithTTL(ctx context.Context, p payload.Payload) (payload.Payload, error) { +func (tw *SyncWorkerImpl) ExecWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { const op = errors.Op("sync_worker_exec_worker_with_timeout") c := make(chan wexec, 1) go func() { if len(p.Body) == 0 && len(p.Context) == 0 { c <- wexec{ - payload: payload.Payload{}, - err: errors.E(op, errors.Str("payload can not be empty")), + err: errors.E(op, errors.Str("payload can not be empty")), } return } if tw.process.State().Value() != StateReady { c <- wexec{ - payload: payload.Payload{}, - err: errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())), + err: errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())), } return } @@ -112,8 +110,7 @@ func (tw *SyncWorkerImpl) ExecWithTTL(ctx context.Context, p payload.Payload) (p tw.process.State().RegisterExec() } c <- wexec{ - payload: payload.Payload{}, - err: errors.E(op, err), + err: errors.E(op, err), } return } @@ -143,18 +140,18 @@ func (tw *SyncWorkerImpl) ExecWithTTL(ctx context.Context, p payload.Payload) (p if err != nil { // append timeout error err = multierr.Append(err, errors.E(op, errors.ExecTTL)) - return payload.Payload{}, multierr.Append(err, ctx.Err()) + return nil, multierr.Append(err, ctx.Err()) } - return payload.Payload{}, errors.E(op, errors.ExecTTL, ctx.Err()) + return nil, errors.E(op, errors.ExecTTL, ctx.Err()) case res := <-c: if res.err != nil { - return payload.Payload{}, res.err + return nil, res.err } return res.payload, nil } } -func (tw *SyncWorkerImpl) execPayload(p payload.Payload) (payload.Payload, error) { +func (tw *SyncWorkerImpl) execPayload(p *payload.Payload) (*payload.Payload, error) { const op = errors.Op("sync_worker_exec_payload") // get a frame @@ -162,7 +159,7 @@ func (tw *SyncWorkerImpl) execPayload(p payload.Payload) (payload.Payload, error defer tw.putFrame(fr) // can be 0 here - fr.WriteVersion(frame.VERSION_1) + fr.WriteVersion(fr.Header(), frame.VERSION_1) // obtain a buffer buf := tw.get() @@ -171,18 +168,18 @@ func (tw *SyncWorkerImpl) execPayload(p payload.Payload) (payload.Payload, error buf.Write(p.Body) // Context offset - fr.WriteOptions(uint32(len(p.Context))) - fr.WritePayloadLen(uint32(buf.Len())) + fr.WriteOptions(fr.HeaderPtr(), uint32(len(p.Context))) + fr.WritePayloadLen(fr.Header(), uint32(buf.Len())) fr.WritePayload(buf.Bytes()) - fr.WriteCRC() + fr.WriteCRC(fr.Header()) // return buffer tw.put(buf) err := tw.Relay().Send(fr) if err != nil { - return payload.Payload{}, errors.E(op, errors.Network, err) + return nil, errors.E(op, errors.Network, err) } frameR := tw.getFrame() @@ -190,34 +187,34 @@ func (tw *SyncWorkerImpl) execPayload(p payload.Payload) (payload.Payload, error err = tw.process.Relay().Receive(frameR) if err != nil { - return payload.Payload{}, errors.E(op, errors.Network, err) + return nil, errors.E(op, errors.Network, err) } if frameR == nil { - return payload.Payload{}, errors.E(op, errors.Network, errors.Str("nil fr received")) + return nil, errors.E(op, errors.Network, errors.Str("nil fr received")) } - if !frameR.VerifyCRC() { - return payload.Payload{}, errors.E(op, errors.Network, errors.Str("failed to verify CRC")) + if !frameR.VerifyCRC(frameR.Header()) { + return nil, errors.E(op, errors.Network, errors.Str("failed to verify CRC")) } flags := frameR.ReadFlags() if flags&frame.ERROR != byte(0) { - return payload.Payload{}, errors.E(op, errors.SoftJob, 
errors.Str(string(frameR.Payload()))) + return nil, errors.E(op, errors.SoftJob, errors.Str(string(frameR.Payload()))) } - options := frameR.ReadOptions() + options := frameR.ReadOptions(frameR.Header()) if len(options) != 1 { - return payload.Payload{}, errors.E(op, errors.Decode, errors.Str("options length should be equal 1 (body offset)")) + return nil, errors.E(op, errors.Decode, errors.Str("options length should be equal 1 (body offset)")) } - pld := payload.Payload{ + pld := &payload.Payload{ Body: make([]byte, len(frameR.Payload()[options[0]:])), Context: make([]byte, len(frameR.Payload()[:options[0]])), } // by copying we free frame's payload slice - // so we do not hold the pointer from the smaller slice to the initial (which is should be in the sync.Pool) + // we do not hold the pointer from the smaller slice to the initial (which should be in the sync.Pool) // https://blog.golang.org/slices-intro#TOC_6. copy(pld.Body, frameR.Payload()[options[0]:]) copy(pld.Context, frameR.Payload()[:options[0]]) diff --git a/pkg/worker/sync_worker_test.go b/pkg/worker/sync_worker_test.go index df556e93..64580f9f 100755 --- a/pkg/worker/sync_worker_test.go +++ b/pkg/worker/sync_worker_test.go @@ -24,11 +24,10 @@ func Test_NotStarted_Exec(t *testing.T) { sw := From(w) - res, err := sw.Exec(payload.Payload{Body: []byte("hello")}) + res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) assert.Error(t, err) - assert.Nil(t, res.Body) - assert.Nil(t, res.Context) + assert.Nil(t, res) assert.Contains(t, err.Error(), "Process is not ready (inactive)") } diff --git a/pkg/worker_handler/request.go b/pkg/worker_handler/request.go index 44c466bb..3d60897b 100644 --- a/pkg/worker_handler/request.go +++ b/pkg/worker_handler/request.go @@ -138,18 +138,18 @@ func (r *Request) Close(log logger.Logger) { // Payload request marshaled RoadRunner payload based on PSR7 data. values encode method is JSON. Make sure to open // files prior to calling this method. -func (r *Request) Payload() (payload.Payload, error) { +func (r *Request) Payload() (*payload.Payload, error) { const op = errors.Op("marshal_payload") - p := payload.Payload{} + p := &payload.Payload{} var err error if p.Context, err = json.Marshal(r); err != nil { - return payload.Payload{}, errors.E(op, errors.Encode, err) + return nil, errors.E(op, errors.Encode, err) } if r.Parsed { if p.Body, err = json.Marshal(r.body); err != nil { - return payload.Payload{}, errors.E(op, errors.Encode, err) + return nil, errors.E(op, errors.Encode, err) } } else if r.body != nil { p.Body = r.body.([]byte) diff --git a/pkg/worker_handler/response.go b/pkg/worker_handler/response.go index cbf22794..d22f09d4 100644 --- a/pkg/worker_handler/response.go +++ b/pkg/worker_handler/response.go @@ -22,7 +22,7 @@ type Response struct { } // NewResponse creates new response based on given pool payload. 
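
execPayload also absorbs a goridge frame API change: the write and verify helpers now take the header explicitly (`fr.WriteVersion(fr.Header(), ...)`, `fr.WriteCRC(fr.Header())`, `frameR.VerifyCRC(frameR.Header())`) rather than operating on an implicit one. Below is a sketch of the encode side using only the calls the diff itself shows, and assuming goridge v3's `frame.NewFrame` constructor; in the real worker the frame comes from a `sync.Pool` via `tw.getFrame()` instead:

```go
package frameexample

import (
	"github.com/spiral/goridge/v3/pkg/frame"
)

// encode packs context+body into one frame the way execPayload does:
// context bytes first, body after, with the body offset in options.
func encode(context, body []byte) *frame.Frame {
	fr := frame.NewFrame()

	fr.WriteVersion(fr.Header(), frame.VERSION_1)

	// the single option is the body offset (== length of the context part)
	fr.WriteOptions(fr.HeaderPtr(), uint32(len(context)))
	fr.WritePayloadLen(fr.Header(), uint32(len(context)+len(body)))

	buf := make([]byte, 0, len(context)+len(body))
	buf = append(buf, context...)
	buf = append(buf, body...)
	fr.WritePayload(buf)

	// CRC is computed over the header and must be written last
	fr.WriteCRC(fr.Header())
	return fr
}
```

On the receive side the diff shows the mirror image: `frameR.VerifyCRC(frameR.Header())`, then `frameR.ReadOptions(frameR.Header())` to recover the single offset, then copying both halves of `frameR.Payload()` out so the pooled frame's slice is not retained.
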
-func NewResponse(p payload.Payload) (*Response, error) { +func NewResponse(p *payload.Payload) (*Response, error) { const op = errors.Op("http_response") r := &Response{Body: p.Body} if err := json.Unmarshal(p.Context, r); err != nil { diff --git a/pkg/worker_watcher/container/channel/vec.go b/pkg/worker_watcher/container/channel/vec.go new file mode 100644 index 00000000..51093978 --- /dev/null +++ b/pkg/worker_watcher/container/channel/vec.go @@ -0,0 +1,99 @@ +package channel + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/worker" +) + +type Vec struct { + sync.RWMutex + // destroy signal + destroy uint64 + // channel with the workers + workers chan worker.BaseProcess + + len uint64 +} + +func NewVector(len uint64) *Vec { + vec := &Vec{ + destroy: 0, + len: len, + workers: make(chan worker.BaseProcess, len), + } + + return vec +} + +// Push is O(1) operation +// In case of TTL and full channel O(n) worst case, where n is len of the channel +func (v *Vec) Push(w worker.BaseProcess) { + // Non-blocking channel send + select { + case v.workers <- w: + // default select branch is only possible when dealing with TTL + // because in that case, workers in the v.workers channel can be TTL-ed and killed + // but presenting in the channel + default: + v.Lock() + defer v.Unlock() + + /* + we can be in the default branch by the following reasons: + 1. TTL is set with no requests during the TTL + 2. Violated Get <-> Release operation (how ??) + */ + for i := uint64(0); i < v.len; i++ { + wrk := <-v.workers + switch wrk.State().Value() { + // skip good states + case worker.StateWorking, worker.StateReady: + // put the worker back + // generally, while send and receive operations are concurrent (from the channel), channel behave + // like a FIFO, but when re-sending from the same goroutine it behaves like a FILO + v.workers <- wrk + continue + default: + // kill the current worker (just to be sure it's dead) + _ = wrk.Kill() + // replace with the new one + v.workers <- w + return + } + } + } +} + +func (v *Vec) Remove(_ int64) {} + +func (v *Vec) Pop(ctx context.Context) (worker.BaseProcess, error) { + /* + if *addr == old { + *addr = new + return true + } + */ + + if atomic.CompareAndSwapUint64(&v.destroy, 1, 1) { + return nil, errors.E(errors.WatcherStopped) + } + + // used only for the TTL-ed workers + v.RLock() + defer v.RUnlock() + + select { + case w := <-v.workers: + return w, nil + case <-ctx.Done(): + return nil, errors.E(ctx.Err(), errors.NoFreeWorkers) + } +} + +func (v *Vec) Destroy() { + atomic.StoreUint64(&v.destroy, 1) +} diff --git a/pkg/worker_watcher/container/interface.go b/pkg/worker_watcher/container/interface.go deleted file mode 100644 index e10ecdae..00000000 --- a/pkg/worker_watcher/container/interface.go +++ /dev/null @@ -1,17 +0,0 @@ -package container - -import ( - "context" - - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -// Vector interface represents vector container -type Vector interface { - // Enqueue used to put worker to the vector - Enqueue(worker.BaseProcess) - // Dequeue used to get worker from the vector - Dequeue(ctx context.Context) (worker.BaseProcess, error) - // Destroy used to stop releasing the workers - Destroy() -} diff --git a/pkg/worker_watcher/container/queue/queue.go b/pkg/worker_watcher/container/queue/queue.go new file mode 100644 index 00000000..edf81d60 --- /dev/null +++ b/pkg/worker_watcher/container/queue/queue.go @@ -0,0 +1,102 @@ +package queue + +import ( + 
"context" + "sync" + "sync/atomic" + + "github.com/spiral/roadrunner/v2/pkg/worker" +) + +const ( + initialSize = 1 + maxInitialSize = 8 + maxInternalSliceSize = 10 +) + +type Node struct { + w []worker.BaseProcess + // LL + n *Node +} + +type Queue struct { + mu sync.Mutex + + head *Node + tail *Node + + curr uint64 + len uint64 + + sliceSize uint64 +} + +func NewQueue() *Queue { + q := &Queue{ + mu: sync.Mutex{}, + head: nil, + tail: nil, + curr: 0, + len: 0, + sliceSize: 0, + } + + return q +} + +func (q *Queue) Push(w worker.BaseProcess) { + q.mu.Lock() + + if q.head == nil { + h := newNode(initialSize) + q.head = h + q.tail = h + q.sliceSize = maxInitialSize + } else if uint64(len(q.tail.w)) >= atomic.LoadUint64(&q.sliceSize) { + n := newNode(maxInternalSliceSize) + q.tail.n = n + q.tail = n + q.sliceSize = maxInternalSliceSize + } + + q.tail.w = append(q.tail.w, w) + + atomic.AddUint64(&q.len, 1) + + q.mu.Unlock() +} + +func (q *Queue) Pop(ctx context.Context) (worker.BaseProcess, error) { + q.mu.Lock() + + if q.head == nil { + return nil, nil + } + + w := q.head.w[q.curr] + q.head.w[q.curr] = nil + atomic.AddUint64(&q.len, ^uint64(0)) + atomic.AddUint64(&q.curr, 1) + + if atomic.LoadUint64(&q.curr) >= uint64(len(q.head.w)) { + n := q.head.n + q.head.n = nil + q.head = n + q.curr = 0 + } + + q.mu.Unlock() + + return w, nil +} + +func (q *Queue) Replace(oldPid int64, newWorker worker.BaseProcess) { + +} + +func (q *Queue) Destroy() {} + +func newNode(capacity int) *Node { + return &Node{w: make([]worker.BaseProcess, 0, capacity)} +} diff --git a/pkg/worker_watcher/container/vec.go b/pkg/worker_watcher/container/vec.go deleted file mode 100644 index 24b5fa6d..00000000 --- a/pkg/worker_watcher/container/vec.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "context" - "sync/atomic" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -type Vec struct { - destroy uint64 - workers chan worker.BaseProcess -} - -func NewVector(initialNumOfWorkers uint64) *Vec { - vec := &Vec{ - destroy: 0, - workers: make(chan worker.BaseProcess, initialNumOfWorkers), - } - - return vec -} - -func (v *Vec) Enqueue(w worker.BaseProcess) { - v.workers <- w -} - -func (v *Vec) Dequeue(ctx context.Context) (worker.BaseProcess, error) { - /* - if *addr == old { - *addr = new - return true - } - */ - - if atomic.CompareAndSwapUint64(&v.destroy, 1, 1) { - return nil, errors.E(errors.WatcherStopped) - } - - select { - case w := <-v.workers: - return w, nil - case <-ctx.Done(): - return nil, errors.E(ctx.Err(), errors.NoFreeWorkers) - } -} - -func (v *Vec) Destroy() { - atomic.StoreUint64(&v.destroy, 1) -} diff --git a/pkg/worker_watcher/worker_watcher.go b/pkg/worker_watcher/worker_watcher.go index b2d61d48..348be199 100755 --- a/pkg/worker_watcher/worker_watcher.go +++ b/pkg/worker_watcher/worker_watcher.go @@ -8,45 +8,54 @@ import ( "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/pkg/events" "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/spiral/roadrunner/v2/pkg/worker_watcher/container" + "github.com/spiral/roadrunner/v2/pkg/worker_watcher/container/channel" ) // Vector interface represents vector container type Vector interface { - // Enqueue used to put worker to the vector - Enqueue(worker.BaseProcess) - // Dequeue used to get worker from the vector - Dequeue(ctx context.Context) (worker.BaseProcess, error) + // Push used to put worker to the vector + Push(worker.BaseProcess) + // Pop used to get worker from the vector + Pop(ctx 
context.Context) (worker.BaseProcess, error) + // Remove worker with provided pid + Remove(pid int64) // Destroy used to stop releasing the workers Destroy() + + // TODO Add Replace method, and remove `Remove` method. Replace will do removal and allocation + // Replace(prevPid int64, newWorker worker.BaseProcess) +} + +type workerWatcher struct { + sync.RWMutex + container Vector + // used to control Destroy stage (that all workers are in the container) + numWorkers uint64 + + workers []worker.BaseProcess + + allocator worker.Allocator + events events.Handler } // NewSyncWorkerWatcher is a constructor for the Watcher func NewSyncWorkerWatcher(allocator worker.Allocator, numWorkers uint64, events events.Handler) *workerWatcher { ww := &workerWatcher{ - container: container.NewVector(numWorkers), + container: channel.NewVector(numWorkers), numWorkers: numWorkers, - workers: make([]worker.BaseProcess, 0, numWorkers), - allocator: allocator, - events: events, + + workers: make([]worker.BaseProcess, 0, numWorkers), + + allocator: allocator, + events: events, } return ww } -type workerWatcher struct { - sync.RWMutex - container Vector - // used to control the Destroy stage (that all workers are in the container) - numWorkers uint64 - workers []worker.BaseProcess - allocator worker.Allocator - events events.Handler -} - func (ww *workerWatcher) Watch(workers []worker.BaseProcess) error { for i := 0; i < len(workers); i++ { - ww.container.Enqueue(workers[i]) + ww.container.Push(workers[i]) // add worker to watch slice ww.workers = append(ww.workers, workers[i]) @@ -57,12 +66,12 @@ func (ww *workerWatcher) Watch(workers []worker.BaseProcess) error { return nil } -// Get is not a thread safe operation -func (ww *workerWatcher) Get(ctx context.Context) (worker.BaseProcess, error) { +// Take is not a thread safe operation +func (ww *workerWatcher) Take(ctx context.Context) (worker.BaseProcess, error) { const op = errors.Op("worker_watcher_get_free_worker") // thread safe operation - w, err := ww.container.Dequeue(ctx) + w, err := ww.container.Pop(ctx) if errors.Is(errors.WatcherStopped, err) { return nil, errors.E(op, errors.WatcherStopped) } @@ -78,11 +87,11 @@ func (ww *workerWatcher) Get(ctx context.Context) (worker.BaseProcess, error) { // ========================================================= // SLOW PATH - _ = w.Kill() // how the worker get here??????? - // no free workers in the container + _ = w.Kill() + // no free workers in the container or worker not in the ReadyState (TTL-ed) // try to continuously get free one for { - w, err = ww.container.Dequeue(ctx) + w, err = ww.container.Pop(ctx) if errors.Is(errors.WatcherStopped, err) { return nil, errors.E(op, errors.WatcherStopped) @@ -98,7 +107,7 @@ func (ww *workerWatcher) Get(ctx context.Context) (worker.BaseProcess, error) { case worker.StateReady: return w, nil case worker.StateWorking: // how?? 
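
The container rename (Enqueue/Dequeue to Push/Pop, plus Remove, with the watcher's Get becoming Take and its Push becoming Release) makes the borrow-and-return life cycle explicit. Two behaviors of the channel-backed Vec are worth seeing in isolation: Pop surfaces errors.NoFreeWorkers when the context expires with nothing in the channel, and Destroy makes later Pops fail fast with errors.WatcherStopped. A small sketch against the package as added above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/worker_watcher/container/channel"
)

func main() {
	// Vec is the channel-backed Vector from the diff; capacity matches
	// the pool's worker count, and it stays empty until Watch pushes
	// the allocated workers in.
	vec := channel.NewVector(1)

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// With nothing Pushed yet, Pop blocks on the channel until the
	// context gives up, surfacing errors.NoFreeWorkers to the pool.
	if _, err := vec.Pop(ctx); err != nil {
		fmt.Println("no free workers:", err)
	}

	// Destroy flips the atomic flag, so later Pop calls fail fast with
	// errors.WatcherStopped instead of waiting on the channel.
	vec.Destroy()
	if _, err := vec.Pop(context.Background()); err != nil {
		fmt.Println("watcher stopped:", err)
	}
}
```
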
-			ww.container.Enqueue(w) // put it back, let worker finish the work
+			ww.container.Push(w) // put it back, let worker finish the work
 			continue
 		case
 		// all the possible wrong states
@@ -135,7 +144,7 @@ func (ww *workerWatcher) Allocate() error {
 	// unlock Allocate mutex
 	ww.Unlock()
 	// push the worker to the container
-	ww.Push(sw)
+	ww.Release(sw)

 	return nil
 }
@@ -158,11 +167,11 @@ func (ww *workerWatcher) Remove(wb worker.BaseProcess) {
 	}
 }

-// Push O(1) operation
-func (ww *workerWatcher) Push(w worker.BaseProcess) {
+// Release O(1) operation
+func (ww *workerWatcher) Release(w worker.BaseProcess) {
 	switch w.State().Value() {
 	case worker.StateReady:
-		ww.container.Enqueue(w)
+		ww.container.Push(w)
 	default:
 		_ = w.Kill()
 	}
@@ -226,13 +235,18 @@ func (ww *workerWatcher) wait(w worker.BaseProcess) {
 		})
 	}

+	// remove worker
+	ww.Remove(w)
+
 	if w.State().Value() == worker.StateDestroyed {
 		// worker was manually destroyed, no need to replace
 		ww.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w})
 		return
 	}

-	ww.Remove(w)
+	// set state as stopped
+	w.State().Set(worker.StateStopped)
+
 	err = ww.Allocate()
 	if err != nil {
 		ww.events.Push(events.PoolEvent{
diff --git a/plugins/broadcast/interface.go b/plugins/broadcast/interface.go
index 46709d71..eda3572f 100644
--- a/plugins/broadcast/interface.go
+++ b/plugins/broadcast/interface.go
@@ -1,6 +1,6 @@
 package broadcast

-import "github.com/spiral/roadrunner/v2/pkg/pubsub"
+import "github.com/spiral/roadrunner/v2/common/pubsub"

 type Broadcaster interface {
 	GetDriver(key string) (pubsub.SubReader, error)
diff --git a/plugins/broadcast/plugin.go b/plugins/broadcast/plugin.go
index 6ddef806..889dc2fa 100644
--- a/plugins/broadcast/plugin.go
+++ b/plugins/broadcast/plugin.go
@@ -7,7 +7,7 @@ import (
 	"github.com/google/uuid"
 	endure "github.com/spiral/endure/pkg/container"
 	"github.com/spiral/errors"
-	"github.com/spiral/roadrunner/v2/pkg/pubsub"
+	"github.com/spiral/roadrunner/v2/common/pubsub"
 	"github.com/spiral/roadrunner/v2/plugins/config"
 	"github.com/spiral/roadrunner/v2/plugins/logger"
 )
diff --git a/plugins/broadcast/rpc.go b/plugins/broadcast/rpc.go
index 2ee211f8..475076a0 100644
--- a/plugins/broadcast/rpc.go
+++ b/plugins/broadcast/rpc.go
@@ -2,7 +2,7 @@ package broadcast

 import (
 	"github.com/spiral/errors"
-	"github.com/spiral/roadrunner/v2/pkg/pubsub"
+	"github.com/spiral/roadrunner/v2/common/pubsub"
 	"github.com/spiral/roadrunner/v2/plugins/logger"
 	websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta"
 )
diff --git a/plugins/http/attributes/attributes.go b/plugins/http/attributes/attributes.go
index 4c453766..81d9f01d 100644
--- a/plugins/http/attributes/attributes.go
+++ b/plugins/http/attributes/attributes.go
@@ -7,7 +7,7 @@ import (
 )

 // contextKey is a value for use with context.WithValue. It's used as
-// a pointer so it fits in an interface{} without allocation.
+// a pointer so that it fits in an interface{} without allocation.
 type contextKey struct {
 	name string
 }
diff --git a/plugins/http/config/ssl.go b/plugins/http/config/ssl.go
index d44703f9..0e3c0caf 100644
--- a/plugins/http/config/ssl.go
+++ b/plugins/http/config/ssl.go
@@ -36,7 +36,7 @@ func (s *SSL) Valid() error {
 	parts := strings.Split(s.Address, ":")
 	switch len(parts) {
 	// :443 form
-	// localhost:443 form
+	// 127.0.0.1:443 form
 	// use 0.0.0.0 as host and 443 as port
 	case 2:
 		if parts[0] == "" {
diff --git a/plugins/http/config/ssl_config_test.go b/plugins/http/config/ssl_config_test.go
index 1f5fef0a..8f6cf40e 100644
--- a/plugins/http/config/ssl_config_test.go
+++ b/plugins/http/config/ssl_config_test.go
@@ -101,7 +101,7 @@ func TestSSL_Valid6(t *testing.T) {

 func TestSSL_Valid7(t *testing.T) {
 	conf := &SSL{
-		Address:  "localhost:555:1",
+		Address:  "127.0.0.1:555:1",
 		Redirect: false,
 		Key:      "../../../tests/plugins/http/fixtures/server.key",
 		Cert:     "../../../tests/plugins/http/fixtures/server.crt",
diff --git a/plugins/http/plugin.go b/plugins/http/plugin.go
index bec01ac3..2ee83384 100644
--- a/plugins/http/plugin.go
+++ b/plugins/http/plugin.go
@@ -143,7 +143,7 @@ func (p *Plugin) Serve() chan error {
 func (p *Plugin) serve(errCh chan error) {
 	var err error
 	const op = errors.Op("http_plugin_serve")
-	p.pool, err = p.server.NewWorkerPool(context.Background(), pool.Config{
+	p.pool, err = p.server.NewWorkerPool(context.Background(), &pool.Config{
 		Debug:      p.cfg.Pool.Debug,
 		NumWorkers: p.cfg.Pool.NumWorkers,
 		MaxJobs:    p.cfg.Pool.MaxJobs,
@@ -285,13 +285,13 @@ func (p *Plugin) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }

 // Workers returns a slice with the process states for the workers
-func (p *Plugin) Workers() []process.State {
+func (p *Plugin) Workers() []*process.State {
 	p.RLock()
 	defer p.RUnlock()

 	workers := p.workers()

-	ps := make([]process.State, 0, len(workers))
+	ps := make([]*process.State, 0, len(workers))
 	for i := 0; i < len(workers); i++ {
 		state, err := process.WorkerProcessState(workers[i])
 		if err != nil {
@@ -323,7 +323,7 @@ func (p *Plugin) Reset() error {
 	p.pool = nil
 	var err error
-	p.pool, err = p.server.NewWorkerPool(context.Background(), pool.Config{
+	p.pool, err = p.server.NewWorkerPool(context.Background(), &pool.Config{
 		Debug:      p.cfg.Pool.Debug,
 		NumWorkers: p.cfg.Pool.NumWorkers,
 		MaxJobs:    p.cfg.Pool.MaxJobs,
diff --git a/plugins/informer/interface.go b/plugins/informer/interface.go
index 316c7bc1..bbc1a048 100644
--- a/plugins/informer/interface.go
+++ b/plugins/informer/interface.go
@@ -11,7 +11,7 @@ Because Availabler implementation should present in every plugin

 // Informer used to get workers from particular plugin or set of plugins
 type Informer interface {
-	Workers() []process.State
+	Workers() []*process.State
 }

 // Availabler interface should be implemented by every plugin which wishes to report to the PHP worker that it is available in the RR runtime
@@ -19,3 +19,7 @@ type Availabler interface {
 	// Available method needed to collect all plugins which are available in the runtime.
 	Available()
 }
+
+type JobsStat interface {
+	Stat()
+}
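The []process.State to []*process.State change ripples through every Informer implementation (http above, informer below). A minimal sketch of a conforming plugin, offered as an illustration only — the plugin type, its worker bookkeeping, and the import paths are assumptions, not part of this changeset:

package myplugin

import (
	"github.com/spiral/roadrunner/v2/pkg/process"
	"github.com/spiral/roadrunner/v2/pkg/worker"
)

type Plugin struct {
	workers []worker.BaseProcess // however this plugin tracks its workers
}

// Workers satisfies the updated Informer contract by returning pointers.
func (p *Plugin) Workers() []*process.State {
	ps := make([]*process.State, 0, len(p.workers))
	for i := 0; i < len(p.workers); i++ {
		state, err := process.WorkerProcessState(p.workers[i])
		if err != nil {
			continue // skip workers whose state cannot be read
		}
		ps = append(ps, state)
	}
	return ps
}

// Available marks the plugin as present in the RR runtime (Availabler).
func (p *Plugin) Available() {}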
diff --git a/plugins/informer/plugin.go b/plugins/informer/plugin.go
index f8725ed7..c613af58 100644
--- a/plugins/informer/plugin.go
+++ b/plugins/informer/plugin.go
@@ -19,7 +19,7 @@ func (p *Plugin) Init() error {
 }

 // Workers provides a BaseProcess slice with workers for the requested plugin
-func (p *Plugin) Workers(name string) []process.State {
+func (p *Plugin) Workers(name string) []*process.State {
 	svc, ok := p.withWorkers[name]
 	if !ok {
 		return nil
diff --git a/plugins/informer/rpc.go b/plugins/informer/rpc.go
index 3925ef64..02254865 100644
--- a/plugins/informer/rpc.go
+++ b/plugins/informer/rpc.go
@@ -11,7 +11,7 @@ type rpc struct {
 // WorkerList contains list of workers.
 type WorkerList struct {
 	// Workers is a list of workers.
-	Workers []process.State `json:"workers"`
+	Workers []*process.State `json:"workers"`
 }

 // List all resettable services.
@@ -38,3 +38,17 @@ func (rpc *rpc) Workers(service string, list *WorkerList) error {

 	return nil
 }
+
+// Len, Less and Swap implement sort.Interface, so a WorkerList can be ordered by worker PID via sort.Sort
+
+func (w *WorkerList) Len() int {
+	return len(w.Workers)
+}
+
+func (w *WorkerList) Less(i, j int) bool {
+	return w.Workers[i].Pid < w.Workers[j].Pid
+}
+
+func (w *WorkerList) Swap(i, j int) {
+	w.Workers[i], w.Workers[j] = w.Workers[j], w.Workers[i]
+}
diff --git a/plugins/jobs/config.go b/plugins/jobs/config.go
new file mode 100644
index 00000000..454256b9
--- /dev/null
+++ b/plugins/jobs/config.go
@@ -0,0 +1,62 @@
+package jobs
+
+import (
+	"runtime"
+
+	poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+)
+
+const (
+	// name used to set the pipeline name
+	pipelineName string = "name"
+)
+
+// Config defines settings for the job broker, workers and the job-pipeline mapping.
+type Config struct {
+	// NumPollers configures the number of priority queue pollers
+	// Should be no more than 255
+	// Default - the number of logical cores
+	NumPollers uint8 `mapstructure:"num_pollers"`
+
+	// PipelineSize is the limit of the main jobs queue, which consumes Items from the drivers' pipelines
+	// A driver pipeline might be much larger than the main jobs queue
+	PipelineSize uint64 `mapstructure:"pipeline_size"`
+
+	// Timeout in seconds is the per-push limit to put the job into the queue
+	Timeout int `mapstructure:"timeout"`
+
+	// Pool configures the roadrunner workers pool.
+	Pool *poolImpl.Config `mapstructure:"Pool"`
+
+	// Pipelines defines mapping between PHP job pipeline and associated job broker.
+	Pipelines map[string]*pipeline.Pipeline `mapstructure:"pipelines"`
+
+	// Consume specifies names of pipelines to be consumed on service start.
+ Consume []string `mapstructure:"consume"` +} + +func (c *Config) InitDefaults() { + if c.Pool == nil { + c.Pool = &poolImpl.Config{} + } + + if c.PipelineSize == 0 { + c.PipelineSize = 1_000_000 + } + + if c.NumPollers == 0 { + c.NumPollers = uint8(runtime.NumCPU()) + } + + for k := range c.Pipelines { + // set the pipeline name + c.Pipelines[k].With(pipelineName, k) + } + + if c.Timeout == 0 { + c.Timeout = 60 + } + + c.Pool.InitDefaults() +} diff --git a/plugins/jobs/doc/jobs_arch.drawio b/plugins/jobs/doc/jobs_arch.drawio new file mode 100644 index 00000000..aaed82c7 --- /dev/null +++ b/plugins/jobs/doc/jobs_arch.drawio @@ -0,0 +1 @@ +<mxfile host="Electron" modified="2021-07-09T07:14:41.096Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/91.0.4472.124 Electron/13.1.6 Safari/537.36" etag="0gh7yhPcQUpxg5xU25Ad" version="14.6.13" type="device"><diagram id="AFQlLRRq6yGg9IpTzkrs" name="Page-1">7R1pc9q69tcwk96ZZGzL68csNLdt2tCQtM398sZgEdwYTG2ThP76J8mysaUDmGADWbhzG5A3+eybjlrodPR0HrmT4dfQw0FLU7ynFjpraZqKDI38oSOzdMSyzXTgLvI9ftJ8oOv/xXxQ4aNT38Nx6cQkDIPEn5QH++F4jPtJacyNovCxfNogDMpPnbh3WBro9t1AHv3pe8kwmx35zI/8i/27YSIdGrnZ+XwgHrpe+FgYQu0WOo3CMEm/jZ5OcUDhl4Emve7jgqP53CI8Tqpc0O3E5u3p15n7OdSH6t23b+376FDnk3twgyl/6avLy2sy8vnypEv+dC5uzj9942+QzDLIROF07GF6Z6WFTh6HfoK7E7dPjz4SWiBjw2QUkF8q+ToIx8lHd+QHlAy6H8mdvobjkBxwA/9uTMaiFIIngdvDQSeM/cQP6XiAB3T4AUeJT7ByIRxOwknh6DG/WS9MknBEH+sHwWkYhBGbMhoMsNnvk/E4icJ7XDjiWU5Poe8hg5RDmT4DPxWGOIjPcTjCSTQjp/CjumJpR0Z6Fad61Tb5wOOciDRTT8eGRfLRHU68nHDv8vvPEUu+cNzCeP41Hj/9q5LrHv4mY+v8O/5P6R9qEgaxR0id/wyjZBjehWM3aM9HT8o4np9zEVK4M8z+xkky43zrTpOwjHcCxWj2i15PIMJ/3haPnT3xm6e/ZvzXQnpZiJ84nEZ9vOz9OQASN7rDyZIT+Q0pdJaiO8KBm/gPZUEBoY1f2gl9MuecTBxbK9GIpdgZ1WQ3SafKryvytXgryyrfSpNulYJHutVxFLmzwmkTekIs0Vr+1s8nP7QT8nvykwL1kV+3GbmR73Paoz9mRULcD5JVK1KsviWKFchMd6pSbF1ElMnigqrqkDsph1RNTeMhuSn9c+Im/eGGCqusOTwD254OaQ5b6yHT5PgvjCvs8yy6qK5qbAEjhpFjpKhoMoOkqGh0uyE9o75z+vqcrlfVTea2WN08sh3TMFRL01VkOSUyQ0aDjH/5t4O+DNH/HpxDEyXnXYWYnxnj74imihSV0xdMU6LFafcxbHH2bEM35hangNBmqSg1heonGkUwQ1StIpksNkPgJ1mmoIksnT9pkYkkXjGf25xS01nUqrB0SWGdYzqriT/BgT/GtSoprBI1ZUHE5pgWcnelpBxVQBaopFQNUFIINaSkDAkvLc0MqKc5KeHD/DOlzvnJiNCtT1zLY3JUmTyRfxlslHT8kLmh9JheOEbAkxxy/5Ye6xMQ4qh82MP9MHJTZ5aeQ5EfpYSRPZp8u+N/2QR72cDnsEfActxNomk/mUY4O4EApCdeRMYm4tgwEkee9e6ph04P2vSgOG0yS3bFGbt35I/v8hfJn99xZ0HoegvOg+bexEw/eZhJjOVzvZxQbMX5eflvaLICc4txijR6kYVAeKgjJGcNAhYmovzNmZMrKVVbxqyLRcPAoP9BosFkH1k0pJ96JEDO27PMIEUy/5sA/6ua3pAAMF+BAMhIb+9lQCfXdjtl8E7kh5GfzPJpTImJQINwIo+fETtotmCy87OOkwSPJslcFDCDY7uvdEWZoTzdwix6Ena3Na9rf4TDafKcSb3LTFBmIlupKDNRUzIzs+PePfs1fLLMY1/t2tv74Npr5tZjerIqTn2kPtFt0xFTmAdeRF44+sDe/s6PiRrF1FwchPRoMsQll0rpzcq/x+7oTXhatlNOKSAN9LQsQGqYjQkN511orC807IpCI1MSuxUaapPxwKUAKgiNr5g8kQz9Zg5nmLtpjz4hfEFuFGQDP/GIoo3fIRwHFGTUuJvJN6xRjAwGA21BPtrsmcbOxIhRDthoKphV0CExojQkRjS5SoFmeg4oJlvaaYomqh+IJCCPJ/+fUJBGteLLc7E9APFl9m3cG+yJ2FcVrarYN8ym8CWn6hYghLrBZahnxj33l+X6jpHveamewLH/1+2xW1Eo87gxua9x0jLO6L2Iaoi5HyAhbhyOccO4cRyxEkQBcGMYgB1vNYUaTUKNhBTiu1H5eJpXVSkybZP/PtLHntxFrufj+bEMrGXmyU8HceC58TDnykXVPnldz3KnMKcboQBJ9AczG4Q+0o0n6YsO/Cc6j5SUcNR+wClFMeoZuhN6wejpjla7HbmPsX40jdmzGiUiZJeIyLQhBrdsmYisGrK8f82B94jsj/r96Pj2fPrUsZx/s3KOncTPgMCYO6IEEMi/GBIHVL5nw+VLypE1KTSRB9FOC85IjwmyDzuKtYGgkqIyy+DIzpKjY1SdprbOPzGLJ8ZHn1OdiqOIOlo1h5CaepErbukdFKy8f7LvR1ks8uW9FyfB1/ZaFwRZB3Taqc6WX+nFvMkWwq6NMb9L1FiRstKg94cX8wJXWOSNpW8gmDuZYo+YDVC0G+TIMuQyLFT+ex1SNiw5pIwgL0G1a8jDg2bEe3BoaXAIhJkhx4bA82qPJ2+EabnionPT/XdD97xBm1s1BW5xdCgDA5VWohqCIMswWoDhn6PftAqE2MZDl7hqwT7DUxXgiRwAnmAVUA3wjK/Oesrv+98/rfvvXffX6U/z+iYrAnhB0kctyp6CKGpE+lw5Nxex8dDt2sF/
t7+02P3pX0A16SBsm6lJX7dQUHWssuusGrZVpBv5CjMrTnn+FaZmCbS5WWnhMkQUhMG34+vuhgLgWWku2PJZEAitQ5Q4mgjwrPK0KEpUQJToTYkSe5eiRC0KkhUFynNDZpuiBITZzkTJRph+eSbrVpXGRphG+6E0NFG+IHt5dTmRQBtfYaj1Kg0YEcr+UK/6gql3mUbeO+q1neUGjEyL619Rt8kDI2K3y4COrNdCvuB52p6QryESo76CGJXNr7DQNshXstgLFcnfb9o37Vqtd9NVlMEAst6RpSinp7KRPmCfpgMBsuyArHcoENCY9b5Hha0VxcpWw5DLxMULs941iQe/tr9eXt3Wynkvxm+21F37zcb+cN5yb6oBDkK74qCaQmm6uiIwpouSXt+GjyOXphx//d55mwyuO7tmcHkFdAPdpYQyTVtRLARh59hQFB0oZduO4YPsKoYPtKanMeyosj78NJoQkUqx0R9iYN3E667XVFVUTlOpKGtoUkr7WTKS6ijYhJEkSzQCkYDWKpC3jcIH38ORXJn+yhFlKkJlrWEC+URkNoMosJWe7OCpdF3BHFkuuweXfAXMLaob8vwHcSieuONsTGN3T2/6aewnPkHw33QFA7UXCDApwHzG0Om6B/5IOpUxHYgx+8OLXVu8fD5dUNX5XqidKT62MAxMcMWcEZ1zhy0N9fMpe2VwFGcxcietfBHHPZ7FKeLYYZ/OmC7uitnd8mv64Xjg35EvB+7ozyRdHtDD7jhO3OA+/YmT/occCgxh9B6HDFt5pWexeHQdQFRmRKG74bbZUNCCH9mnJvaUopCO4kjs6TiAHNVrWIoGsqes68qmSLd99aP9VhWfZuqi4kOAPLXQNuXpjuO9m8dl5pGYJuIyINAArxI8jyNt+3GZZbMu8Cbnxo38glVdZ3PGfW7brs1YLkuzzN01oE0sXBLVFMvtUUnUi+E4oO3aMhrfE46THfN9yEZs206ReRBlfXm3kY1YJpmLmLkE4lfDcNSbxmsjgkY6+gqEiMFg1WJVSW7WgQGxP5xqGLIUtLeJALnOlrz9+nKxusGfL2wk7tgx7VpP7znB43SEy037eTh7bib6+fLQqlkeckLJ3OsNWxkgVTHE9uyGJayDrqlPJVLmy33nz1peGQRdo9u6QNSbxc1BdDgS1c+wHFWqk+zTZbtN0vyaxkHzNJ/F3rdM9JpjmCJRmZbgaNfXswN+9Xfbcn26yuhlNWEZe2VdZvMuSJMfxxcb25SVDZMlfdoXdHZvxJg07YqlLXU0bYMRIUe9OiENqRJZyxtPUppIphHrljMK4zSrkLsC6YVvLRimSVu4GJocDkMqtIVLY+EwOUjy0/WTN4ojpDhqFRzZW0WR7FX/SHGj+GmSwsPExvFY2oXmIeTcjz+g82dt6zBLjByffslG+jT3oLgeHWaIX5E6AtZnL162XsxsFBIw+fxfeTKkDrHhSPJflZMemgo0amks6ZFtwPZuda1jdQFrrOEgWu37cWwmfuRV1j8vr760r1ilzeXlxbbMLyDmZIO+HDKRg7z6zC9RI1iOWtEAM5rCiRzOa/9qn95c01TjwZ8jL2TdT4rp62kUtVjePvjwxpS65piSn2plWxUVZWhTHdNgFMoBwTPWkJaq9C6OHli/kbeYNUa2JWaNjZwHiyxnADqvOXzt0RrNfaznh6sjqkYaUDP1yFLMStcUXTLwUbVAbV0xK6DtaMr5rbxcZDcKdSsJal3VJQzomcmzsxR1TtEvlrt3YNFqWkXurr+j9GbIBsJXUpqa93Ic9+IJ5HJulLZWXJqfBBhNO7PM5VmIRUXmDXEmMoFC5a0mrjU5hfSGA1R6vu93rr2AfY+2Gp5CcgSx48YxCzOxTRNo4+y8GPiNYUtc3oTyTQXLlqy6TYTtUc6sYreR3es6VLkga79yZkgOHtPmqJhFBrK29ulOKIe0XbU/8Ptk5M8UA7K1ITvU82mPzLQd9yOOE1ljVmiS35ACNHSgYBlSgHXsUgCHA99usHX9DbM3YPCq4Vnk7BeDA00wrz5dXn26vp0bs0qDfTFXuZUVrd0tGrWWUrEiWq9hJwsYaXL89gr3MSEWj4tkbtymq42OzvCDn668gvN7bMuqz5e0tfHBCI/CaPYhPZUdcMcUIP1wTMHGDKaGk3yE19gt8g0k09f58J7lW2wpquI63szxKUaooUbKjWX5kByhpv38W1lKISNRvo6uUN5RyPROVtV/v3oPQPDXrOwmxZIPe6v2v+xRs1pwZRCFoxxxdKVmo/qCM62gLXqq5w3ASnJVsZCDm9MKjmocaVUVg7Q3WX3m3uJdUChIM+n4OW3R/OizO/YYxi47nfZZq5T244sRJ8GUNtCfy9nird4aSyJDFzCPZJbUsv2vtsKSuuylVd8RId3YQJn/u4u9bD4lmMqOg4K8Zz5kYzvawFs4pEkKZSl40hFpp/YzljJ/7i7SdU8nC0izSZX3lN7dpE5Cb1bYVqU3S56xkUrdkzolsoLcks3roDgxtiSfblvzjB1H6p7kcf/+YINtdOqezjd39XwEpdDYdiYr05I73d8EmYL9Bu6DqCmgtmhMXcjbG3NEihbbq13PZ1ugtbbVJX267MW/r+lrVQ+56cA25pd/O+jLEP3vwTk0UXLeVS5uziu3Fah7UZ/Ytsl2BKu/tiV9opeYPWnxgj7hCiKBHIGgG1jOp8shgff1fK+F3jXHsAWaEpMaNdE7eZIgzrOBhd2V5bnxAspG6T3Lyxb7PsbMchLLQ0QHKV1VwfzycXjIFkcNWDA2DmlBJ0+5vbUGa5olilQHKsTVgEJc1FQCxZB978w8fkdqJRNZsUWkIiB2vV2kAjFOITJBHxvcyfba7pFVB6PZitgAY14Ev6pvax1hrqf48Xr64+EPunBubi87ivX552jJBs+Lkk7rbZJaTCqxLonZtr1F3lQm/sRnG3rGrfXyUU1NNW25mLYITOe37nq4pmbGGit2Eybp+vkED7yQSUCqnz0c4GTtzF5T89VZI0gGv8J85zHJvZikkU2SbypbCJkeeHjgTgMKWMqYNILFtoKncB6Qs719AbR5tMgEqju3+4KELlJtsXzWVqFokQEIXaeGKAXoTGiS0G2nsSKFrQqmEQslnvaHXPKkRLmn6JFwAWBsIXoss6wSdR3QhzqAGq0p1EA9mPNGsbRvLFvf7TLXLlVkrD5ENkwLW1TzbPy8rG8NZG6njqIGVKqKVcYlMqCwLNj3vI6dX0FsbpLDq7qruSixOfuVDnu4H/IuHOwcmrSP+Kb3i3aXz9NOOSE1k7Vb8c7z3dptKG1B+zYbJzxFZ5zNc5fWScs6q5TGgMNLAgPIGQ3qP/Fwk6ot86eWxNQ3zWNsxjHiuoEFeQygFa+h1lHsALJM5mns/xani3bYmEcRW8+ImgsRvpVhRRCIQOEqDGwbppBNw4ZiKBqZxpGCdN3WLMM2NUXwJutbXbkMGgUpzDtxU9XKKhJDIEKzVmnTggzXZvzpKAIUUcVapDoKz0FIyknG0rJ093VaiaqJxNWpkKR0tmknynnGM9i0W5dwn1t
gvxGEpY5mxNnVq9F6Y7Zb1uRk/xXRIoWjVdkFdPEeUbVoIruiJjI2VEQboVpOYbYntMtG5Abr8tMikDaiIExBQWiwywOzTXMWnP7SGWcP+MZ5CXwjZ1TALfD2mWUQcWWqlnhrVnM8s9MFuHXwjLUHTAN1sd0/rgF61p60j791r48vvrwo3tGBBvow4zTGNrLm5vuU0cUQuXu3z1Clbl5ZIunKkalVA6xuH1nrw5b8jEIaRZs72QQEw6+hh+kZ/wc=</diagram></mxfile>
\ No newline at end of file
diff --git a/plugins/jobs/drivers/amqp/config.go b/plugins/jobs/drivers/amqp/config.go
new file mode 100644
index 00000000..73482d4d
--- /dev/null
+++ b/plugins/jobs/drivers/amqp/config.go
@@ -0,0 +1,63 @@
+package amqp
+
+// pipeline rabbitmq info
+const (
+	exchangeKey   string = "exchange"
+	exchangeType  string = "exchange-type"
+	queue         string = "queue"
+	routingKey    string = "routing-key"
+	prefetch      string = "prefetch"
+	exclusive     string = "exclusive"
+	priority      string = "priority"
+	multipleAsk   string = "multiple_ask"
+	requeueOnFail string = "requeue_on_fail"
+
+	dlx           string = "x-dead-letter-exchange"
+	dlxRoutingKey string = "x-dead-letter-routing-key"
+	dlxTTL        string = "x-message-ttl"
+	dlxExpires    string = "x-expires"
+
+	contentType string = "application/octet-stream"
+)
+
+type GlobalCfg struct {
+	Addr string `mapstructure:"addr"`
+}
+
+// Config is used to parse pipeline configuration
+type Config struct {
+	Prefetch      int    `mapstructure:"prefetch"`
+	Queue         string `mapstructure:"queue"`
+	Priority      int64  `mapstructure:"priority"`
+	Exchange      string `mapstructure:"exchange"`
+	ExchangeType  string `mapstructure:"exchange_type"`
+	RoutingKey    string `mapstructure:"routing_key"`
+	Exclusive     bool   `mapstructure:"exclusive"`
+	MultipleAck   bool   `mapstructure:"multiple_ask"`
+	RequeueOnFail bool   `mapstructure:"requeue_on_fail"`
+}
+
+func (c *Config) InitDefault() {
+	// all options should be in sync with the pipeline defaults in the FromPipeline method
+	if c.ExchangeType == "" {
+		c.ExchangeType = "direct"
+	}
+
+	if c.Exchange == "" {
+		c.Exchange = "amqp.default"
+	}
+
+	if c.Prefetch == 0 {
+		c.Prefetch = 10
+	}
+
+	if c.Priority == 0 {
+		c.Priority = 10
+	}
+}
+
+func (c *GlobalCfg) InitDefault() {
+	if c.Addr == "" {
+		c.Addr = "amqp://guest:guest@127.0.0.1:5672/"
+	}
+}
diff --git a/plugins/jobs/drivers/amqp/consumer.go b/plugins/jobs/drivers/amqp/consumer.go
new file mode 100644
index 00000000..429953e1
--- /dev/null
+++ b/plugins/jobs/drivers/amqp/consumer.go
@@ -0,0 +1,472 @@
+package amqp
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/uuid"
+	amqp "github.com/rabbitmq/amqp091-go"
+	"github.com/spiral/errors"
+	"github.com/spiral/roadrunner/v2/pkg/events"
+	priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
+	"github.com/spiral/roadrunner/v2/plugins/config"
+	"github.com/spiral/roadrunner/v2/plugins/jobs/job"
+	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+	"github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+type JobConsumer struct {
+	sync.Mutex
+	log logger.Logger
+	pq  priorityqueue.Queue
+	eh  events.Handler
+
+	pipeline atomic.Value
+
+	// amqp connection
+	conn        *amqp.Connection
+	consumeChan *amqp.Channel
+	publishChan chan *amqp.Channel
+	consumeID   string
+	connStr     string
+
+	retryTimeout time.Duration
+	// prefetch QoS AMQP
+	prefetch int
+	// pipeline's priority
+	priority      int64
+	exchangeName  string
+	queue         string
+	exclusive     bool
+	exchangeType  string
+	routingKey    string
+	multipleAck   bool
+	requeueOnFail bool
+
+	delayCache map[string]struct{}
+
+	listeners uint32
+	stopCh    chan struct{}
+}
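The one-slot publishChan acts as an ownership token around the publishing *amqp.Channel: a publisher takes the channel out, every other publisher blocks until it is put back, and the redialer can drain the slot and refill it with a fresh channel after a reconnect. A minimal, self-contained sketch of that borrow/return discipline (the helper name is an assumption for illustration, not part of this changeset):

package main

import amqp "github.com/rabbitmq/amqp091-go"

// withPublishChannel borrows the single publishing channel, runs fn,
// and always returns the channel to the slot so other publishers (or
// the redialer, which swaps in a fresh channel) can take it next.
func withPublishChannel(publishChan chan *amqp.Channel, fn func(*amqp.Channel) error) error {
	pch := <-publishChan // blocks while another goroutine holds the channel
	defer func() { publishChan <- pch }()
	return fn(pch)
}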
+// NewAMQPConsumer initializes the rabbitmq pipeline
+func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) {
+	const op = errors.Op("new_amqp_consumer")
+	// we need to obtain two parts of the amqp information here:
+	// the first part is the address to connect to; it is located in the global section under the amqp pluginName
+	// the second part is the queues and other pipeline information
+	// if there is no such key - error
+	if !cfg.Has(configKey) {
+		return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
+	}
+
+	// if there is no global section
+	if !cfg.Has(pluginName) {
+		return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
+	}
+
+	// PARSE CONFIGURATION START -------
+	var pipeCfg Config
+	var globalCfg GlobalCfg
+
+	err := cfg.UnmarshalKey(configKey, &pipeCfg)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	pipeCfg.InitDefault()
+
+	err = cfg.UnmarshalKey(pluginName, &globalCfg)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	globalCfg.InitDefault()
+	// PARSE CONFIGURATION END -------
+
+	jb := &JobConsumer{
+		log:       log,
+		pq:        pq,
+		eh:        e,
+		consumeID: uuid.NewString(),
+		stopCh:    make(chan struct{}),
+		// TODO to config
+		retryTimeout: time.Minute * 5,
+		delayCache:   make(map[string]struct{}, 100),
+		priority:     pipeCfg.Priority,
+
+		publishChan:   make(chan *amqp.Channel, 1),
+		routingKey:    pipeCfg.RoutingKey,
+		queue:         pipeCfg.Queue,
+		exchangeType:  pipeCfg.ExchangeType,
+		exchangeName:  pipeCfg.Exchange,
+		prefetch:      pipeCfg.Prefetch,
+		exclusive:     pipeCfg.Exclusive,
+		multipleAck:   pipeCfg.MultipleAck,
+		requeueOnFail: pipeCfg.RequeueOnFail,
+	}
+
+	jb.conn, err = amqp.Dial(globalCfg.Addr)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	// save address
+	jb.connStr = globalCfg.Addr
+
+	err = jb.initRabbitMQ()
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	pch, err := jb.conn.Channel()
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	jb.publishChan <- pch
+
+	// run the redialer and requeue listener for the connection
+	jb.redialer()
+
+	return jb, nil
+}
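Both constructors therefore read a two-level configuration. An illustrative layout (key names follow the mapstructure tags above; the exact pipeline path and the driver key are assumptions about how the jobs plugin composes configKey):

amqp:
  addr: amqp://guest:guest@127.0.0.1:5672/

jobs:
  pipelines:
    example-pipe:        # assumed pipeline name
      driver: amqp
      queue: default
      exchange: amqp.default
      exchange_type: direct
      routing_key: example
      prefetch: 10
      priority: 10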
+func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) {
+	const op = errors.Op("new_amqp_consumer_from_pipeline")
+	// we need to obtain two parts of the amqp information here:
+	// the first part is the address to connect to; it is located in the global section under the amqp pluginName
+	// the second part is the queues and other pipeline information
+
+	// only the global section is required here
+	if !cfg.Has(pluginName) {
+		return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
+	}
+
+	// PARSE CONFIGURATION -------
+	var globalCfg GlobalCfg
+
+	err := cfg.UnmarshalKey(pluginName, &globalCfg)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	globalCfg.InitDefault()
+
+	// PARSE CONFIGURATION -------
+
+	jb := &JobConsumer{
+		log:          log,
+		eh:           e,
+		pq:           pq,
+		consumeID:    uuid.NewString(),
+		stopCh:       make(chan struct{}),
+		retryTimeout: time.Minute * 5,
+		delayCache:   make(map[string]struct{}, 100),
+
+		publishChan:   make(chan *amqp.Channel, 1),
+		routingKey:    pipeline.String(routingKey, ""),
+		queue:         pipeline.String(queue, "default"),
+		exchangeType:  pipeline.String(exchangeType, "direct"),
+		exchangeName:  pipeline.String(exchangeKey, "amqp.default"),
+		prefetch:      pipeline.Int(prefetch, 10),
+		priority:      int64(pipeline.Int(priority, 10)),
+		exclusive:     pipeline.Bool(exclusive, false),
+		multipleAck:   pipeline.Bool(multipleAsk, false),
+		requeueOnFail: pipeline.Bool(requeueOnFail, false),
+	}
+
+	jb.conn, err = amqp.Dial(globalCfg.Addr)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	// save address
+	jb.connStr = globalCfg.Addr
+
+	err = jb.initRabbitMQ()
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	pch, err := jb.conn.Channel()
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	jb.publishChan <- pch
+
+	// register the pipeline
+	// the error here is always nil
+	_ = jb.Register(context.Background(), pipeline)
+
+	// run the redialer for the connection
+	jb.redialer()
+
+	return jb, nil
+}
+
+func (j *JobConsumer) Push(ctx context.Context, job *job.Job) error {
+	const op = errors.Op("rabbitmq_push")
+	// check if the pipeline is registered
+
+	// load the atomic value
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != job.Options.Pipeline {
+		return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name()))
+	}
+
+	err := j.handleItem(ctx, fromJob(job))
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	return nil
+}
+
+// handleItem publishes a single item, routing it through a temporary dead-letter queue when the job carries a delay
+func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error {
+	const op = errors.Op("rabbitmq_handle_item")
+	select {
+	case pch := <-j.publishChan:
+		// return the channel back
+		defer func() {
+			j.publishChan <- pch
+		}()
+
+		// convert
+		table, err := pack(msg.ID(), msg)
+		if err != nil {
+			return errors.E(op, err)
+		}
+
+		// handle timeouts
+		if msg.Options.DelayDuration() > 0 {
+			// TODO declare a separate method for this if condition
+			// TODO dlx cache channel??
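+			// illustration: for a job with Delay=5 the code below computes delayMs=5000
+			// and declares a helper queue "delayed-5000.amqp.default.default" (with the
+			// default exchange and queue names) whose dead-letter exchange points back
+			// at the real exchange; once the 5000ms x-message-ttl expires, the message
+			// is dead-lettered to the real routing key, and the helper queue itself
+			// expires 10s later (x-expires = 2 * delay)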
+			delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000)
+			tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, j.exchangeName, j.queue)
+			_, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{
+				dlx:           j.exchangeName,
+				dlxRoutingKey: j.routingKey,
+				dlxTTL:        delayMs,
+				dlxExpires:    delayMs * 2,
+			})
+			if err != nil {
+				return errors.E(op, err)
+			}
+
+			err = pch.QueueBind(tmpQ, tmpQ, j.exchangeName, false, nil)
+			if err != nil {
+				return errors.E(op, err)
+			}
+
+			// publish the message into the temporary delay queue
+			err = pch.Publish(j.exchangeName, tmpQ, false, false, amqp.Publishing{
+				Headers:      table,
+				ContentType:  contentType,
+				Timestamp:    time.Now().UTC(),
+				DeliveryMode: amqp.Persistent,
+				Body:         msg.Body(),
+			})
+
+			if err != nil {
+				return errors.E(op, err)
+			}
+
+			j.delayCache[tmpQ] = struct{}{}
+
+			return nil
+		}
+
+		// publish the message using the configured routing key
+		err = pch.Publish(j.exchangeName, j.routingKey, false, false, amqp.Publishing{
+			Headers:      table,
+			ContentType:  contentType,
+			Timestamp:    time.Now(),
+			DeliveryMode: amqp.Persistent,
+			Body:         msg.Body(),
+		})
+
+		if err != nil {
+			return errors.E(op, err)
+		}
+
+		return nil
+	case <-ctx.Done():
+		return errors.E(op, errors.TimeOut, ctx.Err())
+	}
+}
+
+func (j *JobConsumer) Register(_ context.Context, p *pipeline.Pipeline) error {
+	j.pipeline.Store(p)
+	return nil
+}
+
+func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error {
+	const op = errors.Op("rabbit_consume")
+
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != p.Name() {
+		return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
+	}
+
+	// protect connection (redial)
+	j.Lock()
+	defer j.Unlock()
+
+	var err error
+	j.consumeChan, err = j.conn.Channel()
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	err = j.consumeChan.Qos(j.prefetch, 0, false)
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	// start reading messages from the channel
+	deliv, err := j.consumeChan.Consume(
+		j.queue,
+		j.consumeID,
+		false,
+		false,
+		false,
+		false,
+		nil,
+	)
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	// run the listener
+	j.listener(deliv)
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipeActive,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+
+	return nil
+}
+
+func (j *JobConsumer) Pause(_ context.Context, p string) {
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != p {
+		j.log.Error("no such pipeline", "requested pause on: ", p)
+		return
+	}
+
+	l := atomic.LoadUint32(&j.listeners)
+	// no active listeners
+	if l == 0 {
+		j.log.Warn("no active listeners, nothing to pause")
+		return
+	}
+
+	atomic.AddUint32(&j.listeners, ^uint32(0))
+
+	// protect connection (redial)
+	j.Lock()
+	defer j.Unlock()
+
+	err := j.consumeChan.Cancel(j.consumeID, true)
+	if err != nil {
+		j.log.Error("cancel publish channel, forcing close", "error", err)
+		errCl := j.consumeChan.Close()
+		if errCl != nil {
+			j.log.Error("force close failed", "error", errCl)
+			return
+		}
+		return
+	}
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipePaused,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+}
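A note for readers unfamiliar with the idiom: the listener counter above is decremented with atomic.AddUint32(&j.listeners, ^uint32(0)). Per the sync/atomic documentation, adding the all-ones value wraps modulo 2^32, which subtracts one from an unsigned counter. A tiny self-contained demonstration:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var listeners uint32 = 1
	atomic.AddUint32(&listeners, ^uint32(0)) // subtract 1: the addition wraps modulo 2^32
	fmt.Println(listeners)                   // prints 0
}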
j.log.Warn("amqp listener already in the active state") + return + } + + var err error + j.consumeChan, err = j.conn.Channel() + if err != nil { + j.log.Error("create channel on rabbitmq connection", "error", err) + return + } + + err = j.consumeChan.Qos(j.prefetch, 0, false) + if err != nil { + j.log.Error("qos set failed", "error", err) + return + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + j.log.Error("consume operation failed", "error", err) + return + } + + // run listener + j.listener(deliv) + + // increase number of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *JobConsumer) Stop(context.Context) error { + j.stopCh <- struct{}{} + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} diff --git a/plugins/jobs/drivers/amqp/item.go b/plugins/jobs/drivers/amqp/item.go new file mode 100644 index 00000000..5990d137 --- /dev/null +++ b/plugins/jobs/drivers/amqp/item.go @@ -0,0 +1,228 @@ +package amqp + +import ( + "context" + "fmt" + "time" + + json "github.com/json-iterator/go" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // private + // Ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery + ack func(multiply bool) error + + // Nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. + // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. + // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. 
+ // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time + nack func(multiply bool, requeue bool) error + + // requeueFn used as a pointer to the push function + requeueFn func(context.Context, *Item) error + + multipleAsk bool + requeue bool +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +// Not used in the amqp, amqp.Table used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + return i.Options.ack(i.Options.multipleAsk) +} + +func (i *Item) Nack() error { + return i.Options.nack(false, i.Options.requeue) +} + +// Requeue with the provided delay, handled by the Nack +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + errNack := i.Options.nack(false, true) + if errNack != nil { + return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) + } + + return err + } + + // ack the job + err = i.Options.ack(false) + if err != nil { + return err + } + + return nil +} + +// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ +func (j *JobConsumer) fromDelivery(d amqp.Delivery) (*Item, error) { + const op = errors.Op("from_delivery_convert") + item, err := j.unpack(d) + if err != nil { + return nil, errors.E(op, err) + } + + i := &Item{ + Job: item.Job, + Ident: item.Ident, + Payload: item.Payload, + Headers: item.Headers, + Options: item.Options, + } + + item.Options.ack = d.Ack + item.Options.nack = d.Nack + + // requeue func + item.Options.requeueFn = j.handleItem + return i, nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +// pack job metadata into headers +func pack(id string, j *Item) (amqp.Table, error) { + headers, err := json.Marshal(j.Headers) + if err != nil { + return nil, err + } + return amqp.Table{ + job.RRID: id, + job.RRJob: j.Job, + job.RRPipeline: j.Options.Pipeline, + job.RRHeaders: headers, + job.RRDelay: j.Options.Delay, + job.RRPriority: j.Options.Priority, + }, nil +} + +// unpack restores jobs.Options +func (j *JobConsumer) unpack(d amqp.Delivery) (*Item, error) { + item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ + multipleAsk: j.multipleAck, + requeue: j.requeueOnFail, + requeueFn: j.handleItem, + }} + + if _, ok := d.Headers[job.RRID].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", 
job.RRID)) + } + + item.Ident = d.Headers[job.RRID].(string) + + if _, ok := d.Headers[job.RRJob].(string); !ok { + return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) + } + + item.Job = d.Headers[job.RRJob].(string) + + if _, ok := d.Headers[job.RRPipeline].(string); ok { + item.Options.Pipeline = d.Headers[job.RRPipeline].(string) + } + + if h, ok := d.Headers[job.RRHeaders].([]byte); ok { + err := json.Unmarshal(h, &item.Headers) + if err != nil { + return nil, err + } + } + + if _, ok := d.Headers[job.RRDelay].(int64); ok { + item.Options.Delay = d.Headers[job.RRDelay].(int64) + } + + if _, ok := d.Headers[job.RRPriority]; !ok { + // set pipe's priority + item.Options.Priority = j.priority + } else { + item.Options.Priority = d.Headers[job.RRPriority].(int64) + } + + return item, nil +} diff --git a/plugins/jobs/drivers/amqp/listener.go b/plugins/jobs/drivers/amqp/listener.go new file mode 100644 index 00000000..0b1cd2dc --- /dev/null +++ b/plugins/jobs/drivers/amqp/listener.go @@ -0,0 +1,25 @@ +package amqp + +import amqp "github.com/rabbitmq/amqp091-go" + +func (j *JobConsumer) listener(deliv <-chan amqp.Delivery) { + go func() { + for { //nolint:gosimple + select { + case msg, ok := <-deliv: + if !ok { + j.log.Info("delivery channel closed, leaving the rabbit listener") + return + } + + d, err := j.fromDelivery(msg) + if err != nil { + j.log.Error("amqp delivery convert", "error", err) + continue + } + // insert job into the main priority queue + j.pq.Insert(d) + } + } + }() +} diff --git a/plugins/jobs/drivers/amqp/plugin.go b/plugins/jobs/drivers/amqp/plugin.go new file mode 100644 index 00000000..624f4405 --- /dev/null +++ b/plugins/jobs/drivers/amqp/plugin.go @@ -0,0 +1,40 @@ +package amqp + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + pluginName string = "amqp" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) Available() {} + +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) +} + +// FromPipeline constructs AMQP driver from pipeline +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipe, p.log, p.cfg, e, pq) +} diff --git a/plugins/jobs/drivers/amqp/rabbit_init.go b/plugins/jobs/drivers/amqp/rabbit_init.go new file mode 100644 index 00000000..570498e9 --- /dev/null +++ b/plugins/jobs/drivers/amqp/rabbit_init.go @@ -0,0 +1,65 @@ +package amqp + +import ( + "time" + + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" +) + +func (j *JobConsumer) initRabbitMQ() error { + const op = errors.Op("jobs_plugin_rmq_init") + // Channel opens a unique, concurrent server channel to process the bulk of AMQP + // messages. Any error from methods on this receiver will render the receiver + // invalid and a new Channel should be opened. 
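+	// note: the declare/bind sequence below is idempotent as long as the
+	// parameters stay the same, so the redialer can safely re-run it after
+	// every reconnect; the helper channel is closed once the topology exists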
+ channel, err := j.conn.Channel() + if err != nil { + return errors.E(op, err) + } + + // declare an exchange (idempotent operation) + err = channel.ExchangeDeclare( + j.exchangeName, + j.exchangeType, + true, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // verify or declare a queue + q, err := channel.QueueDeclare( + j.queue, + false, + false, + j.exclusive, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // bind queue to the exchange + err = channel.QueueBind( + q.Name, + j.routingKey, + j.exchangeName, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + j.eh.Push(events.JobEvent{ + Event: events.EventInitialized, + Driver: "amqp", + Start: time.Now(), + }) + return channel.Close() +} diff --git a/plugins/jobs/drivers/amqp/redial.go b/plugins/jobs/drivers/amqp/redial.go new file mode 100644 index 00000000..8dc18b8f --- /dev/null +++ b/plugins/jobs/drivers/amqp/redial.go @@ -0,0 +1,141 @@ +package amqp + +import ( + "time" + + "github.com/cenkalti/backoff/v4" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" +) + +// redialer used to redial to the rabbitmq in case of the connection interrupts +func (j *JobConsumer) redialer() { //nolint:gocognit + go func() { + const op = errors.Op("rabbitmq_redial") + + for { + select { + case err := <-j.conn.NotifyClose(make(chan *amqp.Error)): + if err == nil { + return + } + + j.Lock() + + // trash the broken publishing channel + <-j.publishChan + + t := time.Now() + pipe := j.pipeline.Load().(*pipeline.Pipeline) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeError, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Error: err, + Start: time.Now(), + }) + + expb := backoff.NewExponentialBackOff() + // set the retry timeout (minutes) + expb.MaxElapsedTime = j.retryTimeout + operation := func() error { + j.log.Warn("rabbitmq reconnecting, caused by", "error", err) + var dialErr error + j.conn, dialErr = amqp.Dial(j.connStr) + if dialErr != nil { + return errors.E(op, dialErr) + } + + j.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") + + // re-init connection + errInit := j.initRabbitMQ() + if errInit != nil { + j.log.Error("rabbitmq dial", "error", errInit) + return errInit + } + + // redeclare consume channel + var errConnCh error + j.consumeChan, errConnCh = j.conn.Channel() + if errConnCh != nil { + return errors.E(op, errConnCh) + } + + // redeclare publish channel + pch, errPubCh := j.conn.Channel() + if errPubCh != nil { + return errors.E(op, errPubCh) + } + + // start reading messages from the channel + deliv, err := j.consumeChan.Consume( + j.queue, + j.consumeID, + false, + false, + false, + false, + nil, + ) + if err != nil { + return errors.E(op, err) + } + + // put the fresh publishing channel + j.publishChan <- pch + // restart listener + j.listener(deliv) + + j.log.Info("queues and subscribers redeclared successfully") + + return nil + } + + retryErr := backoff.Retry(operation, expb) + if retryErr != nil { + j.Unlock() + j.log.Error("backoff failed", "error", retryErr) + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Start: t, + Elapsed: time.Since(t), + }) + + j.Unlock() + + case <-j.stopCh: + if j.publishChan != nil { + pch := <-j.publishChan + err := pch.Close() + if err != nil { + j.log.Error("publish channel close", "error", err) + } + } + + if j.consumeChan != nil { + err := j.consumeChan.Close() + if err != nil { + j.log.Error("consume channel close", "error", err) + } + } + if j.conn != nil { + err := j.conn.Close() + if err != nil { + j.log.Error("amqp connection close", "error", err) + } + } + + return + } + } + }() +} diff --git a/plugins/jobs/drivers/beanstalk/config.go b/plugins/jobs/drivers/beanstalk/config.go new file mode 100644 index 00000000..a8069f5d --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/config.go @@ -0,0 +1,53 @@ +package beanstalk + +import ( + "time" + + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + tubePriority string = "tube_priority" + tube string = "tube" + reserveTimeout string = "reserve_timeout" +) + +type GlobalCfg struct { + Addr string `mapstructure:"addr"` + Timeout time.Duration `mapstructure:"timeout"` +} + +func (c *GlobalCfg) InitDefault() { + if c.Addr == "" { + c.Addr = "tcp://127.0.0.1:11300" + } + + if c.Timeout == 0 { + c.Timeout = time.Second * 30 + } +} + +type Config struct { + PipePriority int64 `mapstructure:"priority"` + TubePriority *uint32 `mapstructure:"tube_priority"` + Tube string `mapstructure:"tube"` + ReserveTimeout time.Duration `mapstructure:"reserve_timeout"` +} + +func (c *Config) InitDefault() { + if c.Tube == "" { + c.Tube = "default" + } + + if c.ReserveTimeout == 0 { + c.ReserveTimeout = time.Second * 1 + } + + if c.TubePriority == nil { + c.TubePriority = utils.Uint32(0) + } + + if c.PipePriority == 0 { + c.PipePriority = 10 + } +} diff --git a/plugins/jobs/drivers/beanstalk/connection.go b/plugins/jobs/drivers/beanstalk/connection.go new file mode 100644 index 00000000..32ca4188 --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/connection.go @@ -0,0 +1,206 @@ +package beanstalk + +import ( + "context" + "net" + "sync" + "time" + + "github.com/beanstalkd/go-beanstalk" + "github.com/cenkalti/backoff/v4" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +type ConnPool struct { + sync.RWMutex + + log logger.Logger + + conn *beanstalk.Conn + connT *beanstalk.Conn + ts *beanstalk.TubeSet + t *beanstalk.Tube + + network string + address string + tName string + tout 
time.Duration +} + +func NewConnPool(network, address, tName string, tout time.Duration, log logger.Logger) (*ConnPool, error) { + connT, err := beanstalk.DialTimeout(network, address, tout) + if err != nil { + return nil, err + } + + connTS, err := beanstalk.DialTimeout(network, address, tout) + if err != nil { + return nil, err + } + + tube := beanstalk.NewTube(connT, tName) + ts := beanstalk.NewTubeSet(connTS, tName) + + return &ConnPool{ + log: log, + network: network, + address: address, + tName: tName, + tout: tout, + conn: connTS, + connT: connT, + ts: ts, + t: tube, + }, nil +} + +// Put the payload +// TODO use the context ?? +func (cp *ConnPool) Put(_ context.Context, body []byte, pri uint32, delay, ttr time.Duration) (uint64, error) { + cp.RLock() + defer cp.RUnlock() + + // TODO(rustatian): redial based on the token + id, err := cp.t.Put(body, pri, delay, ttr) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return 0, errN + } else { + // retry put only when we redialed + return cp.t.Put(body, pri, delay, ttr) + } + } + + return id, nil +} + +// Reserve reserves and returns a job from one of the tubes in t. If no +// job is available before time timeout has passed, Reserve returns a +// ConnError recording ErrTimeout. +// +// Typically, a client will reserve a job, perform some work, then delete +// the job with Conn.Delete. +func (cp *ConnPool) Reserve(reserveTimeout time.Duration) (uint64, []byte, error) { + cp.RLock() + defer cp.RUnlock() + + id, body, err := cp.ts.Reserve(reserveTimeout) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return 0, nil, errN + } else { + // retry Reserve only when we redialed + return cp.ts.Reserve(reserveTimeout) + } + } + + return id, body, nil +} + +func (cp *ConnPool) Delete(ctx context.Context, id uint64) error { + cp.RLock() + defer cp.RUnlock() + + err := cp.conn.Delete(id) + if err != nil { + // errN contains both, err and internal checkAndRedial error + errN := cp.checkAndRedial(err) + if errN != nil { + return errN + } else { + // retry Delete only when we redialed + return cp.conn.Delete(id) + } + } + return nil +} + +func (cp *ConnPool) redial() error { + const op = errors.Op("connection_pool_redial") + + cp.Lock() + // backoff here + expb := backoff.NewExponentialBackOff() + // TODO(rustatian) set via config + expb.MaxElapsedTime = time.Minute + + operation := func() error { + connT, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) + if err != nil { + return err + } + if connT == nil { + return errors.E(op, errors.Str("connectionT is nil")) + } + + connTS, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) + if err != nil { + return err + } + + if connTS == nil { + return errors.E(op, errors.Str("connectionTS is nil")) + } + + cp.t = beanstalk.NewTube(connT, cp.tName) + cp.ts = beanstalk.NewTubeSet(connTS, cp.tName) + cp.conn = connTS + cp.connT = connT + + cp.log.Info("beanstalk redial was successful") + return nil + } + + retryErr := backoff.Retry(operation, expb) + if retryErr != nil { + cp.Unlock() + return retryErr + } + cp.Unlock() + + return nil +} + +var connErrors = map[string]struct{}{"EOF": {}} + +func (cp *ConnPool) checkAndRedial(err error) error { + const op = errors.Op("connection_pool_check_redial") + switch et := err.(type) { //nolint:gocritic + // check if the error + case beanstalk.ConnError: + switch bErr := 
et.Err.(type) { + case *net.OpError: + cp.RUnlock() + errR := cp.redial() + cp.RLock() + // if redial failed - return + if errR != nil { + return errors.E(op, errors.Errorf("%v:%v", bErr, errR)) + } + + // if redial was successful -> continue listening + return nil + default: + if _, ok := connErrors[et.Err.Error()]; ok { + // if error is related to the broken connection - redial + cp.RUnlock() + errR := cp.redial() + cp.RLock() + // if redial failed - return + if errR != nil { + return errors.E(op, errors.Errorf("%v:%v", err, errR)) + } + // if redial was successful -> continue listening + return nil + } + } + } + + // return initial error + return err +} diff --git a/plugins/jobs/drivers/beanstalk/consumer.go b/plugins/jobs/drivers/beanstalk/consumer.go new file mode 100644 index 00000000..eaf99be1 --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/consumer.go @@ -0,0 +1,317 @@ +package beanstalk + +import ( + "bytes" + "context" + "strings" + "sync/atomic" + "time" + + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/utils" +) + +type JobConsumer struct { + log logger.Logger + eh events.Handler + pq priorityqueue.Queue + + pipeline atomic.Value + listeners uint32 + + // beanstalk + pool *ConnPool + addr string + network string + reserveTimeout time.Duration + reconnectCh chan struct{} + tout time.Duration + // tube name + tName string + tubePriority *uint32 + priority int64 + + stopCh chan struct{} + requeueCh chan *Item +} + +func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { + const op = errors.Op("new_beanstalk_consumer") + + // PARSE CONFIGURATION ------- + var pipeCfg Config + var globalCfg GlobalCfg + + if !cfg.Has(configKey) { + return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) + } + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) + } + + err := cfg.UnmarshalKey(configKey, &pipeCfg) + if err != nil { + return nil, errors.E(op, err) + } + + pipeCfg.InitDefault() + + err = cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + // PARSE CONFIGURATION ------- + + dsn := strings.Split(globalCfg.Addr, "://") + if len(dsn) != 2 { + return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) + } + + cPool, err := NewConnPool(dsn[0], dsn[1], pipeCfg.Tube, globalCfg.Timeout, log) + if err != nil { + return nil, errors.E(op, err) + } + + // initialize job consumer + jc := &JobConsumer{ + pq: pq, + log: log, + eh: e, + pool: cPool, + network: dsn[0], + addr: dsn[1], + tout: globalCfg.Timeout, + tName: pipeCfg.Tube, + reserveTimeout: pipeCfg.ReserveTimeout, + tubePriority: pipeCfg.TubePriority, + priority: pipeCfg.PipePriority, + + // buffered with two because jobs root plugin can call Stop at the same time as Pause + stopCh: make(chan struct{}, 2), + requeueCh: make(chan *Item, 1000), + reconnectCh: make(chan struct{}, 2), + 
} + + return jc, nil +} + +func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { + const op = errors.Op("new_beanstalk_consumer") + + // PARSE CONFIGURATION ------- + var globalCfg GlobalCfg + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) + } + + err := cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + globalCfg.InitDefault() + + // PARSE CONFIGURATION ------- + + dsn := strings.Split(globalCfg.Addr, "://") + if len(dsn) != 2 { + return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) + } + + cPool, err := NewConnPool(dsn[0], dsn[1], pipe.String(tube, "default"), globalCfg.Timeout, log) + if err != nil { + return nil, errors.E(op, err) + } + + // initialize job consumer + jc := &JobConsumer{ + pq: pq, + log: log, + eh: e, + pool: cPool, + network: dsn[0], + addr: dsn[1], + tout: globalCfg.Timeout, + tName: pipe.String(tube, "default"), + reserveTimeout: time.Second * time.Duration(pipe.Int(reserveTimeout, 5)), + tubePriority: utils.Uint32(uint32(pipe.Int(tubePriority, 1))), + priority: pipe.Priority(), + + // buffered with two because jobs root plugin can call Stop at the same time as Pause + stopCh: make(chan struct{}, 2), + requeueCh: make(chan *Item, 1000), + reconnectCh: make(chan struct{}, 2), + } + + return jc, nil +} +func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { + const op = errors.Op("beanstalk_push") + // check if the pipeline registered + + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != jb.Options.Pipeline { + return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + + return nil +} + +func (j *JobConsumer) handleItem(ctx context.Context, item *Item) error { + const op = errors.Op("beanstalk_handle_item") + + bb := new(bytes.Buffer) + bb.Grow(64) + err := item.pack(bb) + if err != nil { + return errors.E(op, err) + } + + // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L458 + // <pri> is an integer < 2**32. Jobs with smaller priority values will be + // scheduled before jobs with larger priorities. The most urgent priority is 0; + // the least urgent priority is 4,294,967,295. + // + // <delay> is an integer number of seconds to wait before putting the job in + // the ready queue. The job will be in the "delayed" state during this time. + // Maximum delay is 2**32-1. + // + // <ttr> -- time to run -- is an integer number of seconds to allow a worker + // to run this job. This time is counted from the moment a worker reserves + // this job. If the worker does not delete, release, or bury the job within + // <ttr> seconds, the job will time out and the server will release the job. + // The minimum ttr is 1. If the client sends 0, the server will silently + // increase the ttr to 1. Maximum ttr is 2**32-1. 
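+	// illustration: with the config defaults above, the call below puts the job
+	// with tube priority 0 (the most urgent), a delay of Options.Delay seconds,
+	// and the global dial timeout (30s by default) reused as the job's TTR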
+	id, err := j.pool.Put(ctx, bb.Bytes(), *j.tubePriority, item.Options.DelayDuration(), j.tout)
+	if err != nil {
+		errD := j.pool.Delete(ctx, id)
+		if errD != nil {
+			return errors.E(op, errors.Errorf("%s:%s", err.Error(), errD.Error()))
+		}
+		return errors.E(op, err)
+	}
+
+	return nil
+}
+
+func (j *JobConsumer) Register(ctx context.Context, p *pipeline.Pipeline) error {
+	// register the pipeline
+	j.pipeline.Store(p)
+	return nil
+}
+
+func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error {
+	const op = errors.Op("beanstalk_run")
+	// check if the pipeline is registered
+
+	// load the atomic value
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != p.Name() {
+		return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", p.Name(), pipe.Name()))
+	}
+
+	atomic.AddUint32(&j.listeners, 1)
+
+	go j.listen()
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipeActive,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+
+	return nil
+}
+
+func (j *JobConsumer) Stop(context.Context) error {
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+
+	if atomic.LoadUint32(&j.listeners) == 1 {
+		j.stopCh <- struct{}{}
+	}
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipeStopped,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+
+	return nil
+}
+
+func (j *JobConsumer) Pause(ctx context.Context, p string) {
+	// load the atomic value
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != p {
+		j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
+		return
+	}
+
+	l := atomic.LoadUint32(&j.listeners)
+	// no active listeners
+	if l == 0 {
+		j.log.Warn("no active listeners, nothing to pause")
+		return
+	}
+
+	atomic.AddUint32(&j.listeners, ^uint32(0))
+
+	j.stopCh <- struct{}{}
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipePaused,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+}
+
+func (j *JobConsumer) Resume(_ context.Context, p string) {
+	// load the atomic value
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != p {
+		j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
+		return
+	}
+
+	l := atomic.LoadUint32(&j.listeners)
+	// no active listeners
+	if l == 1 {
+		j.log.Warn("beanstalk listener already in the active state")
+		return
+	}
+
+	// start the listener
+	go j.listen()
+
+	// increase the number of listeners
+	atomic.AddUint32(&j.listeners, 1)
+
+	j.eh.Push(events.JobEvent{
+		Event:    events.EventPipeActive,
+		Driver:   pipe.Driver(),
+		Pipeline: pipe.Name(),
+		Start:    time.Now(),
+	})
+}
diff --git a/plugins/jobs/drivers/beanstalk/encode_test.go b/plugins/jobs/drivers/beanstalk/encode_test.go
new file mode 100644
index 00000000..e43207eb
--- /dev/null
+++ b/plugins/jobs/drivers/beanstalk/encode_test.go
@@ -0,0 +1,75 @@
+package beanstalk
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/gob"
+	"testing"
+
+	json "github.com/json-iterator/go"
+	"github.com/spiral/roadrunner/v2/utils"
+)
+
+func BenchmarkEncodeGob(b *testing.B) {
+	tb := make([]byte, 1024*10)
+	_, err := rand.Read(tb)
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	item := &Item{
+		Job:     "/super/test/php/class/loooooong",
+		Ident:   "12341234-asdfasdfa-1234234-asdfasdfas",
+		Payload: utils.AsString(tb),
+		Headers: map[string][]string{"Test": {"test1", "test2"}},
+		Options: &Options{
+			Priority: 10,
+			Pipeline: "test-local-pipe",
+			Delay:    10,
+		},
+	}
+
+	b.ResetTimer()
+	b.ReportAllocs()
+
+	for i := 0; i < b.N; i++ {
+		bb := new(bytes.Buffer)
+		err := 
gob.NewEncoder(bb).Encode(item) + if err != nil { + b.Fatal(err) + } + _ = bb.Bytes() + bb.Reset() + } +} + +func BenchmarkEncodeJsonIter(b *testing.B) { + tb := make([]byte, 1024*10) + _, err := rand.Read(tb) + if err != nil { + b.Fatal(err) + } + + item := &Item{ + Job: "/super/test/php/class/loooooong", + Ident: "12341234-asdfasdfa-1234234-asdfasdfas", + Payload: utils.AsString(tb), + Headers: map[string][]string{"Test": {"test1", "test2"}}, + Options: &Options{ + Priority: 10, + Pipeline: "test-local-pipe", + Delay: 10, + }, + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + bb, err := json.Marshal(item) + if err != nil { + b.Fatal(err) + } + _ = bb + } +} diff --git a/plugins/jobs/drivers/beanstalk/item.go b/plugins/jobs/drivers/beanstalk/item.go new file mode 100644 index 00000000..f1d7ac76 --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/item.go @@ -0,0 +1,147 @@ +package beanstalk + +import ( + "bytes" + "context" + "encoding/gob" + "time" + + "github.com/beanstalkd/go-beanstalk" + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // Private ================ + id uint64 + conn *beanstalk.Conn + requeueFn func(context.Context, *Item) error +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. 
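+// (editorial example with hypothetical values: for Ident "1", Job
+// "app.job.Ping" and pipeline "test-local" the marshaled context is
+//   {"id":"1","job":"app.job.Ping","headers":null,"pipeline":"test-local"}
+// which the worker receives alongside the raw payload.)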
+// Not used in the sqs, MessageAttributes used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + return i.Options.conn.Delete(i.Options.id) +} + +func (i *Item) Nack() error { + return i.Options.conn.Delete(i.Options.id) +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + // delete old job + err = i.Options.conn.Delete(i.Options.id) + if err != nil { + return err + } + + return nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +func (i *Item) pack(b *bytes.Buffer) error { + err := gob.NewEncoder(b).Encode(i) + if err != nil { + return err + } + + return nil +} + +func (j *JobConsumer) unpack(id uint64, data []byte, out *Item) error { + err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out) + if err != nil { + return err + } + out.Options.conn = j.pool.conn + out.Options.id = id + out.Options.requeueFn = j.handleItem + + return nil +} diff --git a/plugins/jobs/drivers/beanstalk/listen.go b/plugins/jobs/drivers/beanstalk/listen.go new file mode 100644 index 00000000..f1385e70 --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/listen.go @@ -0,0 +1,39 @@ +package beanstalk + +import ( + "github.com/beanstalkd/go-beanstalk" +) + +func (j *JobConsumer) listen() { + for { + select { + case <-j.stopCh: + j.log.Warn("beanstalk listener stopped") + return + default: + id, body, err := j.pool.Reserve(j.reserveTimeout) + if err != nil { + if errB, ok := err.(beanstalk.ConnError); ok { + switch errB.Err { //nolint:gocritic + case beanstalk.ErrTimeout: + j.log.Info("beanstalk reserve timeout", "warn", errB.Op) + continue + } + } + // in case of other error - continue + j.log.Error("beanstalk reserve", "error", err) + continue + } + + item := &Item{} + err = j.unpack(id, body, item) + if err != nil { + j.log.Error("beanstalk unpack item", "error", err) + continue + } + + // insert job into the priority queue + j.pq.Insert(item) + } + } +} diff --git a/plugins/jobs/drivers/beanstalk/plugin.go b/plugins/jobs/drivers/beanstalk/plugin.go new file mode 100644 index 00000000..529d1474 --- /dev/null +++ b/plugins/jobs/drivers/beanstalk/plugin.go @@ -0,0 +1,47 @@ +package beanstalk + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + pluginName string = "beanstalk" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Serve() chan error { + return make(chan error) +} + +func (p *Plugin) Stop() 
error { + return nil +} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) Available() {} + +func (p *Plugin) JobsConstruct(configKey string, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewBeanstalkConsumer(configKey, p.log, p.cfg, eh, pq) +} + +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipe, p.log, p.cfg, eh, pq) +} diff --git a/plugins/jobs/drivers/ephemeral/consumer.go b/plugins/jobs/drivers/ephemeral/consumer.go new file mode 100644 index 00000000..95ad6ecd --- /dev/null +++ b/plugins/jobs/drivers/ephemeral/consumer.go @@ -0,0 +1,244 @@ +package ephemeral + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + prefetch string = "prefetch" + goroutinesMax uint64 = 1000 +) + +type Config struct { + Prefetch uint64 `mapstructure:"prefetch"` +} + +type JobConsumer struct { + cfg *Config + log logger.Logger + eh events.Handler + pipeline sync.Map + pq priorityqueue.Queue + localPrefetch chan *Item + + // time.sleep goroutines max number + goroutines uint64 + + stopCh chan struct{} +} + +func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { + const op = errors.Op("new_ephemeral_pipeline") + + jb := &JobConsumer{ + log: log, + pq: pq, + eh: eh, + goroutines: 0, + stopCh: make(chan struct{}, 1), + } + + err := cfg.UnmarshalKey(configKey, &jb.cfg) + if err != nil { + return nil, errors.E(op, err) + } + + if jb.cfg.Prefetch == 0 { + jb.cfg.Prefetch = 100_000 + } + + // initialize a local queue + jb.localPrefetch = make(chan *Item, jb.cfg.Prefetch) + + // consume from the queue + go jb.consume() + + return jb, nil +} + +func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { + jb := &JobConsumer{ + log: log, + pq: pq, + eh: eh, + goroutines: 0, + stopCh: make(chan struct{}, 1), + } + + // initialize a local queue + jb.localPrefetch = make(chan *Item, pipeline.Int(prefetch, 100_000)) + + // consume from the queue + go jb.consume() + + return jb, nil +} + +func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error { + const op = errors.Op("ephemeral_push") + + // check if the pipeline registered + b, ok := j.pipeline.Load(jb.Options.Pipeline) + if !ok { + return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline)) + } + + if !b.(bool) { + return errors.E(op, errors.Errorf("pipeline disabled: %s", jb.Options.Pipeline)) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + + return nil +} + +func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { + const op = errors.Op("ephemeral_handle_request") + // handle timeouts + // theoretically, some bad user may send millions requests with a delay and produce a billion (for example) + // goroutines here. We should limit goroutines here. 
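+	//
+	// Editorial note: the cap below is advisory rather than strict - the counter
+	// is only incremented inside the spawned goroutine, so a burst of delayed
+	// pushes can briefly overshoot goroutinesMax before the first increments
+	// land. Incrementing before the `go` statement would make the limit exact.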
+ if msg.Options.Delay > 0 { + // if we have 1000 goroutines waiting on the delay - reject 1001 + if atomic.LoadUint64(&j.goroutines) >= goroutinesMax { + return errors.E(op, errors.Str("max concurrency number reached")) + } + + go func(jj *Item) { + atomic.AddUint64(&j.goroutines, 1) + time.Sleep(jj.Options.DelayDuration()) + + // send the item after timeout expired + j.localPrefetch <- jj + + atomic.AddUint64(&j.goroutines, ^uint64(0)) + }(msg) + + return nil + } + + // insert to the local, limited pipeline + select { + case j.localPrefetch <- msg: + return nil + case <-ctx.Done(): + return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", j.cfg.Prefetch, ctx.Err())) + } +} + +func (j *JobConsumer) consume() { + // redirect + for { + select { + case item, ok := <-j.localPrefetch: + if !ok { + j.log.Warn("ephemeral local prefetch queue was closed") + return + } + + // set requeue channel + item.Options.requeueFn = j.handleItem + + j.pq.Insert(item) + case <-j.stopCh: + return + } + } +} + +func (j *JobConsumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { + const op = errors.Op("ephemeral_register") + if _, ok := j.pipeline.Load(pipeline.Name()); ok { + return errors.E(op, errors.Errorf("queue %s has already been registered", pipeline)) + } + + j.pipeline.Store(pipeline.Name(), true) + + return nil +} + +func (j *JobConsumer) Pause(_ context.Context, pipeline string) { + if q, ok := j.pipeline.Load(pipeline); ok { + if q == true { + // mark pipeline as turned off + j.pipeline.Store(pipeline, false) + } + // if not true - do not send the EventPipeStopped, because pipe already stopped + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Pipeline: pipeline, + Start: time.Now(), + Elapsed: 0, + }) +} + +func (j *JobConsumer) Resume(_ context.Context, pipeline string) { + if q, ok := j.pipeline.Load(pipeline); ok { + if q == false { + // mark pipeline as turned on + j.pipeline.Store(pipeline, true) + } + + // if not true - do not send the EventPipeActive, because pipe already active + return + } + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Pipeline: pipeline, + Start: time.Now(), + Elapsed: 0, + }) +} + +// Run is no-op for the ephemeral +func (j *JobConsumer) Run(_ context.Context, pipe *pipeline.Pipeline) error { + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +func (j *JobConsumer) Stop(ctx context.Context) error { + const op = errors.Op("ephemeral_plugin_stop") + var pipe string + j.pipeline.Range(func(key, _ interface{}) bool { + pipe = key.(string) + j.pipeline.Delete(key) + return true + }) + + select { + // return from the consumer + case j.stopCh <- struct{}{}: + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Pipeline: pipe, + Start: time.Now(), + Elapsed: 0, + }) + return nil + + case <-ctx.Done(): + return errors.E(op, ctx.Err()) + } +} diff --git a/plugins/jobs/drivers/ephemeral/item.go b/plugins/jobs/drivers/ephemeral/item.go new file mode 100644 index 00000000..1a61d7e9 --- /dev/null +++ b/plugins/jobs/drivers/ephemeral/item.go @@ -0,0 +1,115 @@ +package ephemeral + +import ( + "context" + "time" + + json "github.com/json-iterator/go" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +type Item struct { + // Job contains name of job broker (usually PHP class). 
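+	// (editorial example of such a class name: "App\Jobs\SampleJob")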
+ Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // private + requeueFn func(context.Context, *Item) error +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + // noop for the in-memory + return nil +} + +func (i *Item) Nack() error { + // noop for the in-memory + return nil +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + return nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} diff --git a/plugins/jobs/drivers/ephemeral/plugin.go b/plugins/jobs/drivers/ephemeral/plugin.go new file mode 100644 index 00000000..28495abb --- /dev/null +++ b/plugins/jobs/drivers/ephemeral/plugin.go @@ -0,0 +1,41 @@ +package ephemeral + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + PluginName string = "ephemeral" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Name() string { + return PluginName +} + +func (p *Plugin) Available() {} + +// JobsConstruct creates new ephemeral consumer from the configuration +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) 
{ + return NewJobBroker(configKey, p.log, p.cfg, e, pq) +} + +// FromPipeline creates new ephemeral consumer from the provided pipeline +func (p *Plugin) FromPipeline(pipeline *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipeline, p.log, e, pq) +} diff --git a/plugins/jobs/drivers/sqs/config.go b/plugins/jobs/drivers/sqs/config.go new file mode 100644 index 00000000..9b2a1ca8 --- /dev/null +++ b/plugins/jobs/drivers/sqs/config.go @@ -0,0 +1,114 @@ +package sqs + +import "github.com/aws/aws-sdk-go-v2/aws" + +const ( + attributes string = "attributes" + tags string = "tags" + queue string = "queue" + pref string = "prefetch" + visibility string = "visibility_timeout" + waitTime string = "wait_time" +) + +type GlobalCfg struct { + Key string `mapstructure:"key"` + Secret string `mapstructure:"secret"` + Region string `mapstructure:"region"` + SessionToken string `mapstructure:"session_token"` + Endpoint string `mapstructure:"endpoint"` +} + +// Config is used to parse pipeline configuration +type Config struct { + // The duration (in seconds) that the received messages are hidden from subsequent + // retrieve requests after being retrieved by a ReceiveMessage request. + VisibilityTimeout int32 `mapstructure:"visibility_timeout"` + // The duration (in seconds) for which the call waits for a message to arrive + // in the queue before returning. If a message is available, the call returns + // sooner than WaitTimeSeconds. If no messages are available and the wait time + // expires, the call returns successfully with an empty list of messages. + WaitTimeSeconds int32 `mapstructure:"wait_time_seconds"` + // Prefetch is the maximum number of messages to return. Amazon SQS never returns more messages + // than this value (however, fewer messages might be returned). Valid values: 1 to + // 10. Default: 1. + Prefetch int32 `mapstructure:"prefetch"` + // The name of the new queue. The following limits apply to this name: + // + // * A queue + // name can have up to 80 characters. + // + // * Valid values: alphanumeric characters, + // hyphens (-), and underscores (_). + // + // * A FIFO queue name must end with the .fifo + // suffix. + // + // Queue URLs and names are case-sensitive. + // + // This member is required. + Queue *string `mapstructure:"queue"` + + // A map of attributes with their corresponding values. The following lists the + // names, descriptions, and values of the special request parameters that the + // CreateQueue action uses. + // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html + Attributes map[string]string `mapstructure:"attributes"` + + // From amazon docs: + // Add cost allocation tags to the specified Amazon SQS queue. For an overview, see + // Tagging Your Amazon SQS Queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html) + // in the Amazon SQS Developer Guide. When you use queue tags, keep the following + // guidelines in mind: + // + // * Adding more than 50 tags to a queue isn't recommended. + // + // * + // Tags don't have any semantic meaning. Amazon SQS interprets tags as character + // strings. + // + // * Tags are case-sensitive. + // + // * A new tag with a key identical to that + // of an existing tag overwrites the existing tag. 
+ // + // For a full list of tag + // restrictions, see Quotas related to queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues) + // in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you + // must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account + // permissions don't apply to this action. For more information, see Grant + // cross-account permissions to a role and a user name + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) + // in the Amazon SQS Developer Guide. + Tags map[string]string `mapstructure:"tags"` +} + +func (c *GlobalCfg) InitDefault() { + if c.Endpoint == "" { + c.Endpoint = "http://127.0.0.1:9324" + } +} + +func (c *Config) InitDefault() { + if c.Queue == nil { + c.Queue = aws.String("default") + } + + if c.Prefetch == 0 || c.Prefetch > 10 { + c.Prefetch = 10 + } + + if c.WaitTimeSeconds == 0 { + c.WaitTimeSeconds = 5 + } + + if c.Attributes == nil { + c.Attributes = make(map[string]string) + } + + if c.Tags == nil { + c.Tags = make(map[string]string) + } +} diff --git a/plugins/jobs/drivers/sqs/consumer.go b/plugins/jobs/drivers/sqs/consumer.go new file mode 100644 index 00000000..9ce37543 --- /dev/null +++ b/plugins/jobs/drivers/sqs/consumer.go @@ -0,0 +1,376 @@ +package sqs + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/google/uuid" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + cfgPlugin "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +type JobConsumer struct { + sync.Mutex + pq priorityqueue.Queue + log logger.Logger + eh events.Handler + pipeline atomic.Value + + // connection info + key string + secret string + sessionToken string + region string + endpoint string + queue *string + messageGroupID string + waitTime int32 + prefetch int32 + visibilityTimeout int32 + + // if user invoke several resume operations + listeners uint32 + + // queue optional parameters + attributes map[string]string + tags map[string]string + + client *sqs.Client + queueURL *string + + pauseCh chan struct{} +} + +func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) { + const op = errors.Op("new_sqs_consumer") + + // if no such key - error + if !cfg.Has(configKey) { + return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) + } + + // if no global section + if !cfg.Has(pluginName) { + return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section")) + } + + // PARSE CONFIGURATION ------- + var pipeCfg Config + var globalCfg GlobalCfg + + err := cfg.UnmarshalKey(configKey, &pipeCfg) + if err != nil { + return nil, errors.E(op, err) + } + + pipeCfg.InitDefault() + + err = cfg.UnmarshalKey(pluginName, &globalCfg) + if err != nil { + return nil, errors.E(op, err) + } + + 
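+	// Editorial example (assumed YAML shape, derived from the mapstructure tags
+	// in config.go; not part of the original patch):
+	//
+	//   sqs:
+	//     key: api-key
+	//     secret: api-secret
+	//     region: us-west-1
+	//     endpoint: http://127.0.0.1:9324
+	//
+	//   jobs:
+	//     pipelines:
+	//       test-sqs:
+	//         driver: sqs
+	//         queue: default
+	//         prefetch: 10
+	//         wait_time_seconds: 5
+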
globalCfg.InitDefault() + + // initialize job consumer + jb := &JobConsumer{ + pq: pq, + log: log, + eh: e, + messageGroupID: uuid.NewString(), + attributes: pipeCfg.Attributes, + tags: pipeCfg.Tags, + queue: pipeCfg.Queue, + prefetch: pipeCfg.Prefetch, + visibilityTimeout: pipeCfg.VisibilityTimeout, + waitTime: pipeCfg.WaitTimeSeconds, + region: globalCfg.Region, + key: globalCfg.Key, + sessionToken: globalCfg.SessionToken, + secret: globalCfg.Secret, + endpoint: globalCfg.Endpoint, + pauseCh: make(chan struct{}, 1), + } + + // PARSE CONFIGURATION ------- + + awsConf, err := config.LoadDefaultConfig(context.Background(), + config.WithRegion(globalCfg.Region), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken))) + if err != nil { + return nil, errors.E(op, err) + } + + // config with retries + jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) { + o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) { + opts.MaxAttempts = 60 + }) + }) + + out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags}) + if err != nil { + return nil, errors.E(op, err) + } + + // assign a queue URL + jb.queueURL = out.QueueUrl + + // To successfully create a new queue, you must provide a + // queue name that adheres to the limits related to queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) + // and is unique within the scope of your queues. After you create a queue, you + // must wait at least one second after the queue is created to be able to use the <------------ + // queue. To get the queue URL, use the GetQueueUrl action. 
GetQueueUrl require
+	time.Sleep(time.Second * 2)
+
+	return jb, nil
+}
+
+func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*JobConsumer, error) {
+	const op = errors.Op("new_sqs_consumer")
+
+	// if no global section
+	if !cfg.Has(pluginName) {
+		return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section"))
+	}
+
+	// PARSE CONFIGURATION -------
+	var globalCfg GlobalCfg
+
+	err := cfg.UnmarshalKey(pluginName, &globalCfg)
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	globalCfg.InitDefault()
+
+	res := make(map[string]interface{})
+	pipe.Map(attributes, res)
+
+	attr := make(map[string]string)
+	// accept only string values
+	for i := range res {
+		if v, ok := res[i].(string); ok {
+			attr[i] = v
+		}
+	}
+
+	// delete all values to reuse the map for the tags
+	for k := range res {
+		delete(res, k)
+	}
+
+	pipe.Map(tags, res)
+
+	tg := make(map[string]string)
+	// accept only string values
+	for i := range res {
+		if v, ok := res[i].(string); ok {
+			tg[i] = v
+		}
+	}
+
+	// initialize job consumer
+	jb := &JobConsumer{
+		pq:                pq,
+		log:               log,
+		eh:                e,
+		messageGroupID:    uuid.NewString(),
+		attributes:        attr,
+		tags:              tg,
+		queue:             aws.String(pipe.String(queue, "default")),
+		prefetch:          int32(pipe.Int(pref, 10)),
+		visibilityTimeout: int32(pipe.Int(visibility, 0)),
+		waitTime:          int32(pipe.Int(waitTime, 0)),
+		region:            globalCfg.Region,
+		key:               globalCfg.Key,
+		sessionToken:      globalCfg.SessionToken,
+		secret:            globalCfg.Secret,
+		endpoint:          globalCfg.Endpoint,
+		pauseCh:           make(chan struct{}, 1),
+	}
+
+	// PARSE CONFIGURATION -------
+
+	awsConf, err := config.LoadDefaultConfig(context.Background(),
+		config.WithRegion(globalCfg.Region),
+		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken)))
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	// config with retries
+	jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) {
+		o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) {
+			opts.MaxAttempts = 60
+		})
+	})
+
+	out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags})
+	if err != nil {
+		return nil, errors.E(op, err)
+	}
+
+	// assign a queue URL
+	jb.queueURL = out.QueueUrl
+
+	// To successfully create a new queue, you must provide a
+	// queue name that adheres to the limits related to queues
+	// (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
+	// and is unique within the scope of your queues. After you create a queue, you
+	// must wait at least one second after the queue is created to be able to use the <------------
+	// queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl require
+	time.Sleep(time.Second * 2)
+
+	return jb, nil
+}
+
+func (j *JobConsumer) Push(ctx context.Context, jb *job.Job) error {
+	const op = errors.Op("sqs_push")
+	// check if the pipeline registered
+
+	// load atomic value
+	pipe := j.pipeline.Load().(*pipeline.Pipeline)
+	if pipe.Name() != jb.Options.Pipeline {
+		return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name()))
+	}
+
+	// The length of time, in seconds, for which to delay a specific message. Valid
+	// values: 0 to 900. Maximum: 15 minutes.
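+	// (editorial note) job.Options.Delay is expressed in seconds throughout the
+	// jobs plugin (see DelayDuration), so it maps one-to-one onto the SQS
+	// DelaySeconds field validated below.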
+ if jb.Options.Delay > 900 { + return errors.E(op, errors.Errorf("unable to push, maximum possible delay is 900 seconds (15 minutes), provided: %d", jb.Options.Delay)) + } + + err := j.handleItem(ctx, fromJob(jb)) + if err != nil { + return errors.E(op, err) + } + return nil +} + +func (j *JobConsumer) handleItem(ctx context.Context, msg *Item) error { + d, err := msg.pack(j.queueURL) + if err != nil { + return err + } + _, err = j.client.SendMessage(ctx, d) + if err != nil { + return err + } + + return nil +} + +func (j *JobConsumer) Register(_ context.Context, p *pipeline.Pipeline) error { + j.pipeline.Store(p) + return nil +} + +func (j *JobConsumer) Run(_ context.Context, p *pipeline.Pipeline) error { + const op = errors.Op("rabbit_consume") + + j.Lock() + defer j.Unlock() + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p.Name() { + return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) + } + + atomic.AddUint32(&j.listeners, 1) + + // start listener + go j.listen(context.Background()) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + + return nil +} + +func (j *JobConsumer) Stop(context.Context) error { + j.pauseCh <- struct{}{} + + pipe := j.pipeline.Load().(*pipeline.Pipeline) + j.eh.Push(events.JobEvent{ + Event: events.EventPipeStopped, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) + return nil +} + +func (j *JobConsumer) Pause(_ context.Context, p string) { + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + return + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 0 { + j.log.Warn("no active listeners, nothing to pause") + return + } + + atomic.AddUint32(&j.listeners, ^uint32(0)) + + // stop consume + j.pauseCh <- struct{}{} + + j.eh.Push(events.JobEvent{ + Event: events.EventPipePaused, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} + +func (j *JobConsumer) Resume(_ context.Context, p string) { + // load atomic value + pipe := j.pipeline.Load().(*pipeline.Pipeline) + if pipe.Name() != p { + j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) + return + } + + l := atomic.LoadUint32(&j.listeners) + // no active listeners + if l == 1 { + j.log.Warn("sqs listener already in the active state") + return + } + + // start listener + go j.listen(context.Background()) + + // increase num of listeners + atomic.AddUint32(&j.listeners, 1) + + j.eh.Push(events.JobEvent{ + Event: events.EventPipeActive, + Driver: pipe.Driver(), + Pipeline: pipe.Name(), + Start: time.Now(), + }) +} diff --git a/plugins/jobs/drivers/sqs/item.go b/plugins/jobs/drivers/sqs/item.go new file mode 100644 index 00000000..df72b2e5 --- /dev/null +++ b/plugins/jobs/drivers/sqs/item.go @@ -0,0 +1,247 @@ +package sqs + +import ( + "context" + "strconv" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + json "github.com/json-iterator/go" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/utils" +) + +const ( + StringType string = "String" + NumberType string = "Number" + BinaryType string = "Binary" + ApproximateReceiveCount string = "ApproximateReceiveCount" +) + +var itemAttributes = []string{ + 
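+	// editorial note: this list mirrors the MessageAttributes written by
+	// Item.pack below; unpack rejects any message that lacks one of these keys.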
job.RRJob, + job.RRDelay, + job.RRPriority, + job.RRHeaders, +} + +type Item struct { + // Job contains pluginName of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-values pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` + + // Private ================ + approxReceiveCount int64 + queue *string + receiptHandler *string + client *sqs.Client + requeueFn func(context.Context, *Item) error +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} + +func (i *Item) ID() string { + return i.Ident +} + +func (i *Item) Priority() int64 { + return i.Options.Priority +} + +// Body packs job payload into binary payload. +func (i *Item) Body() []byte { + return utils.AsBytes(i.Payload) +} + +// Context packs job context (job, id) into binary payload. +// Not used in the sqs, MessageAttributes used instead +func (i *Item) Context() ([]byte, error) { + ctx, err := json.Marshal( + struct { + ID string `json:"id"` + Job string `json:"job"` + Headers map[string][]string `json:"headers"` + Pipeline string `json:"pipeline"` + }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, + ) + + if err != nil { + return nil, err + } + + return ctx, nil +} + +func (i *Item) Ack() error { + _, err := i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func (i *Item) Nack() error { + // requeue message + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func (i *Item) Requeue(headers map[string][]string, delay int64) error { + // overwrite the delay + i.Options.Delay = delay + i.Headers = headers + + // requeue message + err := i.Options.requeueFn(context.Background(), i) + if err != nil { + return err + } + + // Delete job from the queue only after successful requeue + _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: i.Options.queue, + ReceiptHandle: i.Options.receiptHandler, + }) + + if err != nil { + return err + } + + return nil +} + +func fromJob(job *job.Job) *Item { + return &Item{ + Job: job.Job, + Ident: job.Ident, + Payload: job.Payload, + Headers: job.Headers, + Options: &Options{ + Priority: job.Options.Priority, + Pipeline: job.Options.Pipeline, + Delay: job.Options.Delay, + }, + } +} + +func (i 
*Item) pack(queue *string) (*sqs.SendMessageInput, error) { + // pack headers map + data, err := json.Marshal(i.Headers) + if err != nil { + return nil, err + } + + return &sqs.SendMessageInput{ + MessageBody: aws.String(i.Payload), + QueueUrl: queue, + DelaySeconds: int32(i.Options.Delay), + MessageAttributes: map[string]types.MessageAttributeValue{ + job.RRJob: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Job)}, + job.RRDelay: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Delay)))}, + job.RRHeaders: {DataType: aws.String(BinaryType), BinaryValue: data, BinaryListValues: nil, StringListValues: nil, StringValue: nil}, + job.RRPriority: {DataType: aws.String(NumberType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Priority)))}, + }, + }, nil +} + +func (j *JobConsumer) unpack(msg *types.Message) (*Item, error) { + const op = errors.Op("sqs_unpack") + // reserved + if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok { + return nil, errors.E(op, errors.Str("failed to unpack the ApproximateReceiveCount attribute")) + } + + for i := 0; i < len(itemAttributes); i++ { + if _, ok := msg.MessageAttributes[itemAttributes[i]]; !ok { + return nil, errors.E(op, errors.Errorf("missing queue attribute: %s", itemAttributes[i])) + } + } + + var h map[string][]string + err := json.Unmarshal(msg.MessageAttributes[job.RRHeaders].BinaryValue, &h) + if err != nil { + return nil, err + } + + delay, err := strconv.Atoi(*msg.MessageAttributes[job.RRDelay].StringValue) + if err != nil { + return nil, errors.E(op, err) + } + + priority, err := strconv.Atoi(*msg.MessageAttributes[job.RRPriority].StringValue) + if err != nil { + return nil, errors.E(op, err) + } + + recCount, err := strconv.Atoi(msg.Attributes[ApproximateReceiveCount]) + if err != nil { + return nil, errors.E(op, err) + } + + item := &Item{ + Job: *msg.MessageAttributes[job.RRJob].StringValue, + Payload: *msg.Body, + Headers: h, + Options: &Options{ + Delay: int64(delay), + Priority: int64(priority), + + // private + approxReceiveCount: int64(recCount), + client: j.client, + queue: j.queueURL, + receiptHandler: msg.ReceiptHandle, + requeueFn: j.handleItem, + }, + } + + return item, nil +} diff --git a/plugins/jobs/drivers/sqs/listener.go b/plugins/jobs/drivers/sqs/listener.go new file mode 100644 index 00000000..9efef90d --- /dev/null +++ b/plugins/jobs/drivers/sqs/listener.go @@ -0,0 +1,87 @@ +package sqs + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/aws/smithy-go" +) + +const ( + // All - get all message attribute names + All string = "All" + + // NonExistentQueue AWS error code + NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue" +) + +func (j *JobConsumer) listen(ctx context.Context) { //nolint:gocognit + for { + select { + case <-j.pauseCh: + j.log.Warn("sqs listener stopped") + return + default: + message, err := j.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ + QueueUrl: j.queueURL, + MaxNumberOfMessages: j.prefetch, + AttributeNames: []types.QueueAttributeName{types.QueueAttributeName(ApproximateReceiveCount)}, + MessageAttributeNames: []string{All}, + // The new value for the message's visibility timeout (in 
seconds). Values range: 0 + // to 43200. Maximum: 12 hours. + VisibilityTimeout: j.visibilityTimeout, + WaitTimeSeconds: j.waitTime, + }) + + if err != nil { + if oErr, ok := (err).(*smithy.OperationError); ok { + if rErr, ok := oErr.Err.(*http.ResponseError); ok { + if apiErr, ok := rErr.Err.(*smithy.GenericAPIError); ok { + // in case of NonExistentQueue - recreate the queue + if apiErr.Code == NonExistentQueue { + j.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault()) + _, err = j.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: j.queue, Attributes: j.attributes, Tags: j.tags}) + if err != nil { + j.log.Error("create queue", "error", err) + } + // To successfully create a new queue, you must provide a + // queue name that adheres to the limits related to the queues + // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) + // and is unique within the scope of your queues. After you create a queue, you + // must wait at least one second after the queue is created to be able to use the <------------ + // queue. To get the queue URL, use the GetQueueUrl action. GetQueueUrl require + time.Sleep(time.Second * 2) + continue + } + } + } + } + + j.log.Error("receive message", "error", err) + continue + } + + for i := 0; i < len(message.Messages); i++ { + m := message.Messages[i] + item, err := j.unpack(&m) + if err != nil { + _, errD := j.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{ + QueueUrl: j.queueURL, + ReceiptHandle: m.ReceiptHandle, + }) + if errD != nil { + j.log.Error("message unpack, failed to delete the message from the queue", "error", err) + } + + j.log.Error("message unpack", "error", err) + continue + } + + j.pq.Insert(item) + } + } + } +} diff --git a/plugins/jobs/drivers/sqs/plugin.go b/plugins/jobs/drivers/sqs/plugin.go new file mode 100644 index 00000000..54f61ff5 --- /dev/null +++ b/plugins/jobs/drivers/sqs/plugin.go @@ -0,0 +1,39 @@ +package sqs + +import ( + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +const ( + pluginName string = "sqs" +) + +type Plugin struct { + log logger.Logger + cfg config.Configurer +} + +func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { + p.log = log + p.cfg = cfg + return nil +} + +func (p *Plugin) Available() {} + +func (p *Plugin) Name() string { + return pluginName +} + +func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return NewSQSConsumer(configKey, p.log, p.cfg, e, pq) +} + +func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { + return FromPipeline(pipe, p.log, p.cfg, e, pq) +} diff --git a/plugins/jobs/job/general.go b/plugins/jobs/job/general.go new file mode 100644 index 00000000..390f44b5 --- /dev/null +++ b/plugins/jobs/job/general.go @@ -0,0 +1,29 @@ +package job + +// constant keys to pack/unpack messages from different drivers +const ( + RRID string = "rr_id" + RRJob string = "rr_job" + RRHeaders string = "rr_headers" + RRPipeline string = "rr_pipeline" + RRDelay string = "rr_delay" + RRPriority string = "rr_priority" +) + +// 
Job carries information about single job. +type Job struct { + // Job contains name of job broker (usually PHP class). + Job string `json:"job"` + + // Ident is unique identifier of the job, should be provided from outside + Ident string `json:"id"` + + // Payload is string data (usually JSON) passed to Job broker. + Payload string `json:"payload"` + + // Headers with key-value pairs + Headers map[string][]string `json:"headers"` + + // Options contains set of PipelineOptions specific to job execution. Can be empty. + Options *Options `json:"options,omitempty"` +} diff --git a/plugins/jobs/job/job_options.go b/plugins/jobs/job/job_options.go new file mode 100644 index 00000000..b7e4ed36 --- /dev/null +++ b/plugins/jobs/job/job_options.go @@ -0,0 +1,32 @@ +package job + +import "time" + +// Options carry information about how to handle given job. +type Options struct { + // Priority is job priority, default - 10 + // pointer to distinguish 0 as a priority and nil as priority not set + Priority int64 `json:"priority"` + + // Pipeline manually specified pipeline. + Pipeline string `json:"pipeline,omitempty"` + + // Delay defines time duration to delay execution for. Defaults to none. + Delay int64 `json:"delay,omitempty"` +} + +// Merge merges job options. +func (o *Options) Merge(from *Options) { + if o.Pipeline == "" { + o.Pipeline = from.Pipeline + } + + if o.Delay == 0 { + o.Delay = from.Delay + } +} + +// DelayDuration returns delay duration in a form of time.Duration. +func (o *Options) DelayDuration() time.Duration { + return time.Second * time.Duration(o.Delay) +} diff --git a/plugins/jobs/job/job_options_test.go b/plugins/jobs/job/job_options_test.go new file mode 100644 index 00000000..a47151a3 --- /dev/null +++ b/plugins/jobs/job/job_options_test.go @@ -0,0 +1,45 @@ +package job + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestOptions_DelayDuration(t *testing.T) { + opts := &Options{Delay: 0} + assert.Equal(t, time.Duration(0), opts.DelayDuration()) +} + +func TestOptions_DelayDuration2(t *testing.T) { + opts := &Options{Delay: 1} + assert.Equal(t, time.Second, opts.DelayDuration()) +} + +func TestOptions_Merge(t *testing.T) { + opts := &Options{} + + opts.Merge(&Options{ + Pipeline: "pipeline", + Delay: 2, + }) + + assert.Equal(t, "pipeline", opts.Pipeline) + assert.Equal(t, int64(2), opts.Delay) +} + +func TestOptions_MergeKeepOriginal(t *testing.T) { + opts := &Options{ + Pipeline: "default", + Delay: 10, + } + + opts.Merge(&Options{ + Pipeline: "pipeline", + Delay: 2, + }) + + assert.Equal(t, "default", opts.Pipeline) + assert.Equal(t, int64(10), opts.Delay) +} diff --git a/plugins/jobs/pipeline/pipeline.go b/plugins/jobs/pipeline/pipeline.go new file mode 100644 index 00000000..2f4671d3 --- /dev/null +++ b/plugins/jobs/pipeline/pipeline.go @@ -0,0 +1,90 @@ +package pipeline + +// Pipeline defines pipeline options. +type Pipeline map[string]interface{} + +const ( + priority string = "priority" + driver string = "driver" + name string = "name" +) + +// With pipeline value +func (p *Pipeline) With(name string, value interface{}) { + (*p)[name] = value +} + +// Name returns pipeline name. +func (p Pipeline) Name() string { + return p.String(name, "") +} + +// Driver associated with the pipeline. +func (p Pipeline) Driver() string { + return p.String(driver, "") +} + +// Has checks if value presented in pipeline. 
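+// (editorial example, hypothetical pipeline: for
+//   p := Pipeline{"name": "test-local", "driver": "ephemeral", "prefetch": 100}
+// the accessors below yield p.Has("prefetch") == true,
+// p.Int("prefetch", 10) == 100, p.String("driver", "") == "ephemeral" and
+// p.Priority() == 10, the default, since the "priority" key is absent.)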
+func (p Pipeline) Has(name string) bool { + if _, ok := p[name]; ok { + return true + } + + return false +} + +// String must return option value as string or return default value. +func (p Pipeline) String(name string, d string) string { + if value, ok := p[name]; ok { + if str, ok := value.(string); ok { + return str + } + } + + return d +} + +// Int must return option value as string or return default value. +func (p Pipeline) Int(name string, d int) int { + if value, ok := p[name]; ok { + if i, ok := value.(int); ok { + return i + } + } + + return d +} + +// Bool must return option value as bool or return default value. +func (p Pipeline) Bool(name string, d bool) bool { + if value, ok := p[name]; ok { + if i, ok := value.(bool); ok { + return i + } + } + + return d +} + +// Map must return nested map value or empty config. +// Here might be sqs attributes or tags for example +func (p Pipeline) Map(name string, out map[string]interface{}) { + if value, ok := p[name]; ok { + if m, ok := value.(map[string]interface{}); ok { + for k, v := range m { + out[k] = v + } + } + } +} + +// Priority returns default pipeline priority +func (p Pipeline) Priority() int64 { + if value, ok := p[priority]; ok { + if v, ok := value.(int64); ok { + return v + } + } + + return 10 +} diff --git a/plugins/jobs/pipeline/pipeline_test.go b/plugins/jobs/pipeline/pipeline_test.go new file mode 100644 index 00000000..4482c70d --- /dev/null +++ b/plugins/jobs/pipeline/pipeline_test.go @@ -0,0 +1,21 @@ +package pipeline + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPipeline_String(t *testing.T) { + pipe := Pipeline{"value": "value"} + + assert.Equal(t, "value", pipe.String("value", "")) + assert.Equal(t, "value", pipe.String("other", "value")) +} + +func TestPipeline_Has(t *testing.T) { + pipe := Pipeline{"options": map[string]interface{}{"ttl": 10}} + + assert.Equal(t, true, pipe.Has("options")) + assert.Equal(t, false, pipe.Has("other")) +} diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go new file mode 100644 index 00000000..26015516 --- /dev/null +++ b/plugins/jobs/plugin.go @@ -0,0 +1,573 @@ +package jobs + +import ( + "context" + "fmt" + "sync" + "time" + + endure "github.com/spiral/endure/pkg/container" + "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/jobs" + "github.com/spiral/roadrunner/v2/pkg/events" + "github.com/spiral/roadrunner/v2/pkg/payload" + "github.com/spiral/roadrunner/v2/pkg/pool" + priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/jobs/job" + "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/server" +) + +const ( + // RrMode env variable + RrMode string = "RR_MODE" + RrModeJobs string = "jobs" + + PluginName string = "jobs" + pipelines string = "pipelines" +) + +type Plugin struct { + sync.RWMutex + + // Jobs plugin configuration + cfg *Config `structure:"jobs"` + log logger.Logger + workersPool pool.Pool + server server.Server + + jobConstructors map[string]jobs.Constructor + consumers map[string]jobs.Consumer + + // events handler + events events.Handler + + // priority queue implementation + queue priorityqueue.Queue + + // parent config for broken options. 
keys are pipelines names, values - pointers to the associated pipeline + pipelines sync.Map + + // initial set of the pipelines to consume + consume map[string]struct{} + + // signal channel to stop the pollers + stopCh chan struct{} + + // internal payloads pool + pldPool sync.Pool +} + +func (p *Plugin) Init(cfg config.Configurer, log logger.Logger, server server.Server) error { + const op = errors.Op("jobs_plugin_init") + if !cfg.Has(PluginName) { + return errors.E(op, errors.Disabled) + } + + err := cfg.UnmarshalKey(PluginName, &p.cfg) + if err != nil { + return errors.E(op, err) + } + + p.cfg.InitDefaults() + + p.server = server + + p.events = events.NewEventsHandler() + p.events.AddListener(p.collectJobsEvents) + + p.jobConstructors = make(map[string]jobs.Constructor) + p.consumers = make(map[string]jobs.Consumer) + p.consume = make(map[string]struct{}) + p.stopCh = make(chan struct{}, 1) + + p.pldPool = sync.Pool{New: func() interface{} { + // with nil fields + return &payload.Payload{} + }} + + // initial set of pipelines + for i := range p.cfg.Pipelines { + p.pipelines.Store(i, p.cfg.Pipelines[i]) + } + + if len(p.cfg.Consume) > 0 { + for i := 0; i < len(p.cfg.Consume); i++ { + p.consume[p.cfg.Consume[i]] = struct{}{} + } + } + + // initialize priority queue + p.queue = priorityqueue.NewBinHeap(p.cfg.PipelineSize) + p.log = log + + return nil +} + +func (p *Plugin) Serve() chan error { //nolint:gocognit + errCh := make(chan error, 1) + const op = errors.Op("jobs_plugin_serve") + + // register initial pipelines + p.pipelines.Range(func(key, value interface{}) bool { + t := time.Now() + // pipeline name (ie test-local, sqs-aws, etc) + name := key.(string) + + // pipeline associated with the name + pipe := value.(*pipeline.Pipeline) + // driver for the pipeline (ie amqp, ephemeral, etc) + dr := pipe.Driver() + + // jobConstructors contains constructors for the drivers + // we need here to initialize these drivers for the pipelines + if c, ok := p.jobConstructors[dr]; ok { + // config key for the particular sub-driver jobs.pipelines.test-local + configKey := fmt.Sprintf("%s.%s.%s", PluginName, pipelines, name) + + // init the driver + initializedDriver, err := c.JobsConstruct(configKey, p.events, p.queue) + if err != nil { + errCh <- errors.E(op, err) + return false + } + + // add driver to the set of the consumers (name - pipeline name, value - associated driver) + p.consumers[name] = initializedDriver + + // register pipeline for the initialized driver + err = initializedDriver.Register(context.Background(), pipe) + if err != nil { + errCh <- errors.E(op, errors.Errorf("pipe register failed for the driver: %s with pipe name: %s", pipe.Driver(), pipe.Name())) + return false + } + + // if pipeline initialized to be consumed, call Run on it + if _, ok := p.consume[name]; ok { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + err = initializedDriver.Run(ctx, pipe) + if err != nil { + errCh <- errors.E(op, err) + return false + } + return true + } + + return true + } + + p.events.Push(events.JobEvent{ + Event: events.EventDriverReady, + Pipeline: pipe.Name(), + Driver: pipe.Driver(), + Start: t, + Elapsed: t.Sub(t), + }) + + return true + }) + + var err error + p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: "jobs"}) + if err != nil { + errCh <- err + return errCh + } + + // start listening + go func() { + for i := uint8(0); i < p.cfg.NumPollers; i++ { + go func() 
{ + for { + select { + case <-p.stopCh: + p.log.Info("------> job poller stopped <------") + return + default: + // get prioritized JOB from the queue + jb := p.queue.ExtractMin() + + // parse the context + // for each job, context contains: + /* + 1. Job class + 2. Job ID provided from the outside + 3. Job Headers map[string][]string + 4. Timeout in seconds + 5. Pipeline name + */ + + ctx, err := jb.Context() + if err != nil { + errNack := jb.Nack() + if errNack != nil { + p.log.Error("negatively acknowledge failed", "error", errNack) + } + p.log.Error("job marshal context", "error", err) + continue + } + + // get payload from the sync.Pool + exec := p.getPayload(jb.Body(), ctx) + + // protect from the pool reset + p.RLock() + resp, err := p.workersPool.Exec(exec) + p.RUnlock() + if err != nil { + // RR protocol level error, Nack the job + errNack := jb.Nack() + if errNack != nil { + p.log.Error("negatively acknowledge failed", "error", errNack) + } + + p.log.Error("job execute failed", "error", err) + + p.putPayload(exec) + continue + } + + // if response is nil or body is nil, just acknowledge the job + if resp == nil || resp.Body == nil { + p.putPayload(exec) + err = jb.Ack() + if err != nil { + p.log.Error("acknowledge error, job might be missed", "error", err) + continue + } + } + + // handle the response protocol + err = handleResponse(resp.Body, jb, p.log) + if err != nil { + p.putPayload(exec) + errNack := jb.Nack() + if errNack != nil { + p.log.Error("negatively acknowledge failed, job might be lost", "root error", err, "error nack", errNack) + continue + } + + p.log.Error("job negatively acknowledged", "error", err) + continue + } + + // return payload + p.putPayload(exec) + } + } + }() + } + }() + + return errCh +} + +func (p *Plugin) Stop() error { + for k, v := range p.consumers { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + err := v.Stop(ctx) + if err != nil { + cancel() + p.log.Error("stop job driver", "driver", k) + continue + } + cancel() + } + + // this function can block forever, but we don't care, because we might have a chance to exit from the pollers, + // but if not, this is not a problem at all. + // The main target is to stop the drivers + go func() { + for i := uint8(0); i < p.cfg.NumPollers; i++ { + // stop jobs plugin pollers + p.stopCh <- struct{}{} + } + }() + + // just wait pollers for 5 seconds before exit + time.Sleep(time.Second * 5) + + return nil +} + +func (p *Plugin) Collects() []interface{} { + return []interface{}{ + p.CollectMQBrokers, + } +} + +func (p *Plugin) CollectMQBrokers(name endure.Named, c jobs.Constructor) { + p.jobConstructors[name.Name()] = c +} + +func (p *Plugin) Available() {} + +func (p *Plugin) Name() string { + return PluginName +} + +func (p *Plugin) Reset() error { + p.Lock() + defer p.Unlock() + + const op = errors.Op("jobs_plugin_reset") + p.log.Info("JOBS plugin received restart request. 
Restarting...") + p.workersPool.Destroy(context.Background()) + p.workersPool = nil + + var err error + p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: RrModeJobs}, p.collectJobsEvents) + if err != nil { + return errors.E(op, err) + } + + p.log.Info("JOBS workers pool successfully restarted") + + return nil +} + +func (p *Plugin) Push(j *job.Job) error { + const op = errors.Op("jobs_plugin_push") + + // get the pipeline for the job + pipe, ok := p.pipelines.Load(j.Options.Pipeline) + if !ok { + return errors.E(op, errors.Errorf("no such pipeline, requested: %s", j.Options.Pipeline)) + } + + // type conversion + ppl := pipe.(*pipeline.Pipeline) + + d, ok := p.consumers[ppl.Name()] + if !ok { + return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver())) + } + + // if job has no priority, inherit it from the pipeline + // TODO(rustatian) merge all options, not only priority + if j.Options.Priority == 0 { + j.Options.Priority = ppl.Priority() + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + + err := d.Push(ctx, j) + if err != nil { + cancel() + return errors.E(op, err) + } + + cancel() + + return nil +} + +func (p *Plugin) PushBatch(j []*job.Job) error { + const op = errors.Op("jobs_plugin_push") + + for i := 0; i < len(j); i++ { + // get the pipeline for the job + pipe, ok := p.pipelines.Load(j[i].Options.Pipeline) + if !ok { + return errors.E(op, errors.Errorf("no such pipeline, requested: %s", j[i].Options.Pipeline)) + } + + ppl := pipe.(*pipeline.Pipeline) + + d, ok := p.consumers[ppl.Name()] + if !ok { + return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver())) + } + + // if job has no priority, inherit it from the pipeline + if j[i].Options.Priority == 0 { + j[i].Options.Priority = ppl.Priority() + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + err := d.Push(ctx, j[i]) + if err != nil { + cancel() + return errors.E(op, err) + } + + cancel() + } + + return nil +} + +func (p *Plugin) Pause(pp string) { + pipe, ok := p.pipelines.Load(pp) + + if !ok { + p.log.Error("no such pipeline", "requested", pp) + } + + ppl := pipe.(*pipeline.Pipeline) + + d, ok := p.consumers[ppl.Name()] + if !ok { + p.log.Warn("driver for the pipeline not found", "pipeline", pp) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + // redirect call to the underlying driver + d.Pause(ctx, ppl.Name()) +} + +func (p *Plugin) Resume(pp string) { + pipe, ok := p.pipelines.Load(pp) + if !ok { + p.log.Error("no such pipeline", "requested", pp) + } + + ppl := pipe.(*pipeline.Pipeline) + + d, ok := p.consumers[ppl.Name()] + if !ok { + p.log.Warn("driver for the pipeline not found", "pipeline", pp) + return + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + // redirect call to the underlying driver + d.Resume(ctx, ppl.Name()) +} + +// Declare a pipeline. 
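+// (editorial example, hypothetical values: a dynamically declared pipeline is
+// the same generic key/value map used above, e.g.
+//
+//   pipe := &pipeline.Pipeline{
+//       "name":     "dynamic-pipe",
+//       "driver":   "ephemeral",
+//       "priority": int64(3),
+//   }
+//   err := p.Declare(pipe)
+//
+// which registers the driver and, when the name is listed under `consume`,
+// starts consuming immediately.)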
+func (p *Plugin) Declare(pipeline *pipeline.Pipeline) error { + const op = errors.Op("jobs_plugin_declare") + // driver for the pipeline (ie amqp, ephemeral, etc) + dr := pipeline.Driver() + if dr == "" { + return errors.E(op, errors.Errorf("no associated driver with the pipeline, pipeline name: %s", pipeline.Name())) + } + + // jobConstructors contains constructors for the drivers + // we need here to initialize these drivers for the pipelines + if c, ok := p.jobConstructors[dr]; ok { + // init the driver from pipeline + initializedDriver, err := c.FromPipeline(pipeline, p.events, p.queue) + if err != nil { + return errors.E(op, err) + } + + // add driver to the set of the consumers (name - pipeline name, value - associated driver) + p.consumers[pipeline.Name()] = initializedDriver + + // register pipeline for the initialized driver + err = initializedDriver.Register(context.Background(), pipeline) + if err != nil { + return errors.E(op, errors.Errorf("pipe register failed for the driver: %s with pipe name: %s", pipeline.Driver(), pipeline.Name())) + } + + // if pipeline initialized to be consumed, call Run on it + // but likely for the dynamic pipelines it should be started manually + if _, ok := p.consume[pipeline.Name()]; ok { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + err = initializedDriver.Run(ctx, pipeline) + if err != nil { + return errors.E(op, err) + } + } + } + + // save the pipeline + p.pipelines.Store(pipeline.Name(), pipeline) + + return nil +} + +// Destroy pipeline and release all associated resources. +func (p *Plugin) Destroy(pp string) error { + const op = errors.Op("jobs_plugin_destroy") + pipe, ok := p.pipelines.Load(pp) + if !ok { + return errors.E(op, errors.Errorf("no such pipeline, requested: %s", pp)) + } + + // type conversion + ppl := pipe.(*pipeline.Pipeline) + + d, ok := p.consumers[ppl.Name()] + if !ok { + return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver())) + } + + // delete consumer + delete(p.consumers, ppl.Name()) + p.pipelines.Delete(pp) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout)) + defer cancel() + + return d.Stop(ctx) +} + +func (p *Plugin) List() []string { + out := make([]string, 0, 10) + + p.pipelines.Range(func(key, _ interface{}) bool { + // we can safely convert value here as we know that we store keys as strings + out = append(out, key.(string)) + return true + }) + + return out +} + +func (p *Plugin) RPC() interface{} { + return &rpc{ + log: p.log, + p: p, + } +} + +func (p *Plugin) collectJobsEvents(event interface{}) { + if jev, ok := event.(events.JobEvent); ok { + switch jev.Event { + case events.EventPipePaused: + p.log.Info("pipeline paused", "pipeline", jev.Pipeline, "driver", jev.Driver, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventJobStart: + p.log.Info("job started", "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventJobOK: + p.log.Info("job OK", "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventPushOK: + p.log.Info("job pushed to the queue", "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventPushError: + p.log.Error("job push error", "error", jev.Error, "pipeline", jev.Pipeline, "ID", jev.ID, "Driver", jev.Driver, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventJobError: + p.log.Error("job error", "error", jev.Error, "pipeline", jev.Pipeline, "ID", 
jev.ID, "Driver", jev.Driver, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventPipeActive: + p.log.Info("pipeline active", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventPipeStopped: + p.log.Warn("pipeline stopped", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventPipeError: + p.log.Error("pipeline error", "pipeline", jev.Pipeline, "error", jev.Error, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventDriverReady: + p.log.Info("driver ready", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed) + case events.EventInitialized: + p.log.Info("driver initialized", "driver", jev.Driver, "start", jev.Start.UTC()) + } + } +} + +func (p *Plugin) getPayload(body, context []byte) *payload.Payload { + pld := p.pldPool.Get().(*payload.Payload) + pld.Body = body + pld.Context = context + return pld +} + +func (p *Plugin) putPayload(pld *payload.Payload) { + pld.Body = nil + pld.Context = nil + p.pldPool.Put(pld) +} diff --git a/plugins/jobs/protocol.go b/plugins/jobs/protocol.go new file mode 100644 index 00000000..9d769fdf --- /dev/null +++ b/plugins/jobs/protocol.go @@ -0,0 +1,78 @@ +package jobs + +import ( + json "github.com/json-iterator/go" + "github.com/spiral/errors" + pq "github.com/spiral/roadrunner/v2/pkg/priority_queue" + "github.com/spiral/roadrunner/v2/plugins/logger" +) + +type Type uint32 + +const ( + NoError Type = iota + Error +) + +// internal worker protocol (jobs mode) +type protocol struct { + // message type, see Type + T Type `json:"type"` + // Payload + Data json.RawMessage `json:"data"` +} + +type errorResp struct { + Msg string `json:"message"` + Requeue bool `json:"requeue"` + Delay int64 `json:"delay_seconds"` + Headers map[string][]string `json:"headers"` +} + +func handleResponse(resp []byte, jb pq.Item, log logger.Logger) error { + const op = errors.Op("jobs_handle_response") + // TODO(rustatian) to sync.Pool + p := &protocol{} + + err := json.Unmarshal(resp, p) + if err != nil { + return errors.E(op, err) + } + + switch p.T { + // likely case + case NoError: + err = jb.Ack() + if err != nil { + return errors.E(op, err) + } + case Error: + // TODO(rustatian) to sync.Pool + er := &errorResp{} + + err = json.Unmarshal(p.Data, er) + if err != nil { + return errors.E(op, err) + } + + log.Error("jobs protocol error", "error", er.Msg, "delay", er.Delay, "requeue", er.Requeue) + + if er.Requeue { + err = jb.Requeue(er.Headers, er.Delay) + if err != nil { + return errors.E(op, err) + } + return nil + } + + return errors.E(op, errors.Errorf("jobs response error: %v", er.Msg)) + + default: + err = jb.Ack() + if err != nil { + return errors.E(op, err) + } + } + + return nil +} diff --git a/plugins/jobs/response_protocol.md b/plugins/jobs/response_protocol.md new file mode 100644 index 00000000..c89877e3 --- /dev/null +++ b/plugins/jobs/response_protocol.md @@ -0,0 +1,54 @@ +Response protocol used to communicate between worker and RR. When a worker completes its job, it should send a typed +response. The response should contain: + +1. `type` field with the message type. Can be treated as enums. +2. `data` field with the dynamic response related to the type. + +Types are: + +``` +0 - NO_ERROR +1 - ERROR +2 - ... +``` + +- `NO_ERROR`: contains only `type` and empty `data`. 
+- `ERROR`: contains `type`: 1 and a `data` field with: a `message` describing the error, a `requeue` flag to requeue
+  the job, `delay_seconds` to delay the job for the provided number of seconds, and `headers` - the job's headers
+  represented as a hashmap with string keys and arrays of strings as values.
+
+For example:
+
+`NO_ERROR`:
+
+```json
+{
+  "type": 0,
+  "data": {}
+}
+```
+
+`ERROR`:
+
+```json
+{
+  "type": 1,
+  "data": {
+    "message": "internal worker error",
+    "requeue": true,
+    "headers": [
+      {
+        "test": [
+          "1",
+          "2",
+          "3"
+        ]
+      }
+    ],
+    "delay_seconds": 10
+  }
+}
+```
diff --git a/plugins/jobs/rpc.go b/plugins/jobs/rpc.go
new file mode 100644
index 00000000..7f9859fb
--- /dev/null
+++ b/plugins/jobs/rpc.go
@@ -0,0 +1,136 @@
+package jobs
+
+import (
+	"github.com/spiral/errors"
+	"github.com/spiral/roadrunner/v2/plugins/jobs/job"
+	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
+	"github.com/spiral/roadrunner/v2/plugins/logger"
+	jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
+)
+
+type rpc struct {
+	log logger.Logger
+	p   *Plugin
+}
+
+func (r *rpc) Push(j *jobsv1beta.PushRequest, _ *jobsv1beta.Empty) error {
+	const op = errors.Op("jobs_rpc_push")
+
+	// convert the transport entity into the domain one
+	// TODO: find a quicker way to do this
+
+	if j.GetJob().GetId() == "" {
+		return errors.E(op, errors.Str("empty ID field not allowed"))
+	}
+
+	err := r.p.Push(r.from(j.GetJob()))
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	return nil
+}
+
+func (r *rpc) PushBatch(j *jobsv1beta.PushBatchRequest, _ *jobsv1beta.Empty) error {
+	const op = errors.Op("jobs_rpc_push_batch")
+
+	l := len(j.GetJobs())
+
+	batch := make([]*job.Job, l)
+
+	for i := 0; i < l; i++ {
+		// convert the transport entity into the domain one
+		batch[i] = r.from(j.GetJobs()[i])
+	}
+
+	err := r.p.PushBatch(batch)
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	return nil
+}
+
+func (r *rpc) Pause(req *jobsv1beta.Pipelines, _ *jobsv1beta.Empty) error {
+	for i := 0; i < len(req.GetPipelines()); i++ {
+		r.p.Pause(req.GetPipelines()[i])
+	}
+
+	return nil
+}
+
+func (r *rpc) Resume(req *jobsv1beta.Pipelines, _ *jobsv1beta.Empty) error {
+	for i := 0; i < len(req.GetPipelines()); i++ {
+		r.p.Resume(req.GetPipelines()[i])
+	}
+
+	return nil
+}
+
+func (r *rpc) List(_ *jobsv1beta.Empty, resp *jobsv1beta.Pipelines) error {
+	resp.Pipelines = r.p.List()
+	return nil
+}
+
+// Declare is used to dynamically declare any type of pipeline.
+// Mandatory fields:
+// 1. Driver
+// 2. Pipeline name
+// 3. Options related to the particular pipeline
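+// The request carries the pipeline as a flat string map that is copied key-by-key
+// into pipeline.Pipeline. A hypothetical payload (illustrative keys that mirror the
+// pipeline fields used by this plugin) could look like:
+//
+//	{"name": "test-pipeline", "driver": "ephemeral", "priority": "10"}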
+func (r *rpc) Declare(req *jobsv1beta.DeclareRequest, _ *jobsv1beta.Empty) error {
+	const op = errors.Op("rpc_declare_pipeline")
+	pipe := &pipeline.Pipeline{}
+
+	for i := range req.GetPipeline() {
+		(*pipe)[i] = req.GetPipeline()[i]
+	}
+
+	err := r.p.Declare(pipe)
+	if err != nil {
+		return errors.E(op, err)
+	}
+
+	return nil
+}
+
+func (r *rpc) Destroy(req *jobsv1beta.Pipelines, resp *jobsv1beta.Pipelines) error {
+	const op = errors.Op("rpc_destroy_pipeline")
+
+	var destroyed []string //nolint:prealloc
+	for i := 0; i < len(req.GetPipelines()); i++ {
+		err := r.p.Destroy(req.GetPipelines()[i])
+		if err != nil {
+			return errors.E(op, err)
+		}
+		destroyed = append(destroyed, req.GetPipelines()[i])
+	}
+
+	// return the destroyed pipelines
+	resp.Pipelines = destroyed
+
+	return nil
+}
+
+// from converts a transport entity into a domain one
+func (r *rpc) from(j *jobsv1beta.Job) *job.Job {
+	headers := map[string][]string{}
+
+	for k, v := range j.GetHeaders() {
+		headers[k] = v.GetValue()
+	}
+
+	jb := &job.Job{
+		Job:     j.GetJob(),
+		Headers: headers,
+		Ident:   j.GetId(),
+		Payload: j.GetPayload(),
+		Options: &job.Options{
+			Priority: j.GetOptions().GetPriority(),
+			Pipeline: j.GetOptions().GetPipeline(),
+			Delay:    j.GetOptions().GetDelay(),
+		},
+	}
+
+	return jb
+}
diff --git a/plugins/kv/drivers/boltdb/driver.go b/plugins/kv/drivers/boltdb/driver.go
index 47d37cc2..15a5674f 100644
--- a/plugins/kv/drivers/boltdb/driver.go
+++ b/plugins/kv/drivers/boltdb/driver.go
@@ -10,7 +10,6 @@ import (
 	"github.com/spiral/errors"
 	"github.com/spiral/roadrunner/v2/plugins/config"
-	"github.com/spiral/roadrunner/v2/plugins/kv"
 	"github.com/spiral/roadrunner/v2/plugins/logger"
 	kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
 	"github.com/spiral/roadrunner/v2/utils"
@@ -34,7 +33,7 @@ type Driver struct {
 	stop chan struct{}
 }
 
-func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (kv.Storage, error) {
+func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (*Driver, error) {
 	const op = errors.Op("new_boltdb_driver")
 
 	d := &Driver{
diff --git a/plugins/kv/drivers/boltdb/plugin.go b/plugins/kv/drivers/boltdb/plugin.go
index 6ae1a1f6..c839130f 100644
--- a/plugins/kv/drivers/boltdb/plugin.go
+++ b/plugins/kv/drivers/boltdb/plugin.go
@@ -2,12 +2,15 @@ package boltdb
 
 import (
 	"github.com/spiral/errors"
+	"github.com/spiral/roadrunner/v2/common/kv"
 	"github.com/spiral/roadrunner/v2/plugins/config"
-	"github.com/spiral/roadrunner/v2/plugins/kv"
 	"github.com/spiral/roadrunner/v2/plugins/logger"
 )
 
-const PluginName = "boltdb"
+const (
+	PluginName     string = "boltdb"
+	RootPluginName string = "kv"
+)
 
 // Plugin BoltDB K/V storage.
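+// The driver is enabled only when the root `kv` config section is present; hence the
+// RootPluginName constant introduced above, which Init below checks before anything
+// else, returning errors.Disabled when the section is missing.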
type Plugin struct { @@ -21,7 +24,7 @@ type Plugin struct { } func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - if !cfg.Has(kv.PluginName) { + if !cfg.Has(RootPluginName) { return errors.E(errors.Disabled) } diff --git a/plugins/kv/drivers/memcached/config.go b/plugins/kv/drivers/memcached/config.go index 7aad53b6..6d413790 100644 --- a/plugins/kv/drivers/memcached/config.go +++ b/plugins/kv/drivers/memcached/config.go @@ -7,6 +7,6 @@ type Config struct { func (s *Config) InitDefaults() { if s.Addr == nil { - s.Addr = []string{"localhost:11211"} // default url for memcached + s.Addr = []string{"127.0.0.1:11211"} // default url for memcached } } diff --git a/plugins/kv/drivers/memcached/driver.go b/plugins/kv/drivers/memcached/driver.go index 14e7c078..e24747fe 100644 --- a/plugins/kv/drivers/memcached/driver.go +++ b/plugins/kv/drivers/memcached/driver.go @@ -7,7 +7,6 @@ import ( "github.com/bradfitz/gomemcache/memcache" "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" ) @@ -21,7 +20,7 @@ type Driver struct { // NewMemcachedDriver returns a memcache client using the provided server(s) // with equal weight. If a server is listed multiple times, // it gets a proportional amount of weight. -func NewMemcachedDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (kv.Storage, error) { +func NewMemcachedDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) { const op = errors.Op("new_memcached_driver") s := &Driver{ diff --git a/plugins/kv/drivers/memcached/plugin.go b/plugins/kv/drivers/memcached/plugin.go index 22ea5cca..59a2b7cb 100644 --- a/plugins/kv/drivers/memcached/plugin.go +++ b/plugins/kv/drivers/memcached/plugin.go @@ -2,12 +2,15 @@ package memcached import ( "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/kv" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" ) -const PluginName = "memcached" +const ( + PluginName string = "memcached" + RootPluginName string = "kv" +) type Plugin struct { // config plugin @@ -17,7 +20,7 @@ type Plugin struct { } func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - if !cfg.Has(kv.PluginName) { + if !cfg.Has(RootPluginName) { return errors.E(errors.Disabled) } diff --git a/plugins/kv/plugin.go b/plugins/kv/plugin.go index 03dbaed6..53fade97 100644 --- a/plugins/kv/plugin.go +++ b/plugins/kv/plugin.go @@ -5,10 +5,12 @@ import ( endure "github.com/spiral/endure/pkg/container" "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/kv" "github.com/spiral/roadrunner/v2/plugins/config" "github.com/spiral/roadrunner/v2/plugins/logger" ) +// PluginName linked to the memory, boltdb, memcached, redis plugins. DO NOT change w/o sync. const PluginName string = "kv" const ( @@ -25,9 +27,9 @@ const ( type Plugin struct { log logger.Logger // constructors contains general storage constructors, such as boltdb, memory, memcached, redis. - constructors map[string]Constructor + constructors map[string]kv.Constructor // storages contains user-defined storages, such as boltdb-north, memcached-us and so on. 
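+	// (the map keys are the user-defined storage names from the `kv` config section, so a
+	// single constructor, e.g. boltdb, may back several storages such as boltdb-south and
+	// boltdb-north)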
- storages map[string]Storage + storages map[string]kv.Storage // KV configuration cfg Config cfgPlugin config.Configurer @@ -43,8 +45,8 @@ func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error { if err != nil { return errors.E(op, err) } - p.constructors = make(map[string]Constructor, 5) - p.storages = make(map[string]Storage, 5) + p.constructors = make(map[string]kv.Constructor, 5) + p.storages = make(map[string]kv.Storage, 5) p.log = log p.cfgPlugin = cfg return nil @@ -78,7 +80,7 @@ func (p *Plugin) Serve() chan error { //nolint:gocognit memcached: driver: memcached - addr: [ "localhost:11211" ] + addr: [ "127.0.0.1:11211" ] For this config we should have 3 constructors: memory, boltdb and memcached but 4 KVs: default, boltdb-south, boltdb-north and memcached @@ -203,7 +205,7 @@ func (p *Plugin) Collects() []interface{} { } } -func (p *Plugin) GetAllStorageDrivers(name endure.Named, constructor Constructor) { +func (p *Plugin) GetAllStorageDrivers(name endure.Named, constructor kv.Constructor) { // save the storage constructor p.constructors[name.Name()] = constructor } diff --git a/plugins/kv/rpc.go b/plugins/kv/rpc.go index 3f7ba97c..ad4aefa9 100644 --- a/plugins/kv/rpc.go +++ b/plugins/kv/rpc.go @@ -2,6 +2,7 @@ package kv import ( "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/kv" "github.com/spiral/roadrunner/v2/plugins/logger" kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" ) @@ -9,7 +10,7 @@ import ( // Wrapper for the plugin type rpc struct { // all available storages - storages map[string]Storage + storages map[string]kv.Storage // svc is a plugin implementing Storage interface srv *Plugin // Logger diff --git a/plugins/memory/kv.go b/plugins/memory/kv.go index c13c2314..68ea7266 100644 --- a/plugins/memory/kv.go +++ b/plugins/memory/kv.go @@ -7,7 +7,6 @@ import ( "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" ) @@ -21,7 +20,7 @@ type Driver struct { cfg *Config } -func NewInMemoryDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (kv.Storage, error) { +func NewInMemoryDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stop chan struct{}) (*Driver, error) { const op = errors.Op("new_in_memory_driver") d := &Driver{ diff --git a/plugins/memory/plugin.go b/plugins/memory/plugin.go index 70badf15..7d418a70 100644 --- a/plugins/memory/plugin.go +++ b/plugins/memory/plugin.go @@ -2,9 +2,9 @@ package memory import ( "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/kv" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -21,7 +21,6 @@ type Plugin struct { func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { p.log = log - p.log = log p.cfgPlugin = cfg p.stop = make(chan struct{}, 1) return nil diff --git a/plugins/memory/pubsub.go b/plugins/memory/pubsub.go index d027a8a5..c79f3eb0 100644 --- a/plugins/memory/pubsub.go +++ b/plugins/memory/pubsub.go @@ -3,8 +3,8 @@ package memory import ( "sync" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/pkg/bst" - "github.com/spiral/roadrunner/v2/pkg/pubsub" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ 
-17,7 +17,7 @@ type PubSubDriver struct { log logger.Logger } -func NewPubSubDriver(log logger.Logger, _ string) (pubsub.PubSub, error) { +func NewPubSubDriver(log logger.Logger, _ string) (*PubSubDriver, error) { ps := &PubSubDriver{ pushCh: make(chan *pubsub.Message, 10), storage: bst.NewBST(), diff --git a/plugins/metrics/config.go b/plugins/metrics/config.go index dd36005e..a2835130 100644 --- a/plugins/metrics/config.go +++ b/plugins/metrics/config.go @@ -135,6 +135,6 @@ func (c *Config) getCollectors() (map[string]prometheus.Collector, error) { func (c *Config) InitDefaults() { if c.Address == "" { - c.Address = "localhost:2112" + c.Address = "127.0.0.1:2112" } } diff --git a/plugins/redis/channel.go b/plugins/redis/channel.go index 5817853c..0cd62d19 100644 --- a/plugins/redis/channel.go +++ b/plugins/redis/channel.go @@ -6,7 +6,7 @@ import ( "github.com/go-redis/redis/v8" "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/logger" "github.com/spiral/roadrunner/v2/utils" ) diff --git a/plugins/redis/config.go b/plugins/redis/config.go index 41348236..9acb4b47 100644 --- a/plugins/redis/config.go +++ b/plugins/redis/config.go @@ -29,6 +29,6 @@ type Config struct { // InitDefaults initializing fill config with default values func (s *Config) InitDefaults() { if s.Addrs == nil { - s.Addrs = []string{"localhost:6379"} // default addr is pointing to local storage + s.Addrs = []string{"127.0.0.1:6379"} // default addr is pointing to local storage } } diff --git a/plugins/redis/kv.go b/plugins/redis/kv.go index 2e4b9bfd..29f89d46 100644 --- a/plugins/redis/kv.go +++ b/plugins/redis/kv.go @@ -8,7 +8,6 @@ import ( "github.com/go-redis/redis/v8" "github.com/spiral/errors" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" "github.com/spiral/roadrunner/v2/utils" @@ -20,7 +19,7 @@ type Driver struct { cfg *Config } -func NewRedisDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (kv.Storage, error) { +func NewRedisDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) { const op = errors.Op("new_boltdb_driver") d := &Driver{ diff --git a/plugins/redis/plugin.go b/plugins/redis/plugin.go index 9d98790b..3c62a63f 100644 --- a/plugins/redis/plugin.go +++ b/plugins/redis/plugin.go @@ -5,9 +5,9 @@ import ( "github.com/go-redis/redis/v8" "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/kv" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/kv" "github.com/spiral/roadrunner/v2/plugins/logger" ) diff --git a/plugins/redis/pubsub.go b/plugins/redis/pubsub.go index 4e41acb5..01efc623 100644 --- a/plugins/redis/pubsub.go +++ b/plugins/redis/pubsub.go @@ -6,7 +6,7 @@ import ( "github.com/go-redis/redis/v8" "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/config" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -21,7 +21,7 @@ type PubSubDriver struct { stopCh chan struct{} } -func NewPubSubDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stopCh chan struct{}) (pubsub.PubSub, error) { +func NewPubSubDriver(log 
logger.Logger, key string, cfgPlugin config.Configurer, stopCh chan struct{}) (*PubSubDriver, error) { const op = errors.Op("new_pub_sub_driver") ps := &PubSubDriver{ log: log, diff --git a/plugins/reload/plugin.go b/plugins/reload/plugin.go index 7e6bdfec..d3271d6c 100644 --- a/plugins/reload/plugin.go +++ b/plugins/reload/plugin.go @@ -117,7 +117,7 @@ func (s *Plugin) Serve() chan error { timer.Stop() // replace previous value in map by more recent without adding new one updated[cfg.service] = cfg.serviceConfig - // if we getting a lot of events, we shouldn't restart particular service on each of it (user doing batch move or very fast typing) + // if we are getting a lot of events, we shouldn't restart particular service on each of it (user doing batch move or very fast typing) // instead, we are resetting the timer and wait for s.cfg.Interval time // If there is no more events, we restart service only once timer.Reset(s.cfg.Interval) diff --git a/plugins/server/interface.go b/plugins/server/interface.go index 0424d52d..b0f84a7f 100644 --- a/plugins/server/interface.go +++ b/plugins/server/interface.go @@ -19,5 +19,5 @@ type Server interface { // NewWorker return a new worker with provided and attached by the user listeners and environment variables NewWorker(ctx context.Context, env Env, listeners ...events.Listener) (*worker.Process, error) // NewWorkerPool return new pool of workers (PHP) with attached events listeners, env variables and based on the provided configuration - NewWorkerPool(ctx context.Context, opt pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error) + NewWorkerPool(ctx context.Context, opt *pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error) } diff --git a/plugins/server/plugin.go b/plugins/server/plugin.go index 00639f43..1694cdf1 100644 --- a/plugins/server/plugin.go +++ b/plugins/server/plugin.go @@ -21,14 +21,14 @@ import ( "github.com/spiral/roadrunner/v2/utils" ) -// PluginName for the server -const PluginName = "server" - -// RrRelay env variable key (internal) -const RrRelay = "RR_RELAY" - -// RrRPC env variable key (internal) if the RPC presents -const RrRPC = "RR_RPC" +const ( + // PluginName for the server + PluginName = "server" + // RrRelay env variable key (internal) + RrRelay = "RR_RELAY" + // RrRPC env variable key (internal) if the RPC presents + RrRPC = "RR_RPC" +) // Plugin manages worker type Plugin struct { @@ -124,7 +124,7 @@ func (server *Plugin) NewWorker(ctx context.Context, env Env, listeners ...event const op = errors.Op("server_plugin_new_worker") list := make([]events.Listener, 0, len(listeners)) - list = append(list, server.collectWorkerLogs) + list = append(list, server.collectWorkerEvents) spawnCmd, err := server.CmdFactory(env) if err != nil { @@ -140,15 +140,16 @@ func (server *Plugin) NewWorker(ctx context.Context, env Env, listeners ...event } // NewWorkerPool issues new worker pool. 
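+// The config is now taken by pointer, and the pool attaches two internal listeners
+// (collectPoolEvents and collectWorkerEvents, defined below) ahead of any user-provided
+// ones. A minimal usage sketch from a dependent plugin, assuming srv is the injected
+// *server.Plugin and cfg a *pool.Config (this mirrors the jobs plugin's Reset above):
+//
+//	p, err := srv.NewWorkerPool(context.Background(), cfg, map[string]string{RrMode: RrModeJobs})
+//	if err != nil {
+//		return err
+//	}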
-func (server *Plugin) NewWorkerPool(ctx context.Context, opt pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error) { +func (server *Plugin) NewWorkerPool(ctx context.Context, opt *pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error) { const op = errors.Op("server_plugin_new_worker_pool") + spawnCmd, err := server.CmdFactory(env) if err != nil { return nil, errors.E(op, err) } - list := make([]events.Listener, 0, 1) - list = append(list, server.collectEvents) + list := make([]events.Listener, 0, 2) + list = append(list, server.collectPoolEvents, server.collectWorkerEvents) if len(listeners) != 0 { list = append(list, listeners...) } @@ -209,7 +210,7 @@ func (server *Plugin) setEnv(e Env) []string { return env } -func (server *Plugin) collectEvents(event interface{}) { +func (server *Plugin) collectPoolEvents(event interface{}) { if we, ok := event.(events.PoolEvent); ok { switch we.Event { case events.EventMaxMemory: @@ -238,7 +239,9 @@ func (server *Plugin) collectEvents(event interface{}) { server.log.Warn("requested pool restart") } } +} +func (server *Plugin) collectWorkerEvents(event interface{}) { if we, ok := event.(events.WorkerEvent); ok { switch we.Event { case events.EventWorkerError: @@ -263,17 +266,3 @@ func (server *Plugin) collectEvents(event interface{}) { } } } - -func (server *Plugin) collectWorkerLogs(event interface{}) { - if we, ok := event.(events.WorkerEvent); ok { - switch we.Event { - case events.EventWorkerError: - server.log.Error(strings.TrimRight(we.Payload.(error).Error(), " \n\t")) - case events.EventWorkerLog: - server.log.Debug(strings.TrimRight(utils.AsString(we.Payload.([]byte)), " \n\t")) - // stderr event is INFO level - case events.EventWorkerStderr: - server.log.Info(strings.TrimRight(utils.AsString(we.Payload.([]byte)), " \n\t")) - } - } -} diff --git a/plugins/websockets/executor/executor.go b/plugins/websockets/executor/executor.go index 664b4dfd..c1f79a78 100644 --- a/plugins/websockets/executor/executor.go +++ b/plugins/websockets/executor/executor.go @@ -7,7 +7,7 @@ import ( json "github.com/json-iterator/go" "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/logger" "github.com/spiral/roadrunner/v2/plugins/websockets/commands" "github.com/spiral/roadrunner/v2/plugins/websockets/connection" diff --git a/plugins/websockets/plugin.go b/plugins/websockets/plugin.go index 1115bd10..2df23f11 100644 --- a/plugins/websockets/plugin.go +++ b/plugins/websockets/plugin.go @@ -10,10 +10,10 @@ import ( "github.com/google/uuid" json "github.com/json-iterator/go" "github.com/spiral/errors" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/pkg/payload" phpPool "github.com/spiral/roadrunner/v2/pkg/pool" "github.com/spiral/roadrunner/v2/pkg/process" - "github.com/spiral/roadrunner/v2/pkg/pubsub" "github.com/spiral/roadrunner/v2/pkg/worker" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/config" @@ -109,7 +109,7 @@ func (p *Plugin) Serve() chan error { p.Lock() defer p.Unlock() - p.phpPool, err = p.server.NewWorkerPool(context.Background(), phpPool.Config{ + p.phpPool, err = p.server.NewWorkerPool(context.Background(), &phpPool.Config{ Debug: p.cfg.Pool.Debug, NumWorkers: p.cfg.Pool.NumWorkers, MaxJobs: p.cfg.Pool.MaxJobs, @@ -243,13 +243,13 @@ func (p *Plugin) Middleware(next http.Handler) http.Handler { } // Workers returns slice with the 
process states for the workers -func (p *Plugin) Workers() []process.State { +func (p *Plugin) Workers() []*process.State { p.RLock() defer p.RUnlock() workers := p.workers() - ps := make([]process.State, 0, len(workers)) + ps := make([]*process.State, 0, len(workers)) for i := 0; i < len(workers); i++ { state, err := process.WorkerProcessState(workers[i]) if err != nil { @@ -276,7 +276,7 @@ func (p *Plugin) Reset() error { p.phpPool = nil var err error - p.phpPool, err = p.server.NewWorkerPool(context.Background(), phpPool.Config{ + p.phpPool, err = p.server.NewWorkerPool(context.Background(), &phpPool.Config{ Debug: p.cfg.Pool.Debug, NumWorkers: p.cfg.Pool.NumWorkers, MaxJobs: p.cfg.Pool.MaxJobs, @@ -337,7 +337,7 @@ func (p *Plugin) defaultAccessValidator(pool phpPool.Pool) validator.AccessValid func exec(ctx []byte, pool phpPool.Pool) (*validator.AccessValidator, error) { const op = errors.Op("exec") - pd := payload.Payload{ + pd := &payload.Payload{ Context: ctx, } diff --git a/plugins/websockets/pool/workers_pool.go b/plugins/websockets/pool/workers_pool.go index 752ba3ce..758620f6 100644 --- a/plugins/websockets/pool/workers_pool.go +++ b/plugins/websockets/pool/workers_pool.go @@ -4,7 +4,7 @@ import ( "sync" json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/logger" "github.com/spiral/roadrunner/v2/plugins/websockets/connection" "github.com/spiral/roadrunner/v2/utils" diff --git a/plugins/websockets/validator/access_validator.go b/plugins/websockets/validator/access_validator.go index e666f846..2685da7f 100644 --- a/plugins/websockets/validator/access_validator.go +++ b/plugins/websockets/validator/access_validator.go @@ -12,6 +12,11 @@ import ( type AccessValidatorFn = func(r *http.Request, channels ...string) (*AccessValidator, error) +const ( + joinServer string = "ws:joinServer" + joinTopics string = "ws:joinTopics" +) + type AccessValidator struct { Header http.Header `json:"headers"` Status int `json:"status"` @@ -26,7 +31,7 @@ func ServerAccessValidator(r *http.Request) ([]byte, error) { return nil, errors.E(op, err) } - defer delete(attributes.All(r), "ws:joinServer") + defer delete(attributes.All(r), joinServer) req := &handler.Request{ RemoteAddr: handler.FetchIP(r.RemoteAddr), @@ -54,7 +59,7 @@ func TopicsAccessValidator(r *http.Request, topics ...string) ([]byte, error) { return nil, errors.E(op, err) } - defer delete(attributes.All(r), "ws:joinTopics") + defer delete(attributes.All(r), joinTopics) req := &handler.Request{ RemoteAddr: handler.FetchIP(r.RemoteAddr), diff --git a/proto/jobs/v1beta/jobs.pb.go b/proto/jobs/v1beta/jobs.pb.go new file mode 100644 index 00000000..6a6f59af --- /dev/null +++ b/proto/jobs/v1beta/jobs.pb.go @@ -0,0 +1,655 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: jobs.proto + +package jobsv1beta + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// single job request +type PushRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Job *Job `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` +} + +func (x *PushRequest) Reset() { + *x = PushRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushRequest) ProtoMessage() {} + +func (x *PushRequest) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushRequest.ProtoReflect.Descriptor instead. +func (*PushRequest) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{0} +} + +func (x *PushRequest) GetJob() *Job { + if x != nil { + return x.Job + } + return nil +} + +// batch jobs request +type PushBatchRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"` +} + +func (x *PushBatchRequest) Reset() { + *x = PushBatchRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushBatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushBatchRequest) ProtoMessage() {} + +func (x *PushBatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushBatchRequest.ProtoReflect.Descriptor instead. +func (*PushBatchRequest) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{1} +} + +func (x *PushBatchRequest) GetJobs() []*Job { + if x != nil { + return x.Jobs + } + return nil +} + +// request to pause/resume/list/Destroy +type Pipelines struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pipelines []string `protobuf:"bytes,1,rep,name=pipelines,proto3" json:"pipelines,omitempty"` +} + +func (x *Pipelines) Reset() { + *x = Pipelines{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Pipelines) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Pipelines) ProtoMessage() {} + +func (x *Pipelines) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Pipelines.ProtoReflect.Descriptor instead. 
+func (*Pipelines) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{2} +} + +func (x *Pipelines) GetPipelines() []string { + if x != nil { + return x.Pipelines + } + return nil +} + +// some endpoints receives nothing +// all endpoints returns nothing, except error +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{3} +} + +type DeclareRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pipeline map[string]string `protobuf:"bytes,1,rep,name=pipeline,proto3" json:"pipeline,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *DeclareRequest) Reset() { + *x = DeclareRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeclareRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeclareRequest) ProtoMessage() {} + +func (x *DeclareRequest) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeclareRequest.ProtoReflect.Descriptor instead. 
+func (*DeclareRequest) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{4} +} + +func (x *DeclareRequest) GetPipeline() map[string]string { + if x != nil { + return x.Pipeline + } + return nil +} + +type Job struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Job string `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Payload string `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + Headers map[string]*HeaderValue `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Options *Options `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *Job) Reset() { + *x = Job{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Job) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Job) ProtoMessage() {} + +func (x *Job) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Job.ProtoReflect.Descriptor instead. +func (*Job) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{5} +} + +func (x *Job) GetJob() string { + if x != nil { + return x.Job + } + return "" +} + +func (x *Job) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Job) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +func (x *Job) GetHeaders() map[string]*HeaderValue { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Job) GetOptions() *Options { + if x != nil { + return x.Options + } + return nil +} + +type Options struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Priority int64 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"` + Pipeline string `protobuf:"bytes,2,opt,name=pipeline,proto3" json:"pipeline,omitempty"` + Delay int64 `protobuf:"varint,3,opt,name=delay,proto3" json:"delay,omitempty"` +} + +func (x *Options) Reset() { + *x = Options{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Options) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Options) ProtoMessage() {} + +func (x *Options) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Options.ProtoReflect.Descriptor instead. 
+func (*Options) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{6} +} + +func (x *Options) GetPriority() int64 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *Options) GetPipeline() string { + if x != nil { + return x.Pipeline + } + return "" +} + +func (x *Options) GetDelay() int64 { + if x != nil { + return x.Delay + } + return 0 +} + +type HeaderValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *HeaderValue) Reset() { + *x = HeaderValue{} + if protoimpl.UnsafeEnabled { + mi := &file_jobs_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderValue) ProtoMessage() {} + +func (x *HeaderValue) ProtoReflect() protoreflect.Message { + mi := &file_jobs_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead. +func (*HeaderValue) Descriptor() ([]byte, []int) { + return file_jobs_proto_rawDescGZIP(), []int{7} +} + +func (x *HeaderValue) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +var File_jobs_proto protoreflect.FileDescriptor + +var file_jobs_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x6a, 0x6f, + 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x22, 0x31, 0x0a, 0x0b, 0x50, 0x75, 0x73, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x38, 0x0a, 0x10, + 0x50, 0x75, 0x73, 0x68, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x24, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62, + 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x22, 0x29, 0x0a, 0x09, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, + 0x6e, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, + 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x94, 0x01, 0x0a, 0x0e, 0x44, + 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x08, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x65, + 0x63, 0x6c, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x69, 0x70, + 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x69, 0x70, 0x65, + 0x6c, 0x69, 0x6e, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x80, 0x02, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x6f, 0x62, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x37, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, + 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x54, + 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x23, 0x0a, + 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x0f, 0x5a, 0x0d, 0x2e, 0x2f, 0x3b, 0x6a, 0x6f, 0x62, 0x73, 0x76, 0x31, 0x62, + 0x65, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_jobs_proto_rawDescOnce sync.Once + file_jobs_proto_rawDescData = file_jobs_proto_rawDesc +) + +func file_jobs_proto_rawDescGZIP() []byte { + file_jobs_proto_rawDescOnce.Do(func() { + file_jobs_proto_rawDescData = protoimpl.X.CompressGZIP(file_jobs_proto_rawDescData) + }) + return file_jobs_proto_rawDescData +} + +var file_jobs_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_jobs_proto_goTypes = []interface{}{ + (*PushRequest)(nil), // 0: jobs.v1beta.PushRequest + (*PushBatchRequest)(nil), // 1: jobs.v1beta.PushBatchRequest + (*Pipelines)(nil), // 2: jobs.v1beta.Pipelines + (*Empty)(nil), // 3: jobs.v1beta.Empty + (*DeclareRequest)(nil), // 4: jobs.v1beta.DeclareRequest + (*Job)(nil), // 5: jobs.v1beta.Job + (*Options)(nil), // 6: jobs.v1beta.Options + (*HeaderValue)(nil), // 7: jobs.v1beta.HeaderValue + nil, // 8: jobs.v1beta.DeclareRequest.PipelineEntry + nil, // 
9: jobs.v1beta.Job.HeadersEntry +} +var file_jobs_proto_depIdxs = []int32{ + 5, // 0: jobs.v1beta.PushRequest.job:type_name -> jobs.v1beta.Job + 5, // 1: jobs.v1beta.PushBatchRequest.jobs:type_name -> jobs.v1beta.Job + 8, // 2: jobs.v1beta.DeclareRequest.pipeline:type_name -> jobs.v1beta.DeclareRequest.PipelineEntry + 9, // 3: jobs.v1beta.Job.headers:type_name -> jobs.v1beta.Job.HeadersEntry + 6, // 4: jobs.v1beta.Job.options:type_name -> jobs.v1beta.Options + 7, // 5: jobs.v1beta.Job.HeadersEntry.value:type_name -> jobs.v1beta.HeaderValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_jobs_proto_init() } +func file_jobs_proto_init() { + if File_jobs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_jobs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushBatchRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Pipelines); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeclareRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Job); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Options); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_jobs_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_jobs_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_jobs_proto_goTypes, + DependencyIndexes: file_jobs_proto_depIdxs, + MessageInfos: file_jobs_proto_msgTypes, + }.Build() + File_jobs_proto = out.File + file_jobs_proto_rawDesc = nil + file_jobs_proto_goTypes = nil + file_jobs_proto_depIdxs = nil +} diff --git a/proto/jobs/v1beta/jobs.proto b/proto/jobs/v1beta/jobs.proto new file mode 100644 index 00000000..68d2ed97 --- 
/dev/null +++ b/proto/jobs/v1beta/jobs.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package jobs.v1beta; +option go_package = "./;jobsv1beta"; + +// single job request +message PushRequest { + Job job = 1; +} + +// batch jobs request +message PushBatchRequest { + repeated Job jobs = 1; +} + +// request to pause/resume/list/Destroy +message Pipelines { + repeated string pipelines = 1; +} + +// some endpoints receives nothing +// all endpoints returns nothing, except error +message Empty {} + +message DeclareRequest { + map<string, string> pipeline = 1; +} + +message Job { + string job = 1; + string id = 2; + string payload = 3; + map<string, HeaderValue> headers = 4; + Options options = 5; +} + +message Options { + int64 priority = 1; + string pipeline = 2; + int64 delay = 3; +} + +message HeaderValue { + repeated string value = 1; +} diff --git a/proto/kv/v1beta/kv.pb.go b/proto/kv/v1beta/kv.pb.go index 622967b8..1e38fe12 100644 --- a/proto/kv/v1beta/kv.pb.go +++ b/proto/kv/v1beta/kv.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.16.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: kv.proto package kvv1beta import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/proto/websockets/v1beta/websockets.pb.go b/proto/websockets/v1beta/websockets.pb.go index ad4ebbe7..b07c271e 100644 --- a/proto/websockets/v1beta/websockets.pb.go +++ b/proto/websockets/v1beta/websockets.pb.go @@ -1,16 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.16.0 +// protoc-gen-go v1.27.1 +// protoc v3.17.3 // source: websockets.proto package websocketsv1beta import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/tests/Dockerfile b/tests/Dockerfile deleted file mode 100644 index e69de29b..00000000 --- a/tests/Dockerfile +++ /dev/null diff --git a/tests/client.php b/tests/client.php index c00cece1..d67f6937 100644 --- a/tests/client.php +++ b/tests/client.php @@ -17,7 +17,7 @@ switch ($goridge) { break; case "tcp": - $relay = new Goridge\SocketRelay("localhost", 9007); + $relay = new Goridge\SocketRelay("127.0.0.1", 9007); break; case "unix": diff --git a/tests/composer.json b/tests/composer.json index 50178d1f..fa5925b7 100644 --- a/tests/composer.json +++ b/tests/composer.json @@ -2,7 +2,7 @@ "minimum-stability": "beta", "prefer-stable": true, "require": { - "nyholm/psr7": "^1.3", + "nyholm/psr7": "^1.4", "spiral/roadrunner": "^2.0", "spiral/roadrunner-http": "^2.0", "temporal/sdk": ">=1.0", diff --git a/tests/docker-compose.yaml b/tests/docker-compose.yaml deleted file mode 100644 index b6ba0f66..00000000 --- a/tests/docker-compose.yaml +++ /dev/null @@ -1,15 +0,0 @@ -version: '3' - -services: - memcached: - image: memcached:latest - ports: - - "0.0.0.0:11211:11211" - redis: - image: redis:6 - ports: - - "6379:6379" - redis2: - image: redis:6 - ports: - - "6378:6379" diff --git a/tests/env/Dockerfile-beanstalkd.yaml b/tests/env/Dockerfile-beanstalkd.yaml new file mode 100644 index 00000000..852385a1 --- /dev/null +++ b/tests/env/Dockerfile-beanstalkd.yaml @@ -0,0 +1,16 @@ +FROM archlinux:latest + +ARG DEBIAN_FRONTEND=noninteractive +RUN pacman-key --init + +RUN Y | pacman -Syu --noconfirm +RUN Y | pacman -S --noconfirm curl base-devel pkgconf + +RUN curl -sL https://github.com/kr/beanstalkd/archive/v1.12.tar.gz | tar xvz -C /tmp + +WORKDIR /tmp/beanstalkd-1.12 +RUN make -j12 +RUN cp beanstalkd /usr/bin + +EXPOSE 11300 +ENTRYPOINT ["/usr/bin/beanstalkd"] diff --git a/tests/env/Dockerfile-elastic-mq.yaml b/tests/env/Dockerfile-elastic-mq.yaml new file mode 100644 index 00000000..e1513450 --- /dev/null +++ b/tests/env/Dockerfile-elastic-mq.yaml @@ -0,0 +1,9 @@ +FROM openjdk:16 + +ADD https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-1.2.0.jar / +COPY custom.conf / +ENTRYPOINT ["java", "-Dconfig.file=custom.conf", "-jar", "/elasticmq-server-1.2.0.jar"] + +EXPOSE 9324 + +CMD ["-help"] diff --git a/tests/env/custom.conf b/tests/env/custom.conf new file mode 100644 index 00000000..9be7730e --- /dev/null +++ b/tests/env/custom.conf @@ -0,0 +1,8 @@ +include classpath("application.conf") + +node-address { + protocol = http + host = "*" + port = 9324 + context-path = "" +} diff --git a/tests/env/docker-compose.yaml b/tests/env/docker-compose.yaml new file mode 100644 index 00000000..4f58d543 --- /dev/null +++ b/tests/env/docker-compose.yaml @@ -0,0 +1,39 @@ +version: '3.8' + +services: + memcached: + image: memcached:latest + ports: + - "127.0.0.1:11211:11211" + redis: + image: redis:6 + ports: + - "127.0.0.1:6379:6379" + redis2: + image: redis:6 + ports: + - "127.0.0.1:6378:6379" + + toxicproxy: + image: shopify/toxiproxy + network_mode: "host" + + beanstalk: + build: + context: . 
diff --git a/tests/env/docker-compose.yaml b/tests/env/docker-compose.yaml
new file mode 100644
index 00000000..4f58d543
--- /dev/null
+++ b/tests/env/docker-compose.yaml
@@ -0,0 +1,39 @@
+version: '3.8'
+
+services:
+  memcached:
+    image: memcached:latest
+    ports:
+      - "127.0.0.1:11211:11211"
+  redis:
+    image: redis:6
+    ports:
+      - "127.0.0.1:6379:6379"
+  redis2:
+    image: redis:6
+    ports:
+      - "127.0.0.1:6378:6379"
+
+  toxicproxy:
+    image: shopify/toxiproxy
+    network_mode: "host"
+
+  beanstalk:
+    build:
+      context: .
+      dockerfile: Dockerfile-beanstalkd.yaml
+    ports:
+      - "127.0.0.1:11300:11300"
+
+  sqs:
+    build:
+      context: .
+      dockerfile: Dockerfile-elastic-mq.yaml
+    ports:
+      - "127.0.0.1:9324:9324"
+
+  rabbitmq:
+    image: rabbitmq:3-management
+    ports:
+      - "127.0.0.1:15672:15672"
+      - "127.0.0.1:5672:5672"
diff --git a/tests/http/client.php b/tests/http/client.php
index ad5cce24..90b5c2b5 100644
--- a/tests/http/client.php
+++ b/tests/http/client.php
@@ -18,7 +18,7 @@ switch ($goridge) {
         break;
 
     case "tcp":
-        $relay = new Goridge\SocketRelay("localhost", 9007);
+        $relay = new Goridge\SocketRelay("127.0.0.1", 9007);
         break;
 
     case "unix":
diff --git a/tests/http/slow-client.php b/tests/http/slow-client.php
index 731232f7..1eaa7bc8 100644
--- a/tests/http/slow-client.php
+++ b/tests/http/slow-client.php
@@ -19,7 +19,7 @@ switch ($goridge) {
         break;
 
    case "tcp":
-        $relay = new Goridge\SocketRelay("localhost", 9007);
+        $relay = new Goridge\SocketRelay("127.0.0.1", 9007);
         break;
 
    case "unix":
diff --git a/tests/jobs_err.php b/tests/jobs_err.php
new file mode 100644
index 00000000..4ccea4f8
--- /dev/null
+++ b/tests/jobs_err.php
@@ -0,0 +1,52 @@
+<?php
+
+/**
+ * @var Goridge\RelayInterface $relay
+ */
+
+use Spiral\Goridge;
+use Spiral\RoadRunner;
+use Spiral\Goridge\StreamRelay;
+
+require __DIR__ . "/vendor/autoload.php";
+
+$rr = new RoadRunner\Worker(new StreamRelay(\STDIN, \STDOUT));
+
+while ($in = $rr->waitPayload()) {
+    try {
+        $ctx = json_decode($in->header, true);
+        $headers = $ctx['headers'];
+
+        $set = isset($headers['attempts']);
+
+        $val = 0;
+
+        if ($set) {
+            $val = intval($headers['attempts'][0]);
+            $val++;
+            $headers['attempts'][0] = strval($val);
+        } else {
+            $headers['attempts'][0] = "1";
+        }
+
+        if ($val > 3) {
+            $rr->respond(new RoadRunner\Payload(json_encode([
+                // no error
+                'type' => 0,
+                'data' => []
+            ])));
+        } else {
+            $rr->respond(new RoadRunner\Payload(json_encode([
+                'type' => 1,
+                'data' => [
+                    'message' => 'error',
+                    'requeue' => true,
+                    'delay_seconds' => 5,
+                    'headers' => $headers
+                ]
+            ])));
+        }
+    } catch (\Throwable $e) {
+        $rr->error((string)$e);
+    }
+}
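jobs_err.php spells out the worker-side acknowledgement protocol these tests rely on: `type` 0 appears to acknowledge a job, while `type` 1 negatively acknowledges it, carrying `requeue`, `delay_seconds`, and the updated `headers`. A sketch of that JSON shape in Go; the struct names are hypothetical, read off the PHP above rather than taken from the RoadRunner source:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// errorData mirrors the "data" object jobs_err.php sends for type 1.
type errorData struct {
	Message      string              `json:"message"`
	Requeue      bool                `json:"requeue"`
	DelaySeconds int                 `json:"delay_seconds"`
	Headers      map[string][]string `json:"headers"`
}

// workerResponse mirrors the envelope: 0 seems to mean "acknowledge",
// 1 "negatively acknowledge and possibly requeue".
type workerResponse struct {
	Type int             `json:"type"`
	Data json.RawMessage `json:"data"`
}

func main() {
	raw := []byte(`{"type":1,"data":{"message":"error","requeue":true,"delay_seconds":5,"headers":{"attempts":["2"]}}}`)

	var resp workerResponse
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	if resp.Type == 1 {
		var ed errorData
		if err := json.Unmarshal(resp.Data, &ed); err != nil {
			panic(err)
		}
		fmt.Printf("requeue=%v after %ds\n", ed.Requeue, ed.DelaySeconds)
	}
}
```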
"/vendor/autoload.php"; + +$rr = new RoadRunner\Worker(new StreamRelay(\STDIN, \STDOUT)); + +while ($in = $rr->waitPayload()) { + try { + $ctx = json_decode($in->header, true); + $headers = $ctx['headers']; + + $rr->respond(new RoadRunner\Payload(json_encode([ + 'type' => 0, + 'data' => [] + ]))); + } catch (\Throwable $e) { + $rr->error((string)$e); + } +} diff --git a/tests/plugins/broadcast/broadcast_plugin_test.go b/tests/plugins/broadcast/broadcast_plugin_test.go index 0ec813f3..d8bedf29 100644 --- a/tests/plugins/broadcast/broadcast_plugin_test.go +++ b/tests/plugins/broadcast/broadcast_plugin_test.go @@ -176,7 +176,7 @@ func TestBroadcastNoConfig(t *testing.T) { } func TestBroadcastSameSubscriber(t *testing.T) { - cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.GracefulShutdownTimeout(time.Second)) assert.NoError(t, err) cfg := &config.Viper{ @@ -189,11 +189,11 @@ func TestBroadcastSameSubscriber(t *testing.T) { mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() - mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6002", "services", []string{"broadcast"}).MinTimes(1) - mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6002", "services", []string{"broadcast"}).AnyTimes() + mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).AnyTimes() mockLogger.EXPECT().Info(`plugin1: {foo hello}`).Times(3) - mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(3) + mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(2) mockLogger.EXPECT().Info(`plugin1: {foo3 hello}`).Times(3) mockLogger.EXPECT().Info(`plugin2: {foo hello}`).Times(3) mockLogger.EXPECT().Info(`plugin3: {foo hello}`).Times(3) @@ -279,14 +279,15 @@ func TestBroadcastSameSubscriber(t *testing.T) { t.Run("PublishHelloFoo3", BroadcastPublishFoo3("6002")) t.Run("PublishAsyncHelloFooFoo2Foo3", BroadcastPublishAsyncFooFoo2Foo3("6002")) - time.Sleep(time.Second * 4) stopCh <- struct{}{} wg.Wait() + + time.Sleep(time.Second * 5) } func TestBroadcastSameSubscriberGlobal(t *testing.T) { - cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.GracefulShutdownTimeout(time.Second)) assert.NoError(t, err) cfg := &config.Viper{ @@ -299,11 +300,11 @@ func TestBroadcastSameSubscriberGlobal(t *testing.T) { mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() - mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6003", "services", []string{"broadcast"}).MinTimes(1) - mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6003", "services", []string{"broadcast"}).AnyTimes() + mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).AnyTimes() mockLogger.EXPECT().Info(`plugin1: {foo hello}`).Times(3) - mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(3) + mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(2) mockLogger.EXPECT().Info(`plugin1: {foo3 hello}`).Times(3) mockLogger.EXPECT().Info(`plugin2: {foo 
hello}`).Times(3) mockLogger.EXPECT().Info(`plugin3: {foo hello}`).Times(3) @@ -389,10 +390,10 @@ func TestBroadcastSameSubscriberGlobal(t *testing.T) { t.Run("PublishHelloFoo3", BroadcastPublishFoo3("6003")) t.Run("PublishAsyncHelloFooFoo2Foo3", BroadcastPublishAsyncFooFoo2Foo3("6003")) - time.Sleep(time.Second * 4) stopCh <- struct{}{} wg.Wait() + time.Sleep(time.Second * 5) } func BroadcastPublishFooFoo2Foo3(port string) func(t *testing.T) { diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml index d8daa251..66114d64 100644 --- a/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml +++ b/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml @@ -3,8 +3,6 @@ rpc: server: command: "php ../../psr-worker-bench.php" - user: "" - group: "" relay: "pipes" relay_timeout: "20s" diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml index 2ca97055..5ae5a101 100644 --- a/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml +++ b/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml @@ -3,8 +3,6 @@ rpc: server: command: "php ../../psr-worker-bench.php" - user: "" - group: "" relay: "pipes" relay_timeout: "20s" @@ -21,7 +19,7 @@ http: redis: addrs: - - "localhost:6379" + - "127.0.0.1:6379" broadcast: test: @@ -29,7 +27,7 @@ broadcast: test2: driver: redis addrs: - - "localhost:6378" + - "127.0.0.1:6378" test3: driver: memory test4: @@ -38,9 +36,4 @@ broadcast: logs: mode: development - level: error - -endure: - grace_period: 120s - print_graph: false - log_level: error + level: info diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml index aa80330e..d8457578 100644 --- a/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml +++ b/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml @@ -23,7 +23,7 @@ broadcast: default: driver: redis addrs: - - "localhost:6379" + - "127.0.0.1:6379" logs: mode: development diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml index 360e05e5..2337b8fe 100644 --- a/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml +++ b/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml @@ -3,8 +3,6 @@ rpc: server: command: "php ../../psr-worker-bench.php" - user: "" - group: "" relay: "pipes" relay_timeout: "20s" @@ -23,11 +21,11 @@ broadcast: test: driver: redis addrs: - - "localhost:6379" + - "127.0.0.1:6379" test2: driver: redis addrs: - - "localhost:6378" + - "127.0.0.1:6378" test3: driver: memory test4: @@ -35,9 +33,4 @@ broadcast: logs: mode: development - level: debug - -endure: - grace_period: 120s - print_graph: false - log_level: error + level: info diff --git a/tests/plugins/broadcast/plugins/plugin1.go b/tests/plugins/broadcast/plugins/plugin1.go index d3b16256..01ad1479 100644 --- a/tests/plugins/broadcast/plugins/plugin1.go +++ b/tests/plugins/broadcast/plugins/plugin1.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,14 @@ type Plugin1 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + + exit chan struct{} } func (p *Plugin1) Init(log logger.Logger, b 
broadcast.Broadcaster) error { p.log = log p.b = b + p.exit = make(chan struct{}, 1) return nil } @@ -39,16 +42,22 @@ func (p *Plugin1) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin1Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin1Name, *msg)) + } } }() @@ -59,6 +68,8 @@ func (p *Plugin1) Stop() error { _ = p.driver.Unsubscribe("1", "foo") _ = p.driver.Unsubscribe("1", "foo2") _ = p.driver.Unsubscribe("1", "foo3") + + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/broadcast/plugins/plugin2.go b/tests/plugins/broadcast/plugins/plugin2.go index 2bd819d2..ee072ffe 100644 --- a/tests/plugins/broadcast/plugins/plugin2.go +++ b/tests/plugins/broadcast/plugins/plugin2.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,13 @@ type Plugin2 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + exit chan struct{} } func (p *Plugin2) Init(log logger.Logger, b broadcast.Broadcaster) error { p.log = log p.b = b + p.exit = make(chan struct{}, 1) return nil } @@ -38,16 +40,22 @@ func (p *Plugin2) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin2Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin2Name, *msg)) + } } }() @@ -56,6 +64,7 @@ func (p *Plugin2) Serve() chan error { func (p *Plugin2) Stop() error { _ = p.driver.Unsubscribe("2", "foo") + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/broadcast/plugins/plugin3.go b/tests/plugins/broadcast/plugins/plugin3.go index ef926222..288201d1 100644 --- a/tests/plugins/broadcast/plugins/plugin3.go +++ b/tests/plugins/broadcast/plugins/plugin3.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,15 @@ type Plugin3 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + + exit chan struct{} } func (p *Plugin3) Init(log logger.Logger, b broadcast.Broadcaster) error { p.log = log p.b = b + + p.exit = make(chan struct{}, 1) return nil } @@ -38,16 +42,22 @@ func (p *Plugin3) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin3Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin3Name, *msg)) + } } }() @@ -56,6 +66,7 @@ func (p *Plugin3) Serve() chan error { func (p *Plugin3) Stop() error { _ = p.driver.Unsubscribe("3", "foo") + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/broadcast/plugins/plugin4.go 
b/tests/plugins/broadcast/plugins/plugin4.go index c9b94777..56f79c0f 100644 --- a/tests/plugins/broadcast/plugins/plugin4.go +++ b/tests/plugins/broadcast/plugins/plugin4.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,15 @@ type Plugin4 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + + exit chan struct{} } func (p *Plugin4) Init(log logger.Logger, b broadcast.Broadcaster) error { p.log = log p.b = b + + p.exit = make(chan struct{}, 1) return nil } @@ -38,16 +42,22 @@ func (p *Plugin4) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin4Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin4Name, *msg)) + } } }() @@ -56,6 +66,8 @@ func (p *Plugin4) Serve() chan error { func (p *Plugin4) Stop() error { _ = p.driver.Unsubscribe("4", "foo") + + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/broadcast/plugins/plugin5.go b/tests/plugins/broadcast/plugins/plugin5.go index 01562a8f..e7cd7e60 100644 --- a/tests/plugins/broadcast/plugins/plugin5.go +++ b/tests/plugins/broadcast/plugins/plugin5.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,15 @@ type Plugin5 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + + exit chan struct{} } func (p *Plugin5) Init(log logger.Logger, b broadcast.Broadcaster) error { p.log = log p.b = b + + p.exit = make(chan struct{}, 1) return nil } @@ -38,16 +42,22 @@ func (p *Plugin5) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin5Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin5Name, *msg)) + } } }() @@ -56,6 +66,8 @@ func (p *Plugin5) Serve() chan error { func (p *Plugin5) Stop() error { _ = p.driver.Unsubscribe("5", "foo") + + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/broadcast/plugins/plugin6.go b/tests/plugins/broadcast/plugins/plugin6.go index 76f2d6e8..08272196 100644 --- a/tests/plugins/broadcast/plugins/plugin6.go +++ b/tests/plugins/broadcast/plugins/plugin6.go @@ -3,7 +3,7 @@ package plugins import ( "fmt" - "github.com/spiral/roadrunner/v2/pkg/pubsub" + "github.com/spiral/roadrunner/v2/common/pubsub" "github.com/spiral/roadrunner/v2/plugins/broadcast" "github.com/spiral/roadrunner/v2/plugins/logger" ) @@ -14,11 +14,15 @@ type Plugin6 struct { log logger.Logger b broadcast.Broadcaster driver pubsub.SubReader + + exit chan struct{} } func (p *Plugin6) Init(log logger.Logger, b broadcast.Broadcaster) error { p.log = log p.b = b + + p.exit = make(chan struct{}, 1) return nil } @@ -38,16 +42,22 @@ func (p *Plugin6) Serve() chan error { go func() { for { - msg, err := p.driver.Next() - if 
err != nil { - panic(err) - } + select { + case <-p.exit: + return + default: + msg, err := p.driver.Next() + if err != nil { + errCh <- err + return + } - if msg == nil { - continue - } + if msg == nil { + continue + } - p.log.Info(fmt.Sprintf("%s: %s", Plugin6Name, *msg)) + p.log.Info(fmt.Sprintf("%s: %s", Plugin6Name, *msg)) + } } }() @@ -56,6 +66,8 @@ func (p *Plugin6) Serve() chan error { func (p *Plugin6) Stop() error { _ = p.driver.Unsubscribe("6", "foo") + + p.exit <- struct{}{} return nil } diff --git a/tests/plugins/config/config_test.go b/tests/plugins/config/config_test.go index b6063cec..87ab1eaa 100755 --- a/tests/plugins/config/config_test.go +++ b/tests/plugins/config/config_test.go @@ -97,7 +97,7 @@ func TestConfigOverwriteValid(t *testing.T) { vp := &config.Viper{} vp.Path = "configs/.rr.yaml" vp.Prefix = "rr" - vp.Flags = []string{"rpc.listen=tcp://localhost:36643"} + vp.Flags = []string{"rpc.listen=tcp://127.0.0.1:36643"} err = container.RegisterAll( &logger.ZapLogger{}, @@ -143,7 +143,7 @@ func TestConfigEnvVariables(t *testing.T) { t.Fatal(err) } - err = os.Setenv("SUPER_RPC_ENV", "tcp://localhost:36643") + err = os.Setenv("SUPER_RPC_ENV", "tcp://127.0.0.1:36643") assert.NoError(t, err) vp := &config.Viper{} @@ -194,7 +194,7 @@ func TestConfigEnvVariablesFail(t *testing.T) { t.Fatal(err) } - err = os.Setenv("SUPER_RPC_ENV", "tcp://localhost:6065") + err = os.Setenv("SUPER_RPC_ENV", "tcp://127.0.0.1:6065") assert.NoError(t, err) vp := &config.Viper{} diff --git a/tests/plugins/config/configs/.rr.yaml b/tests/plugins/config/configs/.rr.yaml index f449dcf3..575cdd33 100755 --- a/tests/plugins/config/configs/.rr.yaml +++ b/tests/plugins/config/configs/.rr.yaml @@ -1,5 +1,5 @@ rpc: - listen: tcp://localhost:6060 + listen: tcp://127.0.0.1:6060 logs: mode: development diff --git a/tests/plugins/config/plugin1.go b/tests/plugins/config/plugin1.go index 1de9a02e..08a48a4f 100755 --- a/tests/plugins/config/plugin1.go +++ b/tests/plugins/config/plugin1.go @@ -83,7 +83,7 @@ func (f *Foo) Serve() chan error { return errCh } - if allCfg.RPC.Listen != "tcp://localhost:6060" { + if allCfg.RPC.Listen != "tcp://127.0.0.1:6060" { errCh <- errors.E(op, errors.Str("RPC.Listen should be parsed")) return errCh } diff --git a/tests/plugins/config/plugin2.go b/tests/plugins/config/plugin2.go index 9639b170..8c6f36c1 100755 --- a/tests/plugins/config/plugin2.go +++ b/tests/plugins/config/plugin2.go @@ -37,7 +37,7 @@ func (f *Foo2) Serve() chan error { return errCh } - if allCfg.RPC.Listen != "tcp://localhost:36643" { + if allCfg.RPC.Listen != "tcp://127.0.0.1:36643" { errCh <- errors.E(op, errors.Str("RPC.Listen should be overwritten")) return errCh } diff --git a/tests/plugins/gzip/plugin_test.go b/tests/plugins/gzip/plugin_test.go index b254fad5..5612ec94 100644 --- a/tests/plugins/gzip/plugin_test.go +++ b/tests/plugins/gzip/plugin_test.go @@ -89,7 +89,7 @@ func TestGzipPlugin(t *testing.T) { } func headerCheck(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:18953", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:18953", nil) assert.NoError(t, err) client := &http.Client{ Transport: &http.Transport{ diff --git a/tests/plugins/headers/configs/.rr-cors-headers.yaml b/tests/plugins/headers/configs/.rr-cors-headers.yaml index 9d2ef7e5..b4e960f1 100644 --- a/tests/plugins/headers/configs/.rr-cors-headers.yaml +++ b/tests/plugins/headers/configs/.rr-cors-headers.yaml @@ -1,9 +1,5 @@ server: command: "php ../../http/client.php headers pipes" - user: "" - group: 
"" - env: - "RR_HTTP": "true" relay: "pipes" relay_timeout: "20s" diff --git a/tests/plugins/headers/headers_plugin_test.go b/tests/plugins/headers/headers_plugin_test.go index 49d86b00..a03a3c34 100644 --- a/tests/plugins/headers/headers_plugin_test.go +++ b/tests/plugins/headers/headers_plugin_test.go @@ -154,7 +154,7 @@ func TestRequestHeaders(t *testing.T) { } func reqHeaders(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:22655?hello=value", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:22655?hello=value", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -239,7 +239,7 @@ func TestResponseHeaders(t *testing.T) { } func resHeaders(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:22455?hello=value", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:22455?hello=value", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -326,7 +326,7 @@ func TestCORSHeaders(t *testing.T) { } func corsHeadersPass(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:22855", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:22855", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -346,7 +346,7 @@ func corsHeadersPass(t *testing.T) { } func corsHeaders(t *testing.T) { - req, err := http.NewRequest("OPTIONS", "http://localhost:22855", nil) + req, err := http.NewRequest("OPTIONS", "http://127.0.0.1:22855", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) diff --git a/tests/plugins/http/configs/.rr-env.yaml b/tests/plugins/http/configs/.rr-env.yaml index 99358b04..4ea8ec73 100644 --- a/tests/plugins/http/configs/.rr-env.yaml +++ b/tests/plugins/http/configs/.rr-env.yaml @@ -3,17 +3,13 @@ rpc: server: command: "php ../../http/client.php env pipes" - user: "" - group: "" - env: - "env_key": "ENV_VALUE" relay: "pipes" relay_timeout: "20s" http: address: 127.0.0.1:12084 max_request_size: 1024 - middleware: [ "" ] + middleware: [] env: "RR_HTTP": "true" "env_key": "ENV_VALUE" diff --git a/tests/plugins/http/configs/.rr-http.yaml b/tests/plugins/http/configs/.rr-http.yaml index c95bc049..b4910160 100644 --- a/tests/plugins/http/configs/.rr-http.yaml +++ b/tests/plugins/http/configs/.rr-http.yaml @@ -3,10 +3,6 @@ rpc: server: command: "php ../../http/client.php echo pipes" - user: "" - group: "" - env: - "RR_HTTP": "true" relay: "pipes" relay_timeout: "20s" diff --git a/tests/plugins/http/handler_test.go b/tests/plugins/http/handler_test.go index 40e3a720..c8709678 100644 --- a/tests/plugins/http/handler_test.go +++ b/tests/plugins/http/handler_test.go @@ -26,7 +26,7 @@ func TestHandler_Echo(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -56,7 +56,7 @@ func TestHandler_Echo(t *testing.T) { }(hs) time.Sleep(time.Millisecond * 10) - body, r, err := get("http://localhost:8177/?hello=world") + body, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -77,7 +77,7 @@ func TestHandler_Headers(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "header", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, 
DestroyTimeout: time.Second * 1000, @@ -111,7 +111,7 @@ func TestHandler_Headers(t *testing.T) { }() time.Sleep(time.Millisecond * 100) - req, err := http.NewRequest("GET", "http://localhost:8078?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:8078?hello=world", nil) assert.NoError(t, err) req.Header.Add("input", "sample") @@ -138,7 +138,7 @@ func TestHandler_Empty_User_Agent(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -172,7 +172,7 @@ func TestHandler_Empty_User_Agent(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - req, err := http.NewRequest("GET", "http://localhost:19658?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:19658?hello=world", nil) assert.NoError(t, err) req.Header.Add("user-agent", "") @@ -198,7 +198,7 @@ func TestHandler_User_Agent(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -232,7 +232,7 @@ func TestHandler_User_Agent(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - req, err := http.NewRequest("GET", "http://localhost:25688?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:25688?hello=world", nil) assert.NoError(t, err) req.Header.Add("User-Agent", "go-agent") @@ -258,7 +258,7 @@ func TestHandler_Cookies(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "cookie", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -292,7 +292,7 @@ func TestHandler_Cookies(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - req, err := http.NewRequest("GET", "http://localhost:8079", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:8079", nil) assert.NoError(t, err) req.AddCookie(&http.Cookie{Name: "input", Value: "input-value"}) @@ -323,7 +323,7 @@ func TestHandler_JsonPayload_POST(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -359,7 +359,7 @@ func TestHandler_JsonPayload_POST(t *testing.T) { req, err := http.NewRequest( "POST", - "http://localhost"+hs.Addr, + "http://127.0.0.1"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`), ) assert.NoError(t, err) @@ -387,7 +387,7 @@ func TestHandler_JsonPayload_PUT(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -421,7 +421,7 @@ func TestHandler_JsonPayload_PUT(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - req, err := http.NewRequest("PUT", "http://localhost"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`)) + req, err := 
http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`)) assert.NoError(t, err) req.Header.Add("Content-Type", "application/json") @@ -447,7 +447,7 @@ func TestHandler_JsonPayload_PATCH(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -481,7 +481,7 @@ func TestHandler_JsonPayload_PATCH(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - req, err := http.NewRequest("PATCH", "http://localhost"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`)) + req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`)) assert.NoError(t, err) req.Header.Add("Content-Type", "application/json") @@ -507,7 +507,7 @@ func TestHandler_FormData_POST(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -552,7 +552,7 @@ func TestHandler_FormData_POST(t *testing.T) { form.Add("arr[c]p", "l") form.Add("arr[c]z", "") - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, strings.NewReader(form.Encode())) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode())) assert.NoError(t, err) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") @@ -580,7 +580,7 @@ func TestHandler_FormData_POST_Overwrite(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -626,7 +626,7 @@ func TestHandler_FormData_POST_Overwrite(t *testing.T) { form.Add("arr[c]p", "l") form.Add("arr[c]z", "") - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, strings.NewReader(form.Encode())) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode())) assert.NoError(t, err) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") @@ -653,7 +653,7 @@ func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -698,7 +698,7 @@ func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) { form.Add("arr[c]p", "l") form.Add("arr[c]z", "") - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, strings.NewReader(form.Encode())) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode())) assert.NoError(t, err) req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8") @@ -725,7 +725,7 @@ func TestHandler_FormData_PUT(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, 
AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -770,7 +770,7 @@ func TestHandler_FormData_PUT(t *testing.T) { form.Add("arr[c]p", "l") form.Add("arr[c]z", "") - req, err := http.NewRequest("PUT", "http://localhost"+hs.Addr, strings.NewReader(form.Encode())) + req, err := http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode())) assert.NoError(t, err) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") @@ -797,7 +797,7 @@ func TestHandler_FormData_PATCH(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -842,7 +842,7 @@ func TestHandler_FormData_PATCH(t *testing.T) { form.Add("arr[c]p", "l") form.Add("arr[c]z", "") - req, err := http.NewRequest("PATCH", "http://localhost"+hs.Addr, strings.NewReader(form.Encode())) + req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode())) assert.NoError(t, err) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") @@ -869,7 +869,7 @@ func TestHandler_Multipart_POST(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -956,7 +956,7 @@ func TestHandler_Multipart_POST(t *testing.T) { t.Errorf("error closing the writer: error %v", err) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -983,7 +983,7 @@ func TestHandler_Multipart_PUT(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1070,7 +1070,7 @@ func TestHandler_Multipart_PUT(t *testing.T) { t.Errorf("error closing the writer: error %v", err) } - req, err := http.NewRequest("PUT", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -1097,7 +1097,7 @@ func TestHandler_Multipart_PATCH(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1186,7 +1186,7 @@ func TestHandler_Multipart_PATCH(t *testing.T) { t.Errorf("error closing the writer: error %v", err) } - req, err := http.NewRequest("PATCH", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -1213,7 +1213,7 @@ func TestHandler_Error(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") }, pipe.NewPipeFactory(), - 
pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1247,7 +1247,7 @@ func TestHandler_Error(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - _, r, err := get("http://localhost:8177/?hello=world") + _, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -1259,7 +1259,7 @@ func TestHandler_Error2(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error2", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1293,7 +1293,7 @@ func TestHandler_Error2(t *testing.T) { }() time.Sleep(time.Millisecond * 10) - _, r, err := get("http://localhost:8177/?hello=world") + _, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -1305,7 +1305,7 @@ func TestHandler_Error3(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "pid", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1344,7 +1344,7 @@ func TestHandler_Error3(t *testing.T) { b2.Write([]byte(" ")) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, b2) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, b2) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -1364,7 +1364,7 @@ func TestHandler_ResponseDuration(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1409,7 +1409,7 @@ func TestHandler_ResponseDuration(t *testing.T) { } }) - body, r, err := get("http://localhost:8177/?hello=world") + body, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -1425,7 +1425,7 @@ func TestHandler_ResponseDurationDelayed(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echoDelay", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1470,7 +1470,7 @@ func TestHandler_ResponseDurationDelayed(t *testing.T) { } }) - body, r, err := get("http://localhost:8177/?hello=world") + body, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -1485,7 +1485,7 @@ func TestHandler_ErrorDuration(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1530,7 +1530,7 @@ func TestHandler_ErrorDuration(t *testing.T) { } }) - _, r, err := get("http://localhost:8177/?hello=world") + _, r, err := get("http://127.0.0.1:8177/?hello=world") assert.NoError(t, err) defer func() { _ = r.Body.Close() @@ -1560,7 +1560,7 @@ func TestHandler_IP(t *testing.T) { p, err := 
pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1621,7 +1621,7 @@ func TestHandler_XRealIP(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1687,7 +1687,7 @@ func TestHandler_XForwardedFor(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1752,7 +1752,7 @@ func TestHandler_XForwardedFor_NotTrustedRemoteIp(t *testing.T) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1800,7 +1800,7 @@ func BenchmarkHandler_Listen_Echo(b *testing.B) { p, err := pool.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") }, pipe.NewPipeFactory(), - pool.Config{ + &pool.Config{ NumWorkers: uint64(runtime.NumCPU()), AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -1838,7 +1838,7 @@ func BenchmarkHandler_Listen_Echo(b *testing.B) { b.ReportAllocs() bb := "WORLD" for n := 0; n < b.N; n++ { - r, err := http.Get("http://localhost:8177/?hello=world") + r, err := http.Get("http://127.0.0.1:8177/?hello=world") if err != nil { b.Fail() } diff --git a/tests/plugins/http/http_plugin_test.go b/tests/plugins/http/http_plugin_test.go index c3949911..db62781f 100644 --- a/tests/plugins/http/http_plugin_test.go +++ b/tests/plugins/http/http_plugin_test.go @@ -251,7 +251,7 @@ func TestHTTPInformerReset(t *testing.T) { } func echoHTTP(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:10084?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:10084?hello=world", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -371,7 +371,7 @@ func TestSSL(t *testing.T) { } func sslNoRedirect(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:8085?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:8085?hello=world", nil) assert.NoError(t, err) r, err := sslClient.Do(req) @@ -393,7 +393,7 @@ func sslNoRedirect(t *testing.T) { } func sslEcho(t *testing.T) { - req, err := http.NewRequest("GET", "https://localhost:8893?hello=world", nil) + req, err := http.NewRequest("GET", "https://127.0.0.1:8893?hello=world", nil) assert.NoError(t, err) r, err := sslClient.Do(req) @@ -505,7 +505,7 @@ func TestSSLRedirect(t *testing.T) { } func sslRedirect(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:8087?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:8087?hello=world", nil) assert.NoError(t, err) r, err := sslClient.Do(req) @@ -593,7 +593,7 @@ func TestSSLPushPipes(t *testing.T) { } func sslPush(t *testing.T) { - req, err := http.NewRequest("GET", 
"https://localhost:8894?hello=world", nil) + req, err := http.NewRequest("GET", "https://127.0.0.1:8894?hello=world", nil) assert.NoError(t, err) r, err := sslClient.Do(req) @@ -604,7 +604,7 @@ func sslPush(t *testing.T) { b, err := ioutil.ReadAll(r.Body) assert.NoError(t, err) - assert.Equal(t, "", r.Header.Get("Http2-Push")) + assert.Equal(t, "", r.Header.Get("Http2-Release")) assert.NoError(t, err) assert.Equal(t, 201, r.StatusCode) @@ -864,7 +864,7 @@ func TestH2CUpgrade(t *testing.T) { } func h2cUpgrade(t *testing.T) { - req, err := http.NewRequest("PRI", "http://localhost:8083?hello=world", nil) + req, err := http.NewRequest("PRI", "http://127.0.0.1:8083?hello=world", nil) if err != nil { t.Fatal(err) } @@ -955,7 +955,7 @@ func TestH2C(t *testing.T) { } func h2c(t *testing.T) { - req, err := http.NewRequest("PRI", "http://localhost:8083?hello=world", nil) + req, err := http.NewRequest("PRI", "http://127.0.0.1:8083?hello=world", nil) if err != nil { t.Fatal(err) } @@ -1047,7 +1047,7 @@ func TestHttpMiddleware(t *testing.T) { } func middleware(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:18903?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:18903?hello=world", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -1062,7 +1062,7 @@ func middleware(t *testing.T) { err = r.Body.Close() assert.NoError(t, err) - req, err = http.NewRequest("GET", "http://localhost:18903/halt", nil) + req, err = http.NewRequest("GET", "http://127.0.0.1:18903/halt", nil) assert.NoError(t, err) r, err = http.DefaultClient.Do(req) @@ -1127,7 +1127,7 @@ logs: mockLogger.EXPECT().Debug(gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).MinTimes(1) - mockLogger.EXPECT().Debug("201 GET http://localhost:34999/?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34999/?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Info("WORLD").MinTimes(1) mockLogger.EXPECT().Debug("worker event received", "event", events.EventWorkerLog, "worker state", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the workerlogerror @@ -1193,7 +1193,7 @@ logs: } func echoError(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:34999?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:34999?hello=world", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -1278,7 +1278,7 @@ func TestHttpEnvVariables(t *testing.T) { } func envVarsTest(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:12084", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:12084", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -1432,7 +1432,7 @@ func TestHTTPSupervisedPool(t *testing.T) { } func echoHTTP2(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:18888?hello=world", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:18888?hello=world", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) @@ -1638,7 +1638,7 @@ func bigEchoHTTP(t *testing.T) { bt := bytes.NewBuffer(buf) - req, err := http.NewRequest("GET", "http://localhost:10085?hello=world", bt) + req, err := http.NewRequest("GET", "http://127.0.0.1:10085?hello=world", bt) assert.NoError(t, 
err) r, err := http.DefaultClient.Do(req) @@ -1723,7 +1723,7 @@ func TestStaticEtagPlugin(t *testing.T) { func serveStaticSampleEtag(t *testing.T) { // OK 200 response - b, r, err := get("http://localhost:21603/sample.txt") + b, r, err := get("http://127.0.0.1:21603/sample.txt") assert.NoError(t, err) assert.Equal(t, "sample\n", b) assert.Equal(t, r.StatusCode, http.StatusOK) @@ -1736,7 +1736,7 @@ func serveStaticSampleEtag(t *testing.T) { Timeout: time.Second * 5, } - parsedURL, _ := url.Parse("http://localhost:21603/sample.txt") + parsedURL, _ := url.Parse("http://127.0.0.1:21603/sample.txt") req := &http.Request{ Method: http.MethodGet, @@ -1828,7 +1828,7 @@ func serveStaticSampleNotAllowedPath(t *testing.T) { parsedURL := &url.URL{ Scheme: "http", User: nil, - Host: "localhost:21603", + Host: "127.0.0.1:21603", Path: "%2e%2e%/tests/", } @@ -1845,7 +1845,7 @@ func serveStaticSampleNotAllowedPath(t *testing.T) { parsedURL = &url.URL{ Scheme: "http", User: nil, - Host: "localhost:21603", + Host: "127.0.0.1:21603", Path: "%2e%2e%5ctests/", } @@ -1862,7 +1862,7 @@ func serveStaticSampleNotAllowedPath(t *testing.T) { parsedURL = &url.URL{ Scheme: "http", User: nil, - Host: "localhost:21603", + Host: "127.0.0.1:21603", Path: "..%2ftests/", } @@ -1879,7 +1879,7 @@ func serveStaticSampleNotAllowedPath(t *testing.T) { parsedURL = &url.URL{ Scheme: "http", User: nil, - Host: "localhost:21603", + Host: "127.0.0.1:21603", Path: "%2e%2e%2ftests/", } @@ -1893,7 +1893,7 @@ func serveStaticSampleNotAllowedPath(t *testing.T) { assert.Equal(t, http.StatusBadRequest, resp.StatusCode) _ = resp.Body.Close() - _, r, err := get("http://localhost:21603/../../../../tests/../static/sample.txt") + _, r, err := get("http://127.0.0.1:21603/../../../../tests/../static/sample.txt") assert.NoError(t, err) assert.Equal(t, 403, r.StatusCode) _ = r.Body.Close() @@ -1971,7 +1971,7 @@ func TestStaticPlugin(t *testing.T) { } func staticHeaders(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:21603/client.php", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:21603/client.php", nil) if err != nil { t.Fatal(err) } @@ -1999,7 +1999,7 @@ func staticHeaders(t *testing.T) { } func staticNotForbid(t *testing.T) { - b, r, err := get("http://localhost:21603/client.php") + b, r, err := get("http://127.0.0.1:21603/client.php") assert.NoError(t, err) assert.Equal(t, all("../../../tests/client.php"), b) assert.Equal(t, all("../../../tests/client.php"), b) @@ -2007,7 +2007,7 @@ func staticNotForbid(t *testing.T) { } func serveStaticSample(t *testing.T) { - b, r, err := get("http://localhost:21603/sample.txt") + b, r, err := get("http://127.0.0.1:21603/sample.txt") assert.NoError(t, err) assert.Equal(t, "sample\n", b) _ = r.Body.Close() @@ -2104,7 +2104,7 @@ func TestStaticFilesDisabled(t *testing.T) { } func staticFilesDisabled(t *testing.T) { - b, r, err := get("http://localhost:45877/client.php?hello=world") + b, r, err := get("http://127.0.0.1:45877/client.php?hello=world") if err != nil { t.Fatal(err) } @@ -2126,9 +2126,9 @@ func TestStaticFilesForbid(t *testing.T) { mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() - mockLogger.EXPECT().Debug("201 GET http://localhost:34653/http?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) - mockLogger.EXPECT().Debug("201 GET http://localhost:34653/client.XXX?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) 
- mockLogger.EXPECT().Debug("201 GET http://localhost:34653/client.php?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/http?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/client.XXX?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/client.php?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Error("file open error", "error", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("no such file or directory", "error", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("possible path to dir provided").AnyTimes() @@ -2198,19 +2198,19 @@ func TestStaticFilesForbid(t *testing.T) { } func staticTestFilesDir(t *testing.T) { - b, r, err := get("http://localhost:34653/http?hello=world") + b, r, err := get("http://127.0.0.1:34653/http?hello=world") assert.NoError(t, err) assert.Equal(t, "WORLD", b) _ = r.Body.Close() } func staticNotFound(t *testing.T) { - b, _, _ := get("http://localhost:34653/client.XXX?hello=world") //nolint:bodyclose + b, _, _ := get("http://127.0.0.1:34653/client.XXX?hello=world") //nolint:bodyclose assert.Equal(t, "WORLD", b) } func staticFilesForbid(t *testing.T) { - b, r, err := get("http://localhost:34653/client.php?hello=world") + b, r, err := get("http://127.0.0.1:34653/client.php?hello=world") if err != nil { t.Fatal(err) } @@ -2288,7 +2288,7 @@ func TestHTTPIssue659(t *testing.T) { } func echoIssue659(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, "http://localhost:32552", nil) + req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:32552", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) diff --git a/tests/plugins/http/response_test.go b/tests/plugins/http/response_test.go index 276c22ef..f754429d 100644 --- a/tests/plugins/http/response_test.go +++ b/tests/plugins/http/response_test.go @@ -45,13 +45,13 @@ func (tw *testWriter) Push(target string, opts *http.PushOptions) error { } func TestNewResponse_Error(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{Context: []byte(`invalid payload`)}) + r, err := handler.NewResponse(&payload.Payload{Context: []byte(`invalid payload`)}) assert.Error(t, err) assert.Nil(t, r) } func TestNewResponse_Write(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte(`{"headers":{"key":["value"]},"status": 301}`), Body: []byte(`sample body`), }) @@ -68,7 +68,7 @@ func TestNewResponse_Write(t *testing.T) { } func TestNewResponse_Stream(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte(`{"headers":{"key":["value"]},"status": 301}`), }) @@ -93,7 +93,7 @@ func TestNewResponse_Stream(t *testing.T) { } func TestNewResponse_StreamError(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte(`{"headers":{"key":["value"]},"status": 301}`), }) @@ -114,7 +114,7 @@ func TestNewResponse_StreamError(t *testing.T) { } func TestWrite_HandlesPush(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte(`{"headers":{"Http2-Push":["/test.js"],"content-type":["text/html"]},"status": 200}`), }) @@ -129,7 +129,7 @@ 
func TestWrite_HandlesPush(t *testing.T) { } func TestWrite_HandlesTrailers(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte(`{"headers":{"Trailer":["foo, bar", "baz"],"foo":["test"],"bar":["demo"]},"status": 200}`), }) @@ -148,7 +148,7 @@ func TestWrite_HandlesTrailers(t *testing.T) { } func TestWrite_HandlesHandlesWhitespacesInTrailer(t *testing.T) { - r, err := handler.NewResponse(payload.Payload{ + r, err := handler.NewResponse(&payload.Payload{ Context: []byte( `{"headers":{"Trailer":["foo\t,bar , baz"],"foo":["a"],"bar":["b"],"baz":["c"]},"status": 200}`), }) diff --git a/tests/plugins/http/uploads_test.go b/tests/plugins/http/uploads_test.go index df696668..54f2bead 100644 --- a/tests/plugins/http/uploads_test.go +++ b/tests/plugins/http/uploads_test.go @@ -31,7 +31,7 @@ func TestHandler_Upload_File(t *testing.T) { pool, err := poolImpl.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") }, pipe.NewPipeFactory(), - poolImpl.Config{ + &poolImpl.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -85,7 +85,7 @@ func TestHandler_Upload_File(t *testing.T) { t.Errorf("error closing the file: error %v", err) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -114,7 +114,7 @@ func TestHandler_Upload_NestedFile(t *testing.T) { pool, err := poolImpl.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") }, pipe.NewPipeFactory(), - poolImpl.Config{ + &poolImpl.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -168,7 +168,7 @@ func TestHandler_Upload_NestedFile(t *testing.T) { t.Errorf("error closing the file: error %v", err) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -197,7 +197,7 @@ func TestHandler_Upload_File_NoTmpDir(t *testing.T) { pool, err := poolImpl.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") }, pipe.NewPipeFactory(), - poolImpl.Config{ + &poolImpl.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -251,7 +251,7 @@ func TestHandler_Upload_File_NoTmpDir(t *testing.T) { t.Errorf("error closing the file: error %v", err) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, &mb) + req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) @@ -280,7 +280,7 @@ func TestHandler_Upload_File_Forbids(t *testing.T) { pool, err := poolImpl.Initialize(context.Background(), func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") }, pipe.NewPipeFactory(), - poolImpl.Config{ + &poolImpl.Config{ NumWorkers: 1, AllocateTimeout: time.Second * 1000, DestroyTimeout: time.Second * 1000, @@ -334,7 +334,7 @@ func TestHandler_Upload_File_Forbids(t *testing.T) { t.Errorf("error closing the file: error %v", err) } - req, err := http.NewRequest("POST", "http://localhost"+hs.Addr, &mb) + 
req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb) assert.NoError(t, err) req.Header.Set("Content-Type", w.FormDataContentType()) diff --git a/tests/plugins/informer/test_plugin.go b/tests/plugins/informer/test_plugin.go index 43335999..095140b8 100644 --- a/tests/plugins/informer/test_plugin.go +++ b/tests/plugins/informer/test_plugin.go @@ -10,7 +10,7 @@ import ( "github.com/spiral/roadrunner/v2/plugins/server" ) -var testPoolConfig = pool.Config{ +var testPoolConfig = &pool.Config{ NumWorkers: 10, MaxJobs: 100, AllocateTimeout: time.Second * 10, @@ -51,13 +51,13 @@ func (p1 *Plugin1) Name() string { func (p1 *Plugin1) Available() {} -func (p1 *Plugin1) Workers() []process.State { +func (p1 *Plugin1) Workers() []*process.State { p, err := p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil) if err != nil { panic(err) } - ps := make([]process.State, 0, len(p.Workers())) + ps := make([]*process.State, 0, len(p.Workers())) workers := p.Workers() for i := 0; i < len(workers); i++ { state, err := process.WorkerProcessState(workers[i])
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml b/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml
new file mode 100644
index 00000000..f9a7308b
--- /dev/null
+++ b/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml
@@ -0,0 +1,24 @@
+rpc:
+  listen: tcp://127.0.0.1:6001
+
+server:
+  command: "php ../../jobs_ok.php"
+  relay: "pipes"
+  relay_timeout: "20s"
+
+amqp:
+  addr: amqp://guest:guest@127.0.0.1:5672/
+
+logs:
+  level: debug
+  encoding: console
+  mode: development
+
+jobs:
+  num_pollers: 10
+  pipeline_size: 100000
+  pool:
+    num_workers: 10
+    max_jobs: 0
+    allocate_timeout: 60s
+    destroy_timeout: 60s
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-init.yaml b/tests/plugins/jobs/amqp/.rr-amqp-init.yaml
new file mode 100644
index 00000000..43840545
--- /dev/null
+++ b/tests/plugins/jobs/amqp/.rr-amqp-init.yaml
@@ -0,0 +1,55 @@
+rpc:
+  listen: tcp://127.0.0.1:6001
+
+server:
+  command: "php ../../jobs_ok.php"
+  relay: "pipes"
+  relay_timeout: "20s"
+
+amqp:
+  addr: amqp://guest:guest@127.0.0.1:5672/
+
+logs:
+  level: debug
+  encoding: console
+  mode: development
+
+jobs:
+  num_pollers: 1
+  pipeline_size: 100000
+  timeout: 1
+  pool:
+    num_workers: 10
+    max_jobs: 0
+    allocate_timeout: 60s
+    destroy_timeout: 60s
+
+  pipelines:
+    test-1:
+      driver: amqp
+      prefetch: 100
+      queue: test-1-queue
+      priority: 1
+      exchange: default
+      exchange_type: direct
+      routing_key: test-1
+      exclusive: false
+      multiple_ack: false
+      requeue_on_fail: false
+
+    test-2:
+      driver: amqp
+      prefetch: 100
+      queue: test-2-queue
+      priority: 2
+      exchange: default
+      exchange_type: direct
+      routing_key: test-2
+      exclusive: false
+      multiple_ack: false
+      requeue_on_fail: false
+
+
+  # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
+  consume: [ "test-1", "test-2" ]
+
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml b/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml
new file mode 100644
index 00000000..79493d96
--- /dev/null
+++ b/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml
@@ -0,0 +1,24 @@
+rpc:
+  listen: tcp://127.0.0.1:6001
+
+server:
+  command: "php ../../jobs_err.php"
+  relay: "pipes"
+  relay_timeout: "20s"
+
+amqp:
+  addr: amqp://guest:guest@127.0.0.1:5672/
+
+logs:
+  level: debug
+  encoding: console
+  mode: development
+
+jobs:
+  num_pollers: 1
+  pipeline_size: 100000
+  pool:
+    num_workers: 10
+    max_jobs: 0
+    allocate_timeout: 60s
+    destroy_timeout: 60s
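With the RPC endpoint and amqp pipelines above in place, a push over the tcp://127.0.0.1:6001 socket could look like the following hedged sketch. The `jobs.Push` method name and the use of the `jobsv1beta` messages over goridge RPC are assumptions based on the proto added earlier in this diff, not a confirmed API:

```go
package main

import (
	"net"
	"net/rpc"

	goridgeRPC "github.com/spiral/goridge/v3/pkg/rpc"
	jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" // assumed import path
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:6001")
	if err != nil {
		panic(err)
	}
	// Wrap the raw connection in the goridge client codec used by RoadRunner's RPC plugin.
	client := rpc.NewClientWithCodec(goridgeRPC.NewClientCodec(conn))
	defer client.Close()

	req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{
		Job:     "App\\Job\\SomeJob", // hypothetical job name
		Id:      "job-1",
		Payload: `{"key":"value"}`,
		Options: &jobsv1beta.Options{Pipeline: "test-1", Priority: 1},
	}}

	var resp jobsv1beta.Empty
	if err := client.Call("jobs.Push", req, &resp); err != nil { // method name is an assumption
		panic(err)
	}
}
```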
a/tests/plugins/jobs/amqp/.rr-no-global.yaml b/tests/plugins/jobs/amqp/.rr-no-global.yaml new file mode 100644 index 00000000..1b01eb73 --- /dev/null +++ b/tests/plugins/jobs/amqp/.rr-no-global.yaml @@ -0,0 +1,47 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: error + mode: development + +jobs: + # num logical cores by default + num_pollers: 10 + # 1mi by default + pipeline_size: 100000 + # worker pool configuration + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + # list of broker pipelines associated with endpoints + pipelines: + test-1: + driver: amqp + priority: 1 + pipeline_size: 100 + queue: test-1-queue + exchange: default + exchange_type: direct + routing_key: test + + test-2: + driver: amqp + priority: 2 + pipeline_size: 100 + queue: test-2-queue + exchange: default + exchange_type: direct + routing_key: test-2 + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml new file mode 100644 index 00000000..3555ef96 --- /dev/null +++ b/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml @@ -0,0 +1,27 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +beanstalk: + # beanstalk address + addr: tcp://127.0.0.1:11300 + # connect timeout + timeout: 10s + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml new file mode 100644 index 00000000..cf9069a8 --- /dev/null +++ b/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml @@ -0,0 +1,45 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +beanstalk: + addr: tcp://127.0.0.1:11300 + timeout: 10s + +logs: + level: info + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: beanstalk + priority: 11 + tube_priority: 1 + tube: default-1 + reserve_timeout: 10s + + test-2: + driver: beanstalk + priority: 11 + tube_priority: 3 + tube: default-2 + reserve_timeout: 10s + + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml new file mode 100644 index 00000000..a4f31290 --- /dev/null +++ b/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml @@ -0,0 +1,27 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_err.php" + relay: "pipes" + relay_timeout: "20s" + +beanstalk: + # beanstalk address + addr: tcp://127.0.0.1:11300 + # connect timeout + timeout: 10s + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s diff --git 
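The static pipelines in the YAML above can also be created at runtime: the test helpers later in this diff drive the jobs.Declare RPC with the same option names passed as string values. A sketch in the style of those helpers (same package and imports; declareOverRPC is illustrative, and the option keys mirror the YAML above):

func declareOverRPC(t *testing.T) {
    conn, err := net.Dial("tcp", "127.0.0.1:6001")
    assert.NoError(t, err)
    client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))

    // Keys correspond one-to-one to the static pipeline options above.
    req := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
        "driver":        "amqp",
        "name":          "test-1",
        "queue":         "test-1-queue",
        "exchange":      "default",
        "exchange_type": "direct",
        "routing_key":   "test",
        "prefetch":      "100",
        "priority":      "1",
    }}

    er := &jobsv1beta.Empty{}
    assert.NoError(t, client.Call("jobs.Declare", req, er))
}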
a/tests/plugins/jobs/beanstalk/.rr-no-global.yaml b/tests/plugins/jobs/beanstalk/.rr-no-global.yaml new file mode 100644 index 00000000..87f46069 --- /dev/null +++ b/tests/plugins/jobs/beanstalk/.rr-no-global.yaml @@ -0,0 +1,31 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: error + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + # list of broker pipelines associated with endpoints + pipelines: + test-1: + driver: beanstalk + priority: 11 + tube_priority: 1 + tube: default-1 + reserve_timeout: 10s + + consume: [ "test-1" ] diff --git a/tests/plugins/jobs/configs/.rr-jobs-init.yaml b/tests/plugins/jobs/configs/.rr-jobs-init.yaml new file mode 100644 index 00000000..bf9f60cc --- /dev/null +++ b/tests/plugins/jobs/configs/.rr-jobs-init.yaml @@ -0,0 +1,112 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +amqp: + addr: amqp://guest:guest@127.0.0.1:5672/ + +# beanstalk configuration +# +beanstalk: + # beanstalk address + addr: tcp://127.0.0.1:11300 + # connect timeout + timeout: 10s + +# amazon sqs configuration +# General section +sqs: + key: api-key + secret: api-secret + region: us-west-1 + endpoint: http://127.0.0.1:9324 + +logs: + level: info + encoding: console + mode: development + +jobs: + # num logical cores by default + num_pollers: 10 + # 1mi by default + pipeline_size: 100000 + # worker pool configuration + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + # list of broker pipelines associated with endpoints + pipelines: + test-local: + driver: ephemeral + priority: 10 + prefetch: 10000 + + test-local-2: + driver: ephemeral + priority: 1 + prefetch: 10000 + + test-local-3: + driver: ephemeral + priority: 2 + prefetch: 10000 + + test-1: + driver: amqp + # QoS + prefetch: 1000000 + # Queue name + queue: test-1-queue + # Pipeline jobs priority, 1 - highest + priority: 1 + # Exchange + exchange: default + # Exchange type: direct, topic, fanout + exchange_type: direct + # Routing key for the queue + routing_key: test + # Declare a queue exclusive at the exchange + exclusive: false + # When multiple is true, this delivery and all prior unacknowledged deliveries + # on the same channel will be acknowledged. This is useful for batch processing + # of deliveries + multiple_ack: false + # Requeue the delivery if the job processing fails (instead of dropping it) +
requeue_on_fail: false + + test-2-amqp: + driver: amqp + priority: 2 + prefetch: 1000000 + queue: test-2-queue + exchange: default + exchange_type: direct + routing_key: test-2 + + test-2: + driver: beanstalk + priority: 11 + tube: default + + test-3: + driver: sqs + prefetch: 1000000 + queue: default + attributes: + MessageRetentionPeriod: 86400 + tags: + test: "tag" + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-local", "test-local-2", "test-local-3", "test-1", "test-2-amqp", "test-3" ] + diff --git a/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml new file mode 100644 index 00000000..861f7ec4 --- /dev/null +++ b/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml @@ -0,0 +1,55 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +amqp: + addr: amqp://guest:guest@127.0.0.1:23679/ + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + timeout: 1 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: amqp + prefetch: 100 + queue: test-1-queue + priority: 1 + exchange: default + exchange_type: direct + routing_key: test-1 + exclusive: false + multiple_ack: false + requeue_on_fail: false + + test-2: + driver: amqp + prefetch: 100 + queue: test-2-queue + priority: 2 + exchange: default + exchange_type: direct + routing_key: test-2 + exclusive: false + multiple_ack: false + requeue_on_fail: false + + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml new file mode 100644 index 00000000..57d8ad2d --- /dev/null +++ b/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml @@ -0,0 +1,44 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +beanstalk: + addr: tcp://127.0.0.1:11400 + timeout: 10s + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: beanstalk + priority: 11 + tube_priority: 1 + tube: default-1 + reserve_timeout: 10s + + test-2: + driver: beanstalk + priority: 11 + tube_priority: 3 + tube: default-2 + reserve_timeout: 10s + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml new file mode 100644 index 00000000..b6ba83a4 --- /dev/null +++ b/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml @@ -0,0 +1,60 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +sqs: + key: api-key + secret: api-secret + region: us-west-1 + endpoint: http://127.0.0.1:19324 + +logs: + level: debug + encoding: console + mode: development + +jobs: +
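These durability configs point the drivers at non-default ports (amqp on 23679, beanstalk on 11400, sqs on 19324), while the helpers in helpers.go below toggle proxies on 127.0.0.1:8474. That matches a toxiproxy instance forwarding those ports to the real brokers. A sketch of creating such a proxy; the POST /proxies request shape follows the public toxiproxy HTTP API and, like the createProxy name, is an assumption beyond this diff (bytes, net/http, and require as in helpers.go, plus fmt):

func createProxy(t *testing.T, name, listen, upstream string) {
    // Assumed toxiproxy API: POST /proxies with name/listen/upstream.
    body := bytes.NewBufferString(fmt.Sprintf(
        `{"name":%q,"listen":%q,"upstream":%q,"enabled":true}`,
        name, listen, upstream,
    ))
    resp, err := http.Post("http://127.0.0.1:8474/proxies", "application/json", body) //nolint:noctx
    require.NoError(t, err)
    require.Equal(t, 201, resp.StatusCode)
    if resp.Body != nil {
        _ = resp.Body.Close()
    }
}

For the amqp redial config above that would be, e.g., createProxy(t, "redial", "127.0.0.1:23679", "127.0.0.1:5672").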
num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + timeout: 20 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: sqs + prefetch: 10 + visibility_timeout: 0 + wait_time_seconds: 1 + queue: default + # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html + attributes: + DelaySeconds: 0 + MaximumMessageSize: 262144 + MessageRetentionPeriod: 345600 + ReceiveMessageWaitTimeSeconds: 0 + VisibilityTimeout: 0 + tags: + test: "tag" + + test-2: + driver: sqs + prefetch: 10 + queue: default-2 + wait_time_seconds: 1 + attributes: + MessageRetentionPeriod: 86400 + tags: + test: "tag" + + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/ephemeral/.rr-ephemeral-declare.yaml b/tests/plugins/jobs/ephemeral/.rr-ephemeral-declare.yaml new file mode 100644 index 00000000..726c24ac --- /dev/null +++ b/tests/plugins/jobs/ephemeral/.rr-ephemeral-declare.yaml @@ -0,0 +1,21 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s diff --git a/tests/plugins/jobs/ephemeral/.rr-ephemeral-init.yaml b/tests/plugins/jobs/ephemeral/.rr-ephemeral-init.yaml new file mode 100644 index 00000000..8914dfaa --- /dev/null +++ b/tests/plugins/jobs/ephemeral/.rr-ephemeral-init.yaml @@ -0,0 +1,37 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: ephemeral + priority: 10 + prefetch: 10000 + + test-2: + driver: ephemeral + priority: 10 + prefetch: 10000 + + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/ephemeral/.rr-ephemeral-jobs-err.yaml b/tests/plugins/jobs/ephemeral/.rr-ephemeral-jobs-err.yaml new file mode 100644 index 00000000..05dc3ffa --- /dev/null +++ b/tests/plugins/jobs/ephemeral/.rr-ephemeral-jobs-err.yaml @@ -0,0 +1,21 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_err.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s diff --git a/tests/plugins/jobs/ephemeral/.rr-ephemeral-pause-resume.yaml b/tests/plugins/jobs/ephemeral/.rr-ephemeral-pause-resume.yaml new file mode 100644 index 00000000..e1b76263 --- /dev/null +++ b/tests/plugins/jobs/ephemeral/.rr-ephemeral-pause-resume.yaml @@ -0,0 +1,44 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: debug + mode: development + +jobs: + # num logical cores by default + num_pollers: 10 + # 1mi by default + pipeline_size: 100000 + # worker pool configuration + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + 
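Two priority knobs appear throughout these configs: a pipeline-level priority ("1 - highest", per the comment in .rr-jobs-init.yaml above) and a per-job Options.Priority set by the push helpers below. A sketch of the presumed combination rule, where the job value wins when set and the pipeline value is the fallback; both the rule and the effectivePriority helper are assumptions, not stated in this diff:

func effectivePriority(jobPriority, pipelinePriority uint64) uint64 {
    // Assumed: 0 means "no explicit job priority", so inherit the pipeline default.
    if jobPriority == 0 {
        return pipelinePriority
    }
    return jobPriority
}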
# list of broker pipelines associated with endpoints + pipelines: + test-local: + driver: ephemeral + priority: 10 + pipeline_size: 10000 + + test-local-2: + driver: ephemeral + priority: 1 + pipeline_size: 10000 + + test-local-3: + driver: ephemeral + priority: 2 + pipeline_size: 10000 + + # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually + consume: [ "test-local", "test-local-2" ] + diff --git a/tests/plugins/jobs/helpers.go b/tests/plugins/jobs/helpers.go new file mode 100644 index 00000000..4c2f2fea --- /dev/null +++ b/tests/plugins/jobs/helpers.go @@ -0,0 +1,185 @@ +package jobs + +import ( + "bytes" + "net" + "net/http" + "net/rpc" + "testing" + + goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc" + jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + push string = "jobs.Push" + pause string = "jobs.Pause" + destroy string = "jobs.Destroy" + resume string = "jobs.Resume" +) + +func resumePipes(pipes ...string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))} + + for i := 0; i < len(pipes); i++ { + pipe.GetPipelines()[i] = pipes[i] + } + + er := &jobsv1beta.Empty{} + err = client.Call(resume, pipe, er) + assert.NoError(t, err) + } +} + +func pushToDisabledPipe(pipeline string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{ + Job: "some/php/namespace", + Id: "1", + Payload: `{"hello":"world"}`, + Headers: nil, + Options: &jobsv1beta.Options{ + Priority: 1, + Pipeline: pipeline, + }, + }} + + er := &jobsv1beta.Empty{} + err = client.Call(push, req, er) + assert.Error(t, err) + } +} + +func pushToPipe(pipeline string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{ + Job: "some/php/namespace", + Id: "1", + Payload: `{"hello":"world"}`, + Headers: map[string]*jobsv1beta.HeaderValue{"test": {Value: []string{"test2"}}}, + Options: &jobsv1beta.Options{ + Priority: 1, + Pipeline: pipeline, + Delay: 0, + }, + }} + + er := &jobsv1beta.Empty{} + err = client.Call(push, req, er) + assert.NoError(t, err) + } +} + +func pushToPipeErr(pipeline string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{ + Job: "some/php/namespace", + Id: "1", + Payload: `{"hello":"world"}`, + Headers: map[string]*jobsv1beta.HeaderValue{"test": {Value: []string{"test2"}}}, + Options: &jobsv1beta.Options{ + Priority: 1, + Pipeline: pipeline, + Delay: 0, + }, + }} + + er := &jobsv1beta.Empty{} + err = client.Call(push, req, er) + require.Error(t, err) + } +} +func pausePipelines(pipes ...string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := 
rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))} + + for i := 0; i < len(pipes); i++ { + pipe.GetPipelines()[i] = pipes[i] + } + + er := &jobsv1beta.Empty{} + err = client.Call(pause, pipe, er) + assert.NoError(t, err) + } +} + +func destroyPipelines(pipes ...string) func(t *testing.T) { + return func(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))} + + for i := 0; i < len(pipes); i++ { + pipe.GetPipelines()[i] = pipes[i] + } + + er := &jobsv1beta.Empty{} + err = client.Call(destroy, pipe, er) + assert.NoError(t, err) + } +} + +func enableProxy(name string, t *testing.T) { + buf := new(bytes.Buffer) + buf.WriteString(`{"enabled":true}`) + + resp, err := http.Post("http://127.0.0.1:8474/proxies/"+name, "application/json", buf) //nolint:noctx + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + if resp.Body != nil { + _ = resp.Body.Close() + } +} + +func disableProxy(name string, t *testing.T) { + buf := new(bytes.Buffer) + buf.WriteString(`{"enabled":false}`) + + resp, err := http.Post("http://127.0.0.1:8474/proxies/"+name, "application/json", buf) //nolint:noctx + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + if resp.Body != nil { + _ = resp.Body.Close() + } +} + +func deleteProxy(name string, t *testing.T) { + client := &http.Client{} + + req, err := http.NewRequest(http.MethodDelete, "http://127.0.0.1:8474/proxies/"+name, nil) //nolint:noctx + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + require.Equal(t, 204, resp.StatusCode) + if resp.Body != nil { + _ = resp.Body.Close() + } +} diff --git a/tests/plugins/jobs/jobs_amqp_test.go b/tests/plugins/jobs/jobs_amqp_test.go new file mode 100644 index 00000000..bb5281c0 --- /dev/null +++ b/tests/plugins/jobs/jobs_amqp_test.go @@ -0,0 +1,369 @@ +package jobs + +import ( + "net" + "net/rpc" + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + "github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/amqp" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAMQPInit(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "amqp/.rr-amqp-init.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services",
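Each helper in helpers.go above re-dials 127.0.0.1:6001 and rebuilds the goridge client codec. A consolidation sketch that the push/pause/resume/destroy helpers could share; rpcCall is hypothetical and not part of this diff:

func rpcCall(t *testing.T, method string, req, resp interface{}) {
    // Same dial/codec boilerplate as the helpers above, in one place.
    conn, err := net.Dial("tcp", "127.0.0.1:6001")
    require.NoError(t, err)
    defer func() { _ = conn.Close() }()

    client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
    require.NoError(t, client.Call(method, req, resp))
}

resumePipes, for example, would reduce to rpcCall(t, resume, &jobsv1beta.Pipelines{Pipelines: pipes}, &jobsv1beta.Empty{}).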
gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("driver initialized", "driver", "amqp", "start", gomock.Any()).Times(2) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &amqp.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + stopCh <- struct{}{} + wg.Wait() +} + +func TestAMQPDeclare(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "amqp/.rr-amqp-declare.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("driver initialized", "driver", "amqp", "start", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + 
&rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &amqp.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareAMQPPipeline", declareAMQPPipe) + t.Run("ConsumeAMQPPipeline", resumePipes("test-3")) + t.Run("PushAMQPPipeline", pushToPipe("test-3")) + time.Sleep(time.Second) + t.Run("PauseAMQPPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroyAMQPPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func TestAMQPJobsError(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "amqp/.rr-amqp-jobs-err.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("driver initialized", "driver", "amqp", "start", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &amqp.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } 
+ case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareAMQPPipeline", declareAMQPPipe) + t.Run("ConsumeAMQPPipeline", resumePipes("test-3")) + t.Run("PushAMQPPipeline", pushToPipe("test-3")) + time.Sleep(time.Second * 25) + t.Run("PauseAMQPPipeline", pausePipelines("test-3")) + t.Run("DestroyAMQPPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func declareAMQPPipe(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{ + "driver": "amqp", + "name": "test-3", + "routing-key": "test-3", + "queue": "default", + "exchange-type": "direct", + "exchange": "amqp.default", + "prefetch": "100", + "priority": "3", + "exclusive": "true", + "multiple_ask": "true", + "requeue_on_fail": "true", + }} + + er := &jobsv1beta.Empty{} + err = client.Call("jobs.Declare", pipe, er) + assert.NoError(t, err) +} + +func TestAMQPNoGlobalSection(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "amqp/.rr-no-global.yaml", + Prefix: "rr", + } + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + &logger.ZapLogger{}, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &amqp.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + _, err = cont.Serve() + require.Error(t, err) +} diff --git a/tests/plugins/jobs/jobs_beanstalk_test.go b/tests/plugins/jobs/jobs_beanstalk_test.go new file mode 100644 index 00000000..916ac08f --- /dev/null +++ b/tests/plugins/jobs/jobs_beanstalk_test.go @@ -0,0 +1,372 @@ +package jobs + +import ( + "net" + "net/rpc" + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + "github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/beanstalk" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBeanstalkInit(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "beanstalk/.rr-beanstalk-init.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + 
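Every test in these files repeats the same endure bootstrap and sig/stopCh select loop. A hypothetical consolidation built only from calls the tests above already make; runRR and its returned stop channel are illustrative, and OS signal handling is omitted for brevity:

func runRR(t *testing.T, cfg *config.Viper, plugins ...interface{}) chan struct{} {
    cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
    require.NoError(t, err)
    require.NoError(t, cont.RegisterAll(append(plugins, cfg)...))
    require.NoError(t, cont.Init())

    ch, err := cont.Serve()
    require.NoError(t, err)

    stopCh := make(chan struct{}, 1)
    go func() {
        for {
            select {
            case e := <-ch:
                // A serve error fails the test, as in the loops above.
                assert.Fail(t, "error", e.Error.Error())
                _ = cont.Stop()
                return
            case <-stopCh:
                assert.NoError(t, cont.Stop())
                return
            }
        }
    }()
    return stopCh
}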
mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("driver initialized", "driver", "beanstalk", "start", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes() + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes() + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &beanstalk.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + stopCh <- struct{}{} + wg.Wait() +} + +func TestBeanstalkDeclare(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "beanstalk/.rr-beanstalk-declare.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("driver initialized", "driver", "beanstalk", "start", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "beanstalk", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes() + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + 
mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes() + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &beanstalk.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareBeanstalkPipeline", declareBeanstalkPipe) + t.Run("ConsumeBeanstalkPipeline", resumePipes("test-3")) + t.Run("PushBeanstalkPipeline", pushToPipe("test-3")) + time.Sleep(time.Second) + t.Run("PauseBeanstalkPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroyBeanstalkPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func TestBeanstalkJobsError(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "beanstalk/.rr-beanstalk-jobs-err.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("driver initialized", "driver", "beanstalk", "start", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "beanstalk", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes() + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes() + + mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &beanstalk.Plugin{}, + ) + assert.NoError(t, err) + + err = 
cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareBeanstalkPipeline", declareBeanstalkPipe) + t.Run("ConsumeBeanstalkPipeline", resumePipes("test-3")) + t.Run("PushBeanstalkPipeline", pushToPipe("test-3")) + time.Sleep(time.Second * 25) + t.Run("PauseBeanstalkPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroyBeanstalkPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func TestBeanstalkNoGlobalSection(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "beanstalk/.rr-no-global.yaml", + Prefix: "rr", + } + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + &logger.ZapLogger{}, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &beanstalk.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + _, err = cont.Serve() + require.Error(t, err) +} + +func declareBeanstalkPipe(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{ + "driver": "beanstalk", + "name": "test-3", + "tube": "default", + "reserve_timeout": "1", + "priority": "3", + "tube_priority": "10", + }} + + er := &jobsv1beta.Empty{} + err = client.Call("jobs.Declare", pipe, er) + assert.NoError(t, err) +} diff --git a/tests/plugins/jobs/jobs_ephemeral_test.go b/tests/plugins/jobs/jobs_ephemeral_test.go new file mode 100644 index 00000000..0a882556 --- /dev/null +++ b/tests/plugins/jobs/jobs_ephemeral_test.go @@ -0,0 +1,424 @@ +package jobs + +import ( + "net" + "net/rpc" + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + "github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/ephemeral" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" +) + +func TestEphemeralInit(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "ephemeral/.rr-ephemeral-init.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + 
mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &ephemeral.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + stopCh <- struct{}{} + wg.Wait() +} + +func TestEphemeralDeclare(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "ephemeral/.rr-ephemeral-declare.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &ephemeral.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { 
+ assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareEphemeralPipeline", declareEphemeralPipe) + t.Run("ConsumeEphemeralPipeline", consumeEphemeralPipe) + t.Run("PushEphemeralPipeline", pushToPipe("test-3")) + time.Sleep(time.Second) + t.Run("PauseEphemeralPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroyEphemeralPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() + + time.Sleep(time.Second * 5) +} + +func TestEphemeralPauseResume(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "ephemeral/.rr-ephemeral-pause-resume.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &ephemeral.Plugin{}, + ) + + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("ephemeralPause", pausePipelines("test-local")) + t.Run("pushToDisabledPipe", pushToDisabledPipe("test-local")) + t.Run("ephemeralResume", resumePipes("test-local")) + t.Run("pushToEnabledPipe", pushToPipe("test-local")) + + time.Sleep(time.Second * 1) + + stopCh <- struct{}{} + wg.Wait() +} + +func TestEphemeralJobsError(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: 
"ephemeral/.rr-ephemeral-jobs-err.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &ephemeral.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareEphemeralPipeline", declareEphemeralPipe) + t.Run("ConsumeEphemeralPipeline", resumePipes("test-3")) + t.Run("PushEphemeralPipeline", pushToPipe("test-3")) + time.Sleep(time.Second * 25) + t.Run("PauseEphemeralPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroyEphemeralPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func declareEphemeralPipe(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{ + "driver": "ephemeral", + "name": "test-3", + "prefetch": "10000", + }} + + er := &jobsv1beta.Empty{} + err = client.Call("jobs.Declare", pipe, er) + assert.NoError(t, err) +} + +func consumeEphemeralPipe(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, 1)} + pipe.GetPipelines()[0] = "test-3" + + er := &jobsv1beta.Empty{} + err = client.Call("jobs.Resume", pipe, er) + assert.NoError(t, err) +} diff --git a/tests/plugins/jobs/jobs_general_test.go b/tests/plugins/jobs/jobs_general_test.go new file mode 100644 index 00000000..829fd102 --- /dev/null +++ b/tests/plugins/jobs/jobs_general_test.go @@ -0,0 +1,125 @@ +package jobs + +import ( + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + 
"github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/amqp" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/ephemeral" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" +) + +func TestJobsInit(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "configs/.rr-jobs-init.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("driver ready", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("driver ready", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-2-amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("driver initialized", "driver", "amqp", "start", gomock.Any()).Times(2) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2-amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + &logger.ZapLogger{}, + // mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &ephemeral.Plugin{}, + &amqp.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + 
assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + stopCh <- struct{}{} + wg.Wait() +} diff --git a/tests/plugins/jobs/jobs_sqs_test.go b/tests/plugins/jobs/jobs_sqs_test.go new file mode 100644 index 00000000..d4cb4e52 --- /dev/null +++ b/tests/plugins/jobs/jobs_sqs_test.go @@ -0,0 +1,365 @@ +package jobs + +import ( + "net" + "net/rpc" + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + "github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/sqs" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSQSInit(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "sqs/.rr-sqs-init.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline started", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("driver initialized", "driver", "sqs", "start", gomock.Any()).Times(2) + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Warn("sqs listener stopped").Times(2) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &sqs.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + 
+ wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + stopCh <- struct{}{} + wg.Wait() +} + +func TestSQSDeclare(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "sqs/.rr-sqs-declare.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "sqs", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("sqs listener stopped").Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &sqs.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareSQSPipeline", declareSQSPipe) + t.Run("ConsumeSQSPipeline", resumePipes("test-3")) + t.Run("PushSQSPipeline", pushToPipe("test-3")) + time.Sleep(time.Second) + t.Run("PauseSQSPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroySQSPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() +} + +func TestSQSJobsError(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "sqs/.rr-sqs-jobs-err.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker 
constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3) + + mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "sqs", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("sqs listener stopped").Times(1) + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + mockLogger, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &sqs.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + + t.Run("DeclareSQSPipeline", declareSQSPipe) + t.Run("ConsumeSQSPipeline", resumePipes("test-3")) + t.Run("PushSQSPipeline", pushToPipe("test-3")) + time.Sleep(time.Second * 25) + t.Run("PauseSQSPipeline", pausePipelines("test-3")) + time.Sleep(time.Second) + t.Run("DestroySQSPipeline", destroyPipelines("test-3")) + + time.Sleep(time.Second * 5) + stopCh <- struct{}{} + wg.Wait() + + time.Sleep(time.Second * 5) +} + +func TestSQSNoGlobalSection(t *testing.T) { + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + assert.NoError(t, err) + + cfg := &config.Viper{ + Path: "sqs/.rr-no-global.yaml", + Prefix: "rr", + } + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + &logger.ZapLogger{}, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &sqs.Plugin{}, + ) + assert.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + _, err = cont.Serve() + require.Error(t, err) +} + +func declareSQSPipe(t *testing.T) { + conn, err := net.Dial("tcp", "127.0.0.1:6001") + assert.NoError(t, err) + client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)) + + pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{ + "driver": "sqs", + "name": "test-3", + "queue": "default", + "prefetch": "10", + "priority": "3", + "visibility_timeout": "0", + "wait_time_seconds": "3", + }} + + er := &jobsv1beta.Empty{} + err = client.Call("jobs.Declare", pipe, er) + assert.NoError(t, err) +} diff --git a/tests/plugins/jobs/jobs_with_toxics_test.go b/tests/plugins/jobs/jobs_with_toxics_test.go new file mode 100644 index 00000000..71986db3 --- /dev/null +++ b/tests/plugins/jobs/jobs_with_toxics_test.go @@ -0,0 +1,396 @@ +package jobs + +import ( 
+ "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + toxiproxy "github.com/Shopify/toxiproxy/client" + "github.com/golang/mock/gomock" + endure "github.com/spiral/endure/pkg/container" + "github.com/spiral/roadrunner/v2/plugins/config" + "github.com/spiral/roadrunner/v2/plugins/informer" + "github.com/spiral/roadrunner/v2/plugins/jobs" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/amqp" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/beanstalk" + "github.com/spiral/roadrunner/v2/plugins/jobs/drivers/sqs" + "github.com/spiral/roadrunner/v2/plugins/logger" + "github.com/spiral/roadrunner/v2/plugins/resetter" + rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc" + "github.com/spiral/roadrunner/v2/plugins/server" + "github.com/spiral/roadrunner/v2/tests/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDurabilityAMQP(t *testing.T) { + client := toxiproxy.NewClient("127.0.0.1:8474") + + _, err := client.CreateProxy("redial", "127.0.0.1:23679", "127.0.0.1:5672") + require.NoError(t, err) + defer deleteProxy("redial", t) + + cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel)) + require.NoError(t, err) + + cfg := &config.Viper{ + Path: "durability/.rr-amqp-durability-redial.yaml", + Prefix: "rr", + } + + controller := gomock.NewController(t) + mockLogger := mocks.NewMockLogger(controller) + + // general + mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("driver initialized", "driver", "amqp", "start", gomock.Any()).Times(4) + + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2) + mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2) + + mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1) + + mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(4) + + // redial errors + mockLogger.EXPECT().Warn("rabbitmq reconnecting, caused by", "error", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers").AnyTimes()
+ mockLogger.EXPECT().Info("queues and subscribers redeclared successfully").AnyTimes()
+
+ err = cont.RegisterAll(
+ cfg,
+ &server.Plugin{},
+ &rpcPlugin.Plugin{},
+ // mockLogger,
+ &logger.ZapLogger{},
+ &jobs.Plugin{},
+ &resetter.Plugin{},
+ &informer.Plugin{},
+ &amqp.Plugin{},
+ )
+ require.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 3)
+ disableProxy("redial", t)
+ time.Sleep(time.Second * 3)
+
+ go func() {
+ time.Sleep(time.Second * 5)
+ enableProxy("redial", t)
+ }()
+
+ t.Run("PushPipelineWhileRedialing-1", pushToPipeErr("test-1"))
+ t.Run("PushPipelineWhileRedialing-2", pushToPipeErr("test-2"))
+
+ time.Sleep(time.Second * 15)
+ t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
+ t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
+
+ time.Sleep(time.Second * 5)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func TestDurabilitySQS(t *testing.T) {
+ client := toxiproxy.NewClient("127.0.0.1:8474")
+
+ _, err := client.CreateProxy("redial", "127.0.0.1:19324", "127.0.0.1:9324")
+ require.NoError(t, err)
+ defer deleteProxy("redial", t)
+
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ require.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "durability/.rr-sqs-durability-redial.yaml",
+ Prefix: "rr",
+ }
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ // NOTE: mockLogger is commented out of RegisterAll below (ZapLogger is used instead),
+ // so these expectations are not enforced; the rabbitmq-specific messages were carried
+ // over from the AMQP durability test.
+ // general
+ mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1)
+
+ mockLogger.EXPECT().Info("driver initialized", "driver", "sqs", "start", gomock.Any()).Times(4)
+
+ mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
+ mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
+
+ mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
+ mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
+
+ mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(4)
+
+ // redial errors
+ mockLogger.EXPECT().Warn("rabbitmq reconnecting, caused by", "error", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
+
+ mockLogger.EXPECT().Info("rabbitmq dial succeed. trying to redeclare queues and subscribers").AnyTimes()
+ mockLogger.EXPECT().Info("queues and subscribers redeclared successfully").AnyTimes()
+
+ err = cont.RegisterAll(
+ cfg,
+ &server.Plugin{},
+ &rpcPlugin.Plugin{},
+ // mockLogger,
+ &logger.ZapLogger{},
+ &jobs.Plugin{},
+ &resetter.Plugin{},
+ &informer.Plugin{},
+ &sqs.Plugin{},
+ )
+ require.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 3)
+ disableProxy("redial", t)
+ time.Sleep(time.Second * 3)
+
+ go func() {
+ time.Sleep(time.Second * 2)
+ t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
+ t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
+ }()
+
+ time.Sleep(time.Second * 5)
+ enableProxy("redial", t)
+
+ t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
+ t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
+
+ time.Sleep(time.Second * 10)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func TestDurabilityBeanstalk(t *testing.T) {
+ client := toxiproxy.NewClient("127.0.0.1:8474")
+
+ _, err := client.CreateProxy("redial", "127.0.0.1:11400", "127.0.0.1:11300")
+ require.NoError(t, err)
+ defer deleteProxy("redial", t)
+
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ require.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "durability/.rr-beanstalk-durability-redial.yaml",
+ Prefix: "rr",
+ }
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ // NOTE: as above, mockLogger is not registered below, so these expectations are inert;
+ // the rabbitmq-specific messages were carried over from the AMQP durability test.
+ // general
+ mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", gomock.Any()).Times(1)
+
+ mockLogger.EXPECT().Info("driver initialized", "driver", "beanstalk", "start", gomock.Any()).Times(4)
+
+ mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
+ mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
+
+ mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+ mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
+ mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
+
+ mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(4)
+
+ // redial errors
+ mockLogger.EXPECT().Warn("rabbitmq reconnecting, caused by",
"error", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes() + mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes() + + mockLogger.EXPECT().Info("rabbitmq dial succeed. trying to redeclare queues and subscribers").AnyTimes() + mockLogger.EXPECT().Info("queues and subscribers redeclared successfully").AnyTimes() + + err = cont.RegisterAll( + cfg, + &server.Plugin{}, + &rpcPlugin.Plugin{}, + // mockLogger, + &logger.ZapLogger{}, + &jobs.Plugin{}, + &resetter.Plugin{}, + &informer.Plugin{}, + &beanstalk.Plugin{}, + ) + require.NoError(t, err) + + err = cont.Init() + if err != nil { + t.Fatal(err) + } + + ch, err := cont.Serve() + if err != nil { + t.Fatal(err) + } + + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + wg := &sync.WaitGroup{} + wg.Add(1) + + stopCh := make(chan struct{}, 1) + + go func() { + defer wg.Done() + for { + select { + case e := <-ch: + assert.Fail(t, "error", e.Error.Error()) + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + case <-sig: + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + case <-stopCh: + // timeout + err = cont.Stop() + if err != nil { + assert.FailNow(t, "error", err.Error()) + } + return + } + } + }() + + time.Sleep(time.Second * 3) + disableProxy("redial", t) + time.Sleep(time.Second * 3) + + go func() { + time.Sleep(time.Second * 2) + t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1")) + t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2")) + }() + + time.Sleep(time.Second * 5) + enableProxy("redial", t) + + t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1")) + t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2")) + + time.Sleep(time.Second * 10) + + stopCh <- struct{}{} + wg.Wait() +} diff --git a/tests/plugins/jobs/sqs/.rr-no-global.yaml b/tests/plugins/jobs/sqs/.rr-no-global.yaml new file mode 100644 index 00000000..2c97a37e --- /dev/null +++ b/tests/plugins/jobs/sqs/.rr-no-global.yaml @@ -0,0 +1,39 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +logs: + level: error + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: sqs + prefetch: 1000 + visibility_timeout: 0 + wait_time_seconds: 0 + queue: default + attributes: + DelaySeconds: 0 + MaximumMessageSize: 262144 + MessageRetentionPeriod: 345600 + ReceiveMessageWaitTimeSeconds: 0 + VisibilityTimeout: 30 + tags: + test: "tag" + + consume: [ "test-1" ] + diff --git a/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml b/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml new file mode 100644 index 00000000..21209cbb --- /dev/null +++ b/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml @@ -0,0 +1,29 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_ok.php" + relay: "pipes" + relay_timeout: "20s" + +# amazon sqs configuration +# General section +sqs: + key: api-key + secret: api-secret + region: us-west-1 + endpoint: http://127.0.0.1:9324 + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 1 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + 
allocate_timeout: 60s + destroy_timeout: 60s diff --git a/tests/plugins/jobs/sqs/.rr-sqs-init.yaml b/tests/plugins/jobs/sqs/.rr-sqs-init.yaml new file mode 100644 index 00000000..ffdec1fd --- /dev/null +++ b/tests/plugins/jobs/sqs/.rr-sqs-init.yaml @@ -0,0 +1,54 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../client.php echo pipes" + relay: "pipes" + relay_timeout: "20s" + +sqs: + key: api-key + secret: api-secret + region: us-west-1 + endpoint: http://127.0.0.1:9324 + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s + + pipelines: + test-1: + driver: sqs + prefetch: 1000 + visibility_timeout: 0 + wait_time_seconds: 0 + queue: default + attributes: + DelaySeconds: 0 + MaximumMessageSize: 262144 + MessageRetentionPeriod: 345600 + ReceiveMessageWaitTimeSeconds: 0 + VisibilityTimeout: 30 + tags: + test: "tag" + + test-2: + driver: sqs + prefetch: 1000 + queue: default-2 + attributes: + MessageRetentionPeriod: 86400 + tags: + test: "tag" + consume: [ "test-1", "test-2" ] + diff --git a/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml b/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml new file mode 100644 index 00000000..b518d433 --- /dev/null +++ b/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml @@ -0,0 +1,28 @@ +rpc: + listen: tcp://127.0.0.1:6001 + +server: + command: "php ../../jobs_err.php" + relay: "pipes" + relay_timeout: "20s" + +sqs: + key: api-key + secret: api-secret + region: us-west-1 + endpoint: http://127.0.0.1:9324 + +logs: + level: debug + encoding: console + mode: development + +jobs: + num_pollers: 10 + timeout: 60 + pipeline_size: 100000 + pool: + num_workers: 10 + max_jobs: 0 + allocate_timeout: 60s + destroy_timeout: 60s diff --git a/tests/plugins/kv/configs/.rr-kv-init.yaml b/tests/plugins/kv/configs/.rr-kv-init.yaml index 34e22a4e..a13b591c 100644 --- a/tests/plugins/kv/configs/.rr-kv-init.yaml +++ b/tests/plugins/kv/configs/.rr-kv-init.yaml @@ -24,7 +24,7 @@ kv: memcached: driver: memcached - addr: [ "localhost:11211" ] + addr: [ "127.0.0.1:11211" ] # redis: # driver: redis diff --git a/tests/plugins/kv/configs/.rr-memcached.yaml b/tests/plugins/kv/configs/.rr-memcached.yaml index 68443bc4..da5d59c6 100644 --- a/tests/plugins/kv/configs/.rr-memcached.yaml +++ b/tests/plugins/kv/configs/.rr-memcached.yaml @@ -9,4 +9,4 @@ kv: memcached-rr: driver: memcached addr: - - "localhost:11211" + - "127.0.0.1:11211" diff --git a/tests/plugins/kv/configs/.rr-redis-global.yaml b/tests/plugins/kv/configs/.rr-redis-global.yaml index d2e8aefe..a4979879 100644 --- a/tests/plugins/kv/configs/.rr-redis-global.yaml +++ b/tests/plugins/kv/configs/.rr-redis-global.yaml @@ -7,7 +7,7 @@ logs: redis-rr: addrs: - - 'localhost:6379' + - '127.0.0.1:6379' kv: redis-rr: diff --git a/tests/plugins/kv/configs/.rr-redis.yaml b/tests/plugins/kv/configs/.rr-redis.yaml index 0a7396ca..522e365a 100644 --- a/tests/plugins/kv/configs/.rr-redis.yaml +++ b/tests/plugins/kv/configs/.rr-redis.yaml @@ -9,4 +9,4 @@ kv: redis-rr: driver: redis addrs: - - 'localhost:6379' + - '127.0.0.1:6379' diff --git a/tests/plugins/logger/logger_test.go b/tests/plugins/logger/logger_test.go index 9e3fa4da..ec4a748d 100644 --- a/tests/plugins/logger/logger_test.go +++ b/tests/plugins/logger/logger_test.go @@ -347,7 +347,7 @@ func TestFileLogger(t *testing.T) { } func httpEcho(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, 
"http://localhost:54224?hello=world", nil) + req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:54224?hello=world", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) diff --git a/tests/plugins/metrics/configs/.rr-http-metrics.yaml b/tests/plugins/metrics/configs/.rr-http-metrics.yaml index 95f131c0..3e92a88c 100644 --- a/tests/plugins/metrics/configs/.rr-http-metrics.yaml +++ b/tests/plugins/metrics/configs/.rr-http-metrics.yaml @@ -13,7 +13,7 @@ http: num_workers: 1 metrics: - address: localhost:2112 + address: 127.0.0.1:2112 logs: mode: development diff --git a/tests/plugins/metrics/configs/.rr-test.yaml b/tests/plugins/metrics/configs/.rr-test.yaml index 4890076f..b5c4e64f 100644 --- a/tests/plugins/metrics/configs/.rr-test.yaml +++ b/tests/plugins/metrics/configs/.rr-test.yaml @@ -3,7 +3,7 @@ rpc: metrics: # prometheus client address (path /metrics added automatically) - address: localhost:2112 + address: 127.0.0.1:2112 collect: app_metric: type: histogram @@ -15,4 +15,4 @@ metrics: help: "Custom application counter." logs: mode: development - level: error
\ No newline at end of file + level: error diff --git a/tests/plugins/metrics/metrics_test.go b/tests/plugins/metrics/metrics_test.go index 3d900fcc..c4ea9f2c 100644 --- a/tests/plugins/metrics/metrics_test.go +++ b/tests/plugins/metrics/metrics_test.go @@ -27,7 +27,7 @@ import ( const dialAddr = "127.0.0.1:6001" const dialNetwork = "tcp" -const getAddr = "http://localhost:2112/metrics" +const getAddr = "http://127.0.0.1:2112/metrics" // get request and return body func get() (string, error) { @@ -130,7 +130,7 @@ func TestMetricsIssue571(t *testing.T) { mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "services", []string{"metrics"}).MinTimes(1) - mockLogger.EXPECT().Debug("200 GET http://localhost:56444/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("200 GET http://127.0.0.1:56444/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Info("declaring new metric", "name", "test", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Info("metric successfully added", "name", "test", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1) mockLogger.EXPECT().Info("metric successfully added", "name", "test", "labels", []string{}, "value", gomock.Any()).MinTimes(1) @@ -209,7 +209,7 @@ func TestMetricsIssue571(t *testing.T) { // get request and return body func issue571Http() (string, error) { - r, err := http.Get("http://localhost:56444") + r, err := http.Get("http://127.0.0.1:56444") if err != nil { return "", err } @@ -229,7 +229,7 @@ func issue571Http() (string, error) { // get request and return body func issue571Metrics() (string, error) { - r, err := http.Get("http://localhost:23557") + r, err := http.Get("http://127.0.0.1:23557") if err != nil { return "", err } @@ -989,7 +989,7 @@ func TestHTTPMetrics(t *testing.T) { mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes() mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes() - mockLogger.EXPECT().Debug("200 GET http://localhost:13223/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1) + mockLogger.EXPECT().Debug("200 GET http://127.0.0.1:13223/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1) err = cont.RegisterAll( cfg, @@ -1056,7 +1056,7 @@ func TestHTTPMetrics(t *testing.T) { } func echoHTTP(t *testing.T) { - req, err := http.NewRequest("GET", "http://localhost:13223", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:13223", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) diff --git a/tests/plugins/redis/redis_plugin_test.go b/tests/plugins/redis/redis_plugin_test.go index 96a191a1..1b84e339 100644 --- a/tests/plugins/redis/redis_plugin_test.go +++ b/tests/plugins/redis/redis_plugin_test.go @@ -21,7 +21,7 @@ func redisConfig(port string) string { cfg := ` redis: addrs: - - 'localhost:%s' + - '127.0.0.1:%s' master_name: '' username: '' password: '' diff --git a/tests/plugins/resetter/test_plugin.go b/tests/plugins/resetter/test_plugin.go index 61942516..5c26cbd0 100644 --- a/tests/plugins/resetter/test_plugin.go +++ b/tests/plugins/resetter/test_plugin.go @@ -9,7 +9,7 @@ import ( "github.com/spiral/roadrunner/v2/plugins/server" ) -var testPoolConfig = poolImpl.Config{ +var testPoolConfig = &poolImpl.Config{ NumWorkers: 10, MaxJobs: 100, 
AllocateTimeout: time.Second * 10, diff --git a/tests/plugins/server/configs/.rr-tcp.yaml b/tests/plugins/server/configs/.rr-tcp.yaml index 4582482f..6b9c9ddb 100644 --- a/tests/plugins/server/configs/.rr-tcp.yaml +++ b/tests/plugins/server/configs/.rr-tcp.yaml @@ -5,7 +5,7 @@ server: env: - RR_CONFIG: "/some/place/on/the/C134" - RR_CONFIG2: "C138" - relay: "tcp://localhost:9999" + relay: "tcp://127.0.0.1:9999" relay_timeout: "20s" logs: mode: development diff --git a/tests/plugins/server/plugin_pipes.go b/tests/plugins/server/plugin_pipes.go index f1c13734..d136da1e 100644 --- a/tests/plugins/server/plugin_pipes.go +++ b/tests/plugins/server/plugin_pipes.go @@ -15,7 +15,7 @@ import ( const ConfigSection = "server" const Response = "test" -var testPoolConfig = pool.Config{ +var testPoolConfig = &pool.Config{ NumWorkers: 10, MaxJobs: 100, AllocateTimeout: time.Second * 10, @@ -45,7 +45,7 @@ func (f *Foo) Serve() chan error { const op = errors.Op("serve") // test payload for echo - r := payload.Payload{ + r := &payload.Payload{ Context: nil, Body: []byte(Response), } diff --git a/tests/plugins/server/plugin_sockets.go b/tests/plugins/server/plugin_sockets.go index 0b2857e3..143a604c 100644 --- a/tests/plugins/server/plugin_sockets.go +++ b/tests/plugins/server/plugin_sockets.go @@ -30,7 +30,7 @@ func (f *Foo2) Serve() chan error { conf := &server.Config{} // test payload for echo - r := payload.Payload{ + r := &payload.Payload{ Context: nil, Body: []byte(Response), } diff --git a/tests/plugins/server/plugin_tcp.go b/tests/plugins/server/plugin_tcp.go index ef4cea39..57a2e6ea 100644 --- a/tests/plugins/server/plugin_tcp.go +++ b/tests/plugins/server/plugin_tcp.go @@ -30,7 +30,7 @@ func (f *Foo3) Serve() chan error { conf := &server.Config{} // test payload for echo - r := payload.Payload{ + r := &payload.Payload{ Context: nil, Body: []byte(Response), } diff --git a/tests/plugins/server/tcp.php b/tests/plugins/server/tcp.php index 873f25b2..acc1e1a5 100644 --- a/tests/plugins/server/tcp.php +++ b/tests/plugins/server/tcp.php @@ -8,7 +8,7 @@ use Spiral\RoadRunner; require dirname(__DIR__) . 
"/../vendor/autoload.php"; -$relay = new Goridge\SocketRelay("localhost", 9999); +$relay = new Goridge\SocketRelay("127.0.0.1", 9999); $rr = new RoadRunner\Worker($relay); while ($in = $rr->waitPayload()) { diff --git a/tests/plugins/service/placeholder.go b/tests/plugins/service/placeholder.go deleted file mode 100644 index 6d43c336..00000000 --- a/tests/plugins/service/placeholder.go +++ /dev/null @@ -1 +0,0 @@ -package service diff --git a/tests/plugins/status/plugin_test.go b/tests/plugins/status/plugin_test.go index 663f4ee3..227cfd46 100644 --- a/tests/plugins/status/plugin_test.go +++ b/tests/plugins/status/plugin_test.go @@ -345,7 +345,7 @@ func TestReadinessRPCWorkerNotReady(t *testing.T) { func doHTTPReq(t *testing.T) { go func() { - req, err := http.NewRequest("GET", "http://localhost:11933", nil) + req, err := http.NewRequest("GET", "http://127.0.0.1:11933", nil) assert.NoError(t, err) r, err := http.DefaultClient.Do(req) diff --git a/tests/plugins/websockets/configs/.rr-websockets-allow.yaml b/tests/plugins/websockets/configs/.rr-websockets-allow.yaml index e6c43857..900094a4 100644 --- a/tests/plugins/websockets/configs/.rr-websockets-allow.yaml +++ b/tests/plugins/websockets/configs/.rr-websockets-allow.yaml @@ -21,7 +21,7 @@ http: redis: addrs: - - "localhost:6379" + - "127.0.0.1:6379" broadcast: test: diff --git a/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml b/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml index d537a80b..43f4b2ec 100644 --- a/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml +++ b/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml @@ -21,13 +21,13 @@ http: redis: addrs: - - "localhost:6379" + - "127.0.0.1:6379" broadcast: test: driver: redis addrs: - - "localhost:6379" + - "127.0.0.1:6379" websockets: broker: test diff --git a/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml b/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml index 4deea30a..e0bdf993 100644 --- a/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml +++ b/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml @@ -23,7 +23,7 @@ broadcast: test: driver: redis addrs: - - "localhost:6379" + - "127.0.0.1:6379" websockets: broker: test diff --git a/tests/plugins/websockets/configs/.rr-websockets-redis.yaml b/tests/plugins/websockets/configs/.rr-websockets-redis.yaml index 3557f5f1..e3d5f0b8 100644 --- a/tests/plugins/websockets/configs/.rr-websockets-redis.yaml +++ b/tests/plugins/websockets/configs/.rr-websockets-redis.yaml @@ -21,7 +21,7 @@ http: redis: addrs: - - "localhost:6379" + - "127.0.0.1:6379" broadcast: test: diff --git a/tests/plugins/websockets/websocket_plugin_test.go b/tests/plugins/websockets/websocket_plugin_test.go index 53b6a572..bfdc980b 100644 --- a/tests/plugins/websockets/websocket_plugin_test.go +++ b/tests/plugins/websockets/websocket_plugin_test.go @@ -443,7 +443,7 @@ func RPCWsMemoryStop(port string) func(t *testing.T) { HandshakeTimeout: time.Second * 20, } - connURL := url.URL{Scheme: "ws", Host: "localhost:" + port, Path: "/ws"} + connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"} c, resp, err := da.Dial(connURL.String(), nil) assert.NotNil(t, resp) @@ -613,7 +613,7 @@ func wsInit(t *testing.T) { HandshakeTimeout: time.Second * 20, } - connURL := url.URL{Scheme: "ws", Host: "localhost:11111", Path: "/ws"} + connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:11111", Path: "/ws"} c, resp, err := da.Dial(connURL.String(), nil) assert.NoError(t, err) @@ -648,7 +648,7 @@ func 
RPCWsPubAsync(port string) func(t *testing.T) { HandshakeTimeout: time.Second * 20, } - connURL := url.URL{Scheme: "ws", Host: "localhost:" + port, Path: "/ws"} + connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"} c, resp, err := da.Dial(connURL.String(), nil) assert.NoError(t, err) @@ -725,7 +725,7 @@ func RPCWsPub(port string) func(t *testing.T) { HandshakeTimeout: time.Second * 20, } - connURL := url.URL{Scheme: "ws", Host: "localhost:" + port, Path: "/ws"} + connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"} c, resp, err := da.Dial(connURL.String(), nil) assert.NoError(t, err) @@ -802,7 +802,7 @@ func RPCWsDeny(port string) func(t *testing.T) { HandshakeTimeout: time.Second * 20, } - connURL := url.URL{Scheme: "ws", Host: "localhost:" + port, Path: "/ws"} + connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"} c, resp, err := da.Dial(connURL.String(), nil) assert.NoError(t, err) diff --git a/tests/psr-worker-bench.php b/tests/psr-worker-bench.php index d0c72eae..80fc435c 100644 --- a/tests/psr-worker-bench.php +++ b/tests/psr-worker-bench.php @@ -1,59 +1,28 @@ <?php - -declare(strict_types=1); - +/** + * @var Goridge\RelayInterface $relay + */ +use Spiral\Goridge; use Spiral\RoadRunner; -use Nyholm\Psr7\Factory; ini_set('display_errors', 'stderr'); -include "vendor/autoload.php"; - -$env = \Spiral\RoadRunner\Environment::fromGlobals(); - -if ($env->getMode() === 'http') { - $worker = new RoadRunner\Http\PSR7Worker( - RoadRunner\Worker::create(), - new Factory\Psr17Factory(), - new Factory\Psr17Factory(), - new Factory\Psr17Factory() - ); - - while ($req = $worker->waitRequest()) { - try { - $rsp = new \Nyholm\Psr7\Response(); - $rsp->getBody()->write("hello world"); - $worker->respond($rsp); - } catch (\Throwable $e) { - $worker->getWorker()->error((string)$e); - } +require __DIR__ . "/vendor/autoload.php"; + +$worker = RoadRunner\Worker::create(); +$psr7 = new RoadRunner\Http\PSR7Worker( + $worker, + new \Nyholm\Psr7\Factory\Psr17Factory(), + new \Nyholm\Psr7\Factory\Psr17Factory(), + new \Nyholm\Psr7\Factory\Psr17Factory() +); + +while ($req = $psr7->waitRequest()) { + try { + $resp = new \Nyholm\Psr7\Response(); + $resp->getBody()->write("hello world"); + + $psr7->respond($resp); + } catch (\Throwable $e) { + $psr7->getWorker()->error((string)$e); } -} else { - /** - * @param string $dir - * @return array<string> - */ - $getClasses = static function (string $dir): iterable { - $files = glob($dir . '/*.php'); - - foreach ($files as $file) { - yield substr(basename($file), 0, -4); - } - }; - - $factory = \Temporal\WorkerFactory::create(); - - $worker = $factory->newWorker('default'); - - // register all workflows - foreach ($getClasses(__DIR__ . '/src/Workflow') as $name) { - $worker->registerWorkflowTypes('Temporal\\Tests\\Workflow\\' . $name); - } - - // register all activity - foreach ($getClasses(__DIR__ . '/src/Activity') as $name) { - $class = 'Temporal\\Tests\\Activity\\' . $name; - $worker->registerActivityImplementations(new $class); - } - - $factory->run(); -}
\ No newline at end of file +} diff --git a/tests/slow-client.php b/tests/slow-client.php index 7737f0b1..c21b45d2 100644 --- a/tests/slow-client.php +++ b/tests/slow-client.php @@ -17,7 +17,7 @@ switch ($goridge) { break; case "tcp": - $relay = new Goridge\SocketRelay("localhost", 9007); + $relay = new Goridge\SocketRelay("127.0.0.1", 9007); break; case "unix": diff --git a/tests/slow-destroy.php b/tests/slow-destroy.php index 900bb68a..2edbc0db 100644 --- a/tests/slow-destroy.php +++ b/tests/slow-destroy.php @@ -17,7 +17,7 @@ switch ($goridge) { break; case "tcp": - $relay = new Goridge\SocketRelay("localhost", 9007); + $relay = new Goridge\SocketRelay("127.0.0.1", 9007); break; case "unix": diff --git a/tests/worker-cors.php b/tests/worker-cors.php new file mode 100644 index 00000000..ea3c986c --- /dev/null +++ b/tests/worker-cors.php @@ -0,0 +1,15 @@ +<?php + +use Spiral\RoadRunner\Worker; +use Spiral\RoadRunner\Http\HttpWorker; + +ini_set('display_errors', 'stderr'); +require __DIR__ . '/vendor/autoload.php'; + +$http = new HttpWorker(Worker::create()); + +while ($req = $http->waitRequest()) { + $http->respond(200, 'Response', [ + 'Access-Control-Allow-Origin' => ['*'] + ]); +} diff --git a/utils/to_ptr.go b/utils/to_ptr.go new file mode 100644 index 00000000..7c93ef46 --- /dev/null +++ b/utils/to_ptr.go @@ -0,0 +1,467 @@ +package utils + +import "time" + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. 
+func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. 
+func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. 
+func TimeSlice(vs []time.Time) []*time.Time {
+ ps := make([]*time.Time, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ ps := make(map[string]*time.Time, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
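The helpers added in utils/to_ptr.go follow the AWS-SDK pointer-conversion style: each Slice/Map variant copies the loop value into a fresh variable ("vv := v") before taking its address, so every returned pointer refers to its own copy rather than to the shared loop variable. A minimal usage sketch, assuming only what the diff shows (the import path follows this repository's module; the variable names and values are illustrative):

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/utils"
)

func main() {
	// Optional scalar settings become pointers without a named temporary.
	visibility := utils.Int64(30) // *int64 pointing at a copy of 30

	// Each element of the result points at its own copy of the input value.
	queues := utils.StringSlice([]string{"test-1", "test-2"})

	fmt.Println(*visibility) // 30
	for _, q := range queues {
		fmt.Println(*q) // test-1, then test-2
	}
}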