-rw-r--r--  .changes  |  1
-rw-r--r--  .dockerignore  |  2
-rwxr-xr-x [-rw-r--r--]  .github/ISSUE_TEMPLATE/bug_report.md  |  0
-rwxr-xr-x [-rw-r--r--]  .github/ISSUE_TEMPLATE/feature_request.md  |  0
-rw-r--r--  .github/dependabot.yml  |  12
-rw-r--r--  .github/workflows/build.yml  |  162
-rw-r--r--  .github/workflows/release.yml  |  8
-rwxr-xr-x [-rw-r--r--]  .gitignore  |  21
-rwxr-xr-x  .golangci.yml  |  61
-rwxr-xr-x [-rw-r--r--]  .rr.yaml  |  308
-rw-r--r--  .vscode/launch.json  |  25
-rw-r--r--  .vscode/settings.json  |  15
-rw-r--r--  CHANGELOG.md  |  6
-rwxr-xr-x [-rw-r--r--]  CODE_OF_CONDUCT.md  |  0
-rw-r--r--  Dockerfile  |  4
-rwxr-xr-x [-rw-r--r--]  LICENSE  |  0
-rwxr-xr-x  Makefile  |  56
-rw-r--r--  README.md  |  49
-rwxr-xr-x  bin/rr  |  312
-rwxr-xr-x [-rw-r--r--]  bors.toml  |  26
-rw-r--r--  cmd/cli/reset.go  |  107
-rw-r--r--  cmd/cli/root.go  |  101
-rw-r--r--  cmd/cli/serve.go  |  63
-rw-r--r--  cmd/cli/version.go (renamed from cmd/rr/cmd/version.go)  |  6
-rw-r--r--  cmd/cli/workers.go  |  110
-rw-r--r--  cmd/main.go  |  58
-rw-r--r--  cmd/rr/LICENSE  |  21
-rw-r--r--  cmd/rr/cmd/root.go  |  159
-rw-r--r--  cmd/rr/cmd/serve.go  |  64
-rw-r--r--  cmd/rr/cmd/stop.go  |  51
-rw-r--r--  cmd/rr/http/debug.go  |  139
-rw-r--r--  cmd/rr/http/metrics.go  |  124
-rw-r--r--  cmd/rr/http/reset.go  |  53
-rw-r--r--  cmd/rr/http/workers.go  |  101
-rw-r--r--  cmd/rr/limit/debug.go  |  71
-rw-r--r--  cmd/rr/limit/metrics.go  |  63
-rw-r--r--  cmd/rr/main.go  |  59
-rw-r--r--  cmd/util/config.go  |  182
-rw-r--r--  cmd/util/cprint.go  |  48
-rw-r--r--  cmd/util/debug.go  |  62
-rw-r--r--  cmd/util/exit.go  |  15
-rw-r--r--  cmd/util/rpc.go  |  19
-rw-r--r--  codecov.yml  |  7
-rw-r--r--  composer.json  |  44
-rw-r--r--  config.go  |  56
-rw-r--r--  config_test.go  |  52
-rw-r--r--  controller.go  |  16
-rw-r--r--  controller_test.go  |  217
-rw-r--r--  dput.cf  |  5
-rw-r--r--  error_buffer.go  |  113
-rw-r--r--  error_buffer_test.go  |  132
-rw-r--r--  errors.go  |  24
-rw-r--r--  errors_test.go  |  18
-rw-r--r--  factory.go  |  13
-rwxr-xr-x [-rw-r--r--]  go.mod  |  34
-rwxr-xr-x [-rw-r--r--]  go.sum  |  200
-rw-r--r--  interfaces/events/handler.go  |  14
-rw-r--r--  interfaces/events/pool_events.go  |  65
-rw-r--r--  interfaces/events/worker_events.go  |  33
-rw-r--r--  interfaces/pool/pool.go  |  100
-rw-r--r--  interfaces/worker/factory.go  |  20
-rw-r--r--  interfaces/worker/watcher.go  |  26
-rw-r--r--  interfaces/worker/worker.go  |  59
-rwxr-xr-x  internal/protocol.go  |  94
-rwxr-xr-x  internal/state.go  |  122
-rwxr-xr-x  internal/state_test.go  |  27
-rw-r--r--  phpstan.neon.dist  |  4
-rw-r--r--  pipe_factory.go  |  79
-rw-r--r--  pipe_factory_test.go  |  164
-rwxr-xr-x  pkg/events/events.go  |  41
-rwxr-xr-x [-rw-r--r--]  pkg/payload/payload.go (renamed from payload.go)  |  6
-rwxr-xr-x  pkg/pipe/pipe_factory.go  |  163
-rwxr-xr-x  pkg/pipe/pipe_factory_test.go  |  510
-rw-r--r--  pkg/pool/config.go  |  75
-rwxr-xr-x  pkg/pool/static_pool.go  |  324
-rwxr-xr-x  pkg/pool/static_pool_test.go  |  570
-rwxr-xr-x  pkg/pool/supervisor_pool.go  |  223
-rw-r--r--  pkg/pool/supervisor_test.go  |  196
-rwxr-xr-x  pkg/socket/socket_factory.go  |  229
-rwxr-xr-x [-rw-r--r--]  pkg/socket/socket_factory_test.go (renamed from socket_factory_test.go)  |  246
-rwxr-xr-x  pkg/worker/sync_worker.go  |  224
-rwxr-xr-x  pkg/worker/sync_worker_test.go  |  37
-rwxr-xr-x  pkg/worker/worker.go  |  315
-rwxr-xr-x  pkg/worker/worker_test.go  |  19
-rwxr-xr-x  pkg/worker_watcher/worker_watcher.go  |  299
-rw-r--r--  plugins/checker/config.go  |  5
-rw-r--r--  plugins/checker/interface.go  |  11
-rw-r--r--  plugins/checker/plugin.go  |  151
-rw-r--r--  plugins/checker/rpc.go  |  27
-rw-r--r--  plugins/config/interface.go  |  26
-rwxr-xr-x  plugins/config/plugin.go  |  84
-rw-r--r--  plugins/doc/graphviz.svg  |  169
-rw-r--r--  plugins/gzip/plugin.go  |  25
-rw-r--r--  plugins/headers/config.go (renamed from service/headers/config.go)  |  21
-rw-r--r--  plugins/headers/plugin.go  |  117
-rw-r--r--  plugins/http/attributes/attributes.go (renamed from service/http/attributes/attributes.go)  |  21
-rw-r--r--  plugins/http/config.go (renamed from service/http/config.go)  |  142
-rw-r--r--  plugins/http/constants.go (renamed from service/http/constants.go)  |  4
-rw-r--r--  plugins/http/errors.go (renamed from service/http/errors.go)  |  0
-rw-r--r--  plugins/http/errors_windows.go (renamed from service/http/errors_windows.go)  |  0
-rw-r--r--  plugins/http/handler.go (renamed from service/http/handler.go)  |  117
-rw-r--r--  plugins/http/parse.go (renamed from service/http/parse.go)  |  10
-rw-r--r--  plugins/http/plugin.go  |  532
-rw-r--r--  plugins/http/request.go (renamed from service/http/request.go)  |  31
-rw-r--r--  plugins/http/response.go (renamed from service/http/response.go)  |  27
-rw-r--r--  plugins/http/uploads.go (renamed from service/http/uploads.go)  |  23
-rw-r--r--  plugins/http/uploads_config.go (renamed from service/http/uploads_config.go)  |  1
-rw-r--r--  plugins/informer/interface.go  |  8
-rw-r--r--  plugins/informer/plugin.go  |  55
-rw-r--r--  plugins/informer/rpc.go  |  54
-rw-r--r--  plugins/kv/boltdb/config.go  |  24
-rw-r--r--  plugins/kv/boltdb/plugin.go  |  452
-rw-r--r--  plugins/kv/boltdb/plugin_unit_test.go  |  531
-rw-r--r--  plugins/kv/interface.go  |  41
-rw-r--r--  plugins/kv/memcached/config.go  |  10
-rw-r--r--  plugins/kv/memcached/plugin.go  |  252
-rw-r--r--  plugins/kv/memcached/plugin_unit_test.go  |  432
-rw-r--r--  plugins/kv/memory/config.go  |  15
-rw-r--r--  plugins/kv/memory/plugin.go  |  262
-rw-r--r--  plugins/kv/memory/plugin_unit_test.go  |  473
-rw-r--r--  plugins/kv/rpc.go  |  110
-rw-r--r--  plugins/logger/config.go  |  94
-rw-r--r--  plugins/logger/encoder.go  |  66
-rw-r--r--  plugins/logger/interface.go  |  16
-rw-r--r--  plugins/logger/plugin.go  |  69
-rw-r--r--  plugins/logger/zap_adapter.go  |  56
-rw-r--r--  plugins/metrics/config.go (renamed from service/metrics/config.go)  |  10
-rw-r--r--  plugins/metrics/config_test.go (renamed from service/metrics/config_test.go)  |  51
-rw-r--r--  plugins/metrics/doc.go  |  1
-rw-r--r--  plugins/metrics/interface.go  |  7
-rw-r--r--  plugins/metrics/plugin.go  |  229
-rw-r--r--  plugins/metrics/rpc.go  |  294
-rw-r--r--  plugins/redis/config.go  |  32
-rw-r--r--  plugins/redis/interface.go  |  9
-rw-r--r--  plugins/redis/plugin.go  |  75
-rw-r--r--  plugins/reload/config.go (renamed from service/reload/config.go)  |  27
-rw-r--r--  plugins/reload/plugin.go  |  159
-rw-r--r--  plugins/reload/watcher.go (renamed from service/reload/watcher.go)  |  46
-rw-r--r--  plugins/resetter/interface.go  |  17
-rw-r--r--  plugins/resetter/plugin.go  |  80
-rw-r--r--  plugins/resetter/rpc.go  |  30
-rw-r--r--  plugins/rpc/config.go (renamed from service/rpc/config.go)  |  26
-rw-r--r--  plugins/rpc/doc/plugin_arch.drawio  |  1
-rw-r--r--  plugins/rpc/interface.go  |  7
-rw-r--r--  plugins/rpc/plugin.go  |  161
-rw-r--r--  plugins/server/config.go  |  147
-rw-r--r--  plugins/server/interface.go  |  21
-rw-r--r--  plugins/server/plugin.go  |  257
-rw-r--r--  plugins/static/config.go  |  76
-rw-r--r--  plugins/static/plugin.go  |  110
-rw-r--r--  pool.go  |  39
-rw-r--r--  protocol.go  |  52
-rw-r--r--  protocol_test.go  |  47
-rwxr-xr-x  rr  |  bin 0 -> 18108416 bytes
-rw-r--r--  server.go  |  257
-rw-r--r--  server_config.go  |  169
-rw-r--r--  server_config_test.go  |  175
-rw-r--r--  server_test.go  |  254
-rw-r--r--  service/container.go  |  372
-rw-r--r--  service/container_test.go  |  534
-rw-r--r--  service/entry.go  |  59
-rw-r--r--  service/entry_test.go  |  17
-rw-r--r--  service/env/config.go  |  22
-rw-r--r--  service/env/config_test.go  |  42
-rw-r--r--  service/env/environment.go  |  23
-rw-r--r--  service/env/service.go  |  55
-rw-r--r--  service/env/service_test.go  |  81
-rw-r--r--  service/gzip/config.go  |  22
-rw-r--r--  service/gzip/config_test.go  |  47
-rw-r--r--  service/gzip/service.go  |  38
-rw-r--r--  service/gzip/service_test.go  |  73
-rw-r--r--  service/headers/config_test.go  |  31
-rw-r--r--  service/headers/service.go  |  114
-rw-r--r--  service/headers/service_test.go  |  341
-rw-r--r--  service/health/config.go  |  32
-rw-r--r--  service/health/config_test.go  |  47
-rw-r--r--  service/health/service.go  |  117
-rw-r--r--  service/health/service_test.go  |  318
-rw-r--r--  service/http/config_test.go  |  330
-rw-r--r--  service/http/fcgi_test.go  |  106
-rw-r--r--  service/http/h2c_test.go  |  83
-rw-r--r--  service/http/rpc.go  |  34
-rw-r--r--  service/http/rpc_test.go  |  220
-rw-r--r--  service/http/service.go  |  432
-rw-r--r--  service/http/service_test.go  |  757
-rw-r--r--  service/http/ssl_test.go  |  254
-rw-r--r--  service/limit/config.go  |  49
-rw-r--r--  service/limit/config_test.go  |  52
-rw-r--r--  service/limit/controller.go  |  167
-rw-r--r--  service/limit/service.go  |  39
-rw-r--r--  service/limit/service_test.go  |  498
-rw-r--r--  service/limit/state_filter.go  |  59
-rw-r--r--  service/metrics/rpc.go  |  263
-rw-r--r--  service/metrics/rpc_test.go  |  862
-rw-r--r--  service/metrics/service.go  |  191
-rw-r--r--  service/metrics/service_test.go  |  248
-rw-r--r--  service/reload/service.go  |  163
-rw-r--r--  service/reload/watcher_test.go  |  578
-rw-r--r--  service/rpc/service.go  |  125
-rw-r--r--  service/rpc/service_test.go  |  97
-rw-r--r--  service/rpc/system.go  |  18
-rw-r--r--  service/static/config.go  |  83
-rw-r--r--  service/static/config_test.go  |  46
-rw-r--r--  service/static/service.go  |  88
-rw-r--r--  service/static/service_test.go  |  530
-rw-r--r--  socket_factory.go  |  140
-rw-r--r--  src/Diactoros/ServerRequestFactory.php  |  28
-rw-r--r--  src/Diactoros/StreamFactory.php  |  57
-rw-r--r--  src/Diactoros/UploadedFileFactory.php  |  36
-rw-r--r--  src/Exception/MetricException.php  |  17
-rw-r--r--  src/Exception/RoadRunnerException.php  |  14
-rw-r--r--  src/Exceptions/RoadRunnerException.php  |  18
-rw-r--r--  src/HttpClient.php  |  74
-rw-r--r--  src/Metrics.php  |  80
-rw-r--r--  src/MetricsInterface.php  |  64
-rw-r--r--  src/PSR7Client.php  |  217
-rw-r--r--  src/Worker.php  |  178
-rw-r--r--  state.go  |  98
-rw-r--r--  state_test.go  |  27
-rw-r--r--  static_pool.go  |  374
-rw-r--r--  static_pool_test.go  |  495
-rwxr-xr-x [-rw-r--r--]  systemd/rr.service  |  0
-rw-r--r--  tests/broken.php  |  4
-rw-r--r--  tests/client.php  |  2
-rw-r--r--  tests/composer.json  |  13
-rw-r--r--  tests/delay.php  |  4
-rw-r--r--  tests/docker-compose.yaml  |  7
-rw-r--r--  tests/echo.php  |  4
-rw-r--r--  tests/error.php  |  4
-rw-r--r--  tests/head.php  |  4
-rw-r--r--  tests/http/client.php  |  14
-rw-r--r--  tests/http/slow-client.php  |  18
-rw-r--r--  tests/http/upload.php  |  2
-rw-r--r--  tests/memleak.php  |  15
-rw-r--r--  tests/mocks/mock_log.go  |  150
-rw-r--r--  tests/pid.php  |  2
-rwxr-xr-x  tests/plugins/checker/configs/.rr-checker-init.yaml  |  31
-rw-r--r--  tests/plugins/checker/plugin_test.go  |  190
-rwxr-xr-x  tests/plugins/config/.rr.yaml  |  21
-rwxr-xr-x  tests/plugins/config/config_test.go  |  64
-rwxr-xr-x  tests/plugins/config/plugin1.go  |  96
-rw-r--r--  tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml  |  25
-rw-r--r--  tests/plugins/gzip/configs/.rr-http-withGzip.yaml  |  25
-rw-r--r--  tests/plugins/gzip/plugin_test.go  |  176
-rw-r--r--  tests/plugins/headers/configs/.rr-cors-headers.yaml  |  39
-rw-r--r--  tests/plugins/headers/configs/.rr-headers-init.yaml  |  39
-rw-r--r--  tests/plugins/headers/configs/.rr-req-headers.yaml  |  32
-rw-r--r--  tests/plugins/headers/configs/.rr-res-headers.yaml  |  32
-rw-r--r--  tests/plugins/headers/headers_plugin_test.go  |  367
-rw-r--r--  tests/plugins/http/attributes_test.go (renamed from service/http/attributes/attributes_test.go)  |  42
-rw-r--r--  tests/plugins/http/configs/.rr-broken-pipes.yaml  |  31
-rw-r--r--  tests/plugins/http/configs/.rr-env.yaml  |  33
-rw-r--r--  tests/plugins/http/configs/.rr-fcgi-reqUri.yaml  |  38
-rw-r--r--  tests/plugins/http/configs/.rr-fcgi.yaml  |  38
-rw-r--r--  tests/plugins/http/configs/.rr-h2c.yaml  |  29
-rw-r--r--  tests/plugins/http/configs/.rr-http.yaml  |  31
-rw-r--r--  tests/plugins/http/configs/.rr-init.yaml  |  43
-rw-r--r--  tests/plugins/http/configs/.rr-resetter.yaml  |  30
-rw-r--r--  tests/plugins/http/configs/.rr-ssl-push.yaml  |  31
-rw-r--r--  tests/plugins/http/configs/.rr-ssl-redirect.yaml  |  31
-rw-r--r--  tests/plugins/http/configs/.rr-ssl.yaml  |  38
-rw-r--r--  tests/plugins/http/fixtures/server.crt (renamed from service/http/fixtures/server.crt)  |  0
-rw-r--r--  tests/plugins/http/fixtures/server.key (renamed from service/http/fixtures/server.key)  |  0
-rw-r--r--  tests/plugins/http/handler_test.go (renamed from service/http/handler_test.go)  |  1455
-rw-r--r--  tests/plugins/http/http_plugin_test.go  |  1247
-rw-r--r--  tests/plugins/http/parse_test.go (renamed from service/http/parse_test.go)  |  20
-rw-r--r--  tests/plugins/http/plugin1.go  |  27
-rw-r--r--  tests/plugins/http/plugin_middleware.go  |  69
-rw-r--r--  tests/plugins/http/response_test.go (renamed from service/http/response_test.go)  |  31
-rw-r--r--  tests/plugins/http/uploads_config_test.go (renamed from service/http/uploads_config_test.go)  |  7
-rw-r--r--  tests/plugins/http/uploads_test.go (renamed from service/http/uploads_test.go)  |  197
-rw-r--r--  tests/plugins/informer/.rr-informer.yaml  |  16
-rw-r--r--  tests/plugins/informer/informer_test.go  |  108
-rw-r--r--  tests/plugins/informer/test_plugin.go  |  59
-rw-r--r--  tests/plugins/kv/boltdb/configs/.rr-init.yaml  |  46
-rw-r--r--  tests/plugins/kv/boltdb/plugin_test.go  |  195
-rw-r--r--  tests/plugins/kv/memcached/configs/.rr-init.yaml  |  43
-rw-r--r--  tests/plugins/kv/memcached/plugin_test.go  |  195
-rw-r--r--  tests/plugins/kv/memory/configs/.rr-init.yaml  |  45
-rw-r--r--  tests/plugins/kv/memory/plugin_test.go  |  195
-rw-r--r--  tests/plugins/logger/.rr.yaml  |  3
-rw-r--r--  tests/plugins/logger/logger_test.go  |  79
-rw-r--r--  tests/plugins/logger/plugin.go  |  40
-rw-r--r--  tests/plugins/metrics/.rr-test.yaml  |  16
-rw-r--r--  tests/plugins/metrics/docker-compose.yml  |  7
-rw-r--r--  tests/plugins/metrics/metrics_test.go  |  739
-rw-r--r--  tests/plugins/metrics/plugin1.go  |  46
-rw-r--r--  tests/plugins/mocks/mock_log.go  |  150
-rw-r--r--  tests/plugins/redis/plugin1.go  |  43
-rw-r--r--  tests/plugins/redis/redis_plugin_test.go  |  120
-rw-r--r--  tests/plugins/reload/config_test.go (renamed from service/reload/config_test.go)  |  21
-rw-r--r--  tests/plugins/reload/configs/.rr-reload-2.yaml  |  44
-rw-r--r--  tests/plugins/reload/configs/.rr-reload-3.yaml  |  46
-rw-r--r--  tests/plugins/reload/configs/.rr-reload-4.yaml  |  46
-rw-r--r--  tests/plugins/reload/configs/.rr-reload.yaml  |  44
-rw-r--r--  tests/plugins/reload/reload_plugin_test.go  |  827
-rw-r--r--  tests/plugins/resetter/.rr-resetter.yaml  |  16
-rw-r--r--  tests/plugins/resetter/resetter_test.go  |  113
-rw-r--r--  tests/plugins/resetter/test_plugin.go  |  66
-rwxr-xr-x [-rw-r--r--]  tests/plugins/rpc/config_test.go (renamed from service/rpc/config_test.go)  |  63
-rw-r--r--  tests/plugins/rpc/configs/.rr-rpc-disabled.yaml  |  3
-rw-r--r--  tests/plugins/rpc/configs/.rr.yaml  |  5
-rw-r--r--  tests/plugins/rpc/plugin1.go  |  42
-rw-r--r--  tests/plugins/rpc/plugin2.go  |  53
-rw-r--r--  tests/plugins/rpc/rpc_test.go  |  188
-rw-r--r--  tests/plugins/server/configs/.rr-no-app-section.yaml  |  12
-rw-r--r--  tests/plugins/server/configs/.rr-sockets.yaml  |  12
-rw-r--r--  tests/plugins/server/configs/.rr-tcp.yaml  |  12
-rw-r--r--  tests/plugins/server/configs/.rr-wrong-command.yaml  |  12
-rw-r--r--  tests/plugins/server/configs/.rr-wrong-relay.yaml  |  12
-rw-r--r--  tests/plugins/server/configs/.rr.yaml  |  12
-rw-r--r--  tests/plugins/server/plugin_pipes.go  |  133
-rw-r--r--  tests/plugins/server/plugin_sockets.go  |  113
-rw-r--r--  tests/plugins/server/plugin_tcp.go  |  113
-rw-r--r--  tests/plugins/server/server_plugin_test.go  |  361
-rw-r--r--  tests/plugins/server/socket.php  |  25
-rw-r--r--  tests/plugins/server/tcp.php  |  20
-rw-r--r--  tests/plugins/static/config_test.go  |  49
-rw-r--r--  tests/plugins/static/configs/.rr-http-static-disabled.yaml  |  33
-rw-r--r--  tests/plugins/static/configs/.rr-http-static-files-disable.yaml  |  33
-rw-r--r--  tests/plugins/static/configs/.rr-http-static-files.yaml  |  34
-rw-r--r--  tests/plugins/static/configs/.rr-http-static.yaml  |  31
-rw-r--r--  tests/plugins/static/static_plugin_test.go  |  437
-rw-r--r--  tests/psr-worker-bench.php  |  28
-rw-r--r--  tests/psr-worker.php  |  28
-rw-r--r--  tests/sleep.php  |  15
-rw-r--r--  tests/slow-client.php  |  2
-rw-r--r--  tests/slow-destroy.php  |  2
-rw-r--r--  tests/slow-pid.php  |  2
-rw-r--r--  tests/stop.php  |  2
-rw-r--r--  tools/process.go  |  44
-rw-r--r--  tools/worker_table.go (renamed from cmd/util/table.go)  |  36
-rw-r--r--  util/state.go  |  63
-rw-r--r--  util/state_test.go  |  37
-rwxr-xr-x  utils/doc.go  |  5
-rwxr-xr-x [-rw-r--r--]  utils/isolate.go (renamed from osutil/isolate.go)  |  17
-rwxr-xr-x [-rw-r--r--]  utils/isolate_win.go (renamed from osutil/isolate_win.go)  |  2
-rwxr-xr-x [-rw-r--r--]  utils/network.go (renamed from util/network.go)  |  24
-rwxr-xr-x [-rw-r--r--]  utils/network_test.go (renamed from util/network_test.go)  |  2
-rwxr-xr-x [-rw-r--r--]  utils/network_windows.go (renamed from util/network_windows.go)  |  2
-rwxr-xr-x [-rw-r--r--]  utils/network_windows_test.go (renamed from util/network_windows_test.go)  |  2
-rw-r--r--  worker.go  |  261
-rw-r--r--  worker_test.go  |  249
343 files changed, 20320 insertions, 16590 deletions
diff --git a/.changes b/.changes
new file mode 100644
index 00000000..345e6aef
--- /dev/null
+++ b/.changes
@@ -0,0 +1 @@
+Test
diff --git a/.dockerignore b/.dockerignore
index b817b3c8..bfa82a3d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -7,4 +7,4 @@
/tests
/bin
composer.json
-vendor_php
+vendor_php \ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 448a1aa4..448a1aa4 100644..100755
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 8c4b568f..8c4b568f 100644..100755
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..6143fb25
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+ - package-ecosystem: "" # See documentation for possible values
+ directory: "/" # Location of package manifests
+ schedule:
+ interval: "daily"
+
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 8f4dc73b..1f67b944 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -8,65 +8,19 @@ on:
# this job on pull_request events for branches that look like fork
# branches. Without this we would end up running this job twice for non
# forked PRs, once for the push and then once for opening the PR.
- - '**:**'
+ - "**:**"
jobs:
- php:
- name: Build (PHP ${{ matrix.php }}, ${{ matrix.setup }} setup)
- runs-on: ubuntu-20.04
- timeout-minutes: 10
- strategy:
- fail-fast: false
- matrix:
- php: [ '7.2', '7.3', '7.4', '8.0' ]
- setup: [ basic, lowest ]
- steps:
- - name: Set up PHP ${{ matrix.php }}
- uses: shivammathur/setup-php@v2 # action page: <https://github.com/shivammathur/setup-php>
- with:
- php-version: ${{ matrix.php }}
-
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Syntax check only (lint)
- run: find ./src/ ./tests/ -name "*.php" -print0 | xargs -0 -n1 -P8 php -l
-
- - name: Get Composer Cache Directory
- id: composer-cache
- run: echo "::set-output name=dir::$(composer config cache-files-dir)"
-
- - name: Init Composer Cache # Docs: <https://git.io/JfAKn#php---composer>
- uses: actions/cache@v2
- with:
- path: ${{ steps.composer-cache.outputs.dir }}
- key: ${{ runner.os }}-composer-${{ matrix.setup }}-${{ hashFiles('**/composer.json') }}
- restore-keys: ${{ runner.os }}-composer-
-
- - name: Install lowest Composer dependencies
- if: matrix.setup == 'lowest'
- run: composer update --prefer-dist --no-progress --prefer-lowest --ansi
-
- - name: Install basic Composer dependencies
- if: matrix.setup == 'basic'
- run: composer update --prefer-dist --no-progress --ansi
-
- - name: Analyze PHP sources
- run: composer analyze
-
- # TODO write phpunit tests
- #- name: Analyze PHP sources
- # run: composer test
-
golang:
- name: Build (Go ${{ matrix.go }}, PHP ${{ matrix.php }})
- runs-on: ubuntu-20.04
- timeout-minutes: 20
+ name: Build (Go ${{ matrix.go }}, PHP ${{ matrix.php }}, OS ${{matrix.os}})
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
strategy:
fail-fast: false
matrix:
- php: [ '7.2', '7.3', '7.4', '8.0' ]
- go: [ '1.14', '1.15' ]
+ php: ["7.4", "8.0"]
+ go: ["1.14", "1.15"]
+ os: [ubuntu-latest, windows-latest]
steps:
- name: Set up Go ${{ matrix.go }}
uses: actions/setup-go@v2 # action page: <https://github.com/actions/setup-go>
@@ -77,15 +31,18 @@ jobs:
uses: shivammathur/setup-php@v2 # action page: <https://github.com/shivammathur/setup-php>
with:
php-version: ${{ matrix.php }}
+ extensions: sockets
- name: Check out code
uses: actions/checkout@v2
- name: Get Composer Cache Directory
+ if: ${{ matrix.os != 'windows-latest' }}
id: composer-cache
run: echo "::set-output name=dir::$(composer config cache-files-dir)"
- name: Init Composer Cache # Docs: <https://git.io/JfAKn#php---composer>
+ if: ${{ matrix.os != 'windows-latest' }}
uses: actions/cache@v2
with:
path: ${{ steps.composer-cache.outputs.dir }}
@@ -93,7 +50,7 @@ jobs:
restore-keys: ${{ runner.os }}-composer-
- name: Install Composer dependencies
- run: composer update --prefer-dist --no-progress --ansi
+ run: cd tests && composer update --prefer-dist --no-progress --ansi
- name: Init Go modules Cache # Docs: <https://git.io/JfAKn#go---modules>
uses: actions/cache@v2
@@ -105,32 +62,77 @@ jobs:
- name: Install Go dependencies
run: go mod download
- - name: Run golang tests
+ - name: Run golang tests on Windows without codecov
+ if: ${{ matrix.os == 'windows-latest' }}
run: |
+ docker-compose -f ./tests/docker-compose.yaml up -d
+ go test -v -race -cover -tags=debug ./utils
+ go test -v -race -cover -tags=debug ./pkg/pipe
+ go test -v -race -cover -tags=debug ./pkg/pool
+ go test -v -race -cover -tags=debug ./pkg/socket
+ go test -v -race -cover -tags=debug ./pkg/worker
+ go test -v -race -cover -tags=debug ./tests/plugins/http
+ go test -v -race -cover -tags=debug ./tests/plugins/informer
+ go test -v -race -cover -tags=debug ./tests/plugins/reload
+ go test -v -race -cover -tags=debug ./tests/plugins/server
+ go test -v -race -cover -tags=debug ./tests/plugins/checker
+ go test -v -race -cover -tags=debug ./tests/plugins/config
+ go test -v -race -cover -tags=debug ./tests/plugins/gzip
+ go test -v -race -cover -tags=debug ./tests/plugins/headers
+ go test -v -race -cover -tags=debug ./tests/plugins/logger
+ go test -v -race -cover -tags=debug ./tests/plugins/metrics
+ go test -v -race -cover -tags=debug ./tests/plugins/redis
+ go test -v -race -cover -tags=debug ./tests/plugins/resetter
+ go test -v -race -cover -tags=debug ./tests/plugins/rpc
+ go test -v -race -cover -tags=debug ./tests/plugins/static
+ go test -v -race -cover -tags=debug ./plugins/kv/boltdb
+ go test -v -race -cover -tags=debug ./plugins/kv/memory
+ go test -v -race -cover -tags=debug ./plugins/kv/memcached
+ go test -v -race -cover -tags=debug ./tests/plugins/kv/boltdb
+ go test -v -race -cover -tags=debug ./tests/plugins/kv/memory
+ go test -v -race -cover -tags=debug ./tests/plugins/kv/memcached
+ docker-compose -f ./tests/docker-compose.yaml down
+
+ - name: Run golang tests on Linux
+ if: ${{ matrix.os == 'ubuntu-latest' }}
+ run: |
+ docker-compose -f ./tests/docker-compose.yaml up -d
mkdir ./coverage-ci
- go test -race -v -covermode=atomic -coverprofile=./coverage-ci/lib.txt
- go test ./util -race -v -covermode=atomic -coverprofile=./coverage-ci/util.txt
- go test ./service -race -v -covermode=atomic -coverprofile=./coverage-ci/service.txt
- go test ./service/env -race -v -covermode=atomic -coverprofile=./coverage-ci/env.txt
- go test ./service/rpc -race -v -covermode=atomic -coverprofile=./coverage-ci/rpc.txt
- go test ./service/http -race -v -covermode=atomic -coverprofile=./coverage-ci/http.txt
- go test ./service/static -race -v -covermode=atomic -coverprofile=./coverage-ci/static.txt
- go test ./service/limit -race -v -covermode=atomic -coverprofile=./coverage-ci/limit.txt
- go test ./service/headers -race -v -covermode=atomic -coverprofile=./coverage-ci/headers.txt
- go test ./service/metrics -race -v -covermode=atomic -coverprofile=./coverage-ci/metrics.txt
- go test ./service/health -race -v -covermode=atomic -coverprofile=./coverage-ci/health.txt
- go test ./service/gzip -race -v -covermode=atomic -coverprofile=./coverage-ci/gzip.txt
- # reload tests temporarily excluded from CI due to intensive work with the file system, and unpredictable GHA
- # go test ./service/reload -race -v -covermode=atomic -coverprofile=./coverage-ci/reload.txt
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/utils.txt -covermode=atomic ./utils
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/pipe.txt -covermode=atomic ./pkg/pipe
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/pool.txt -covermode=atomic ./pkg/pool
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/socket.txt -covermode=atomic ./pkg/socket
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/worker.txt -covermode=atomic ./pkg/worker
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/http.txt -covermode=atomic ./tests/plugins/http
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/informer.txt -covermode=atomic ./tests/plugins/informer
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/reload.txt -covermode=atomic ./tests/plugins/reload
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/server.txt -covermode=atomic ./tests/plugins/server
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/checker.txt -covermode=atomic ./tests/plugins/checker
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/config.txt -covermode=atomic ./tests/plugins/config
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/gzip.txt -covermode=atomic ./tests/plugins/gzip
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/headers.txt -covermode=atomic ./tests/plugins/headers
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/logger.txt -covermode=atomic ./tests/plugins/logger
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/metrics.txt -covermode=atomic ./tests/plugins/metrics
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/redis.txt -covermode=atomic ./tests/plugins/redis
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/resetter.txt -covermode=atomic ./tests/plugins/resetter
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/rpc.txt -covermode=atomic ./tests/plugins/rpc
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/static.txt -covermode=atomic ./tests/plugins/static
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/boltdb_unit.txt -covermode=atomic ./plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/kv_unit.txt -covermode=atomic ./plugins/kv/memory
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/memcached_unit.txt -covermode=atomic ./plugins/kv/memcached
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/boltdb.txt -covermode=atomic ./tests/plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/memory.txt -covermode=atomic ./tests/plugins/kv/memory
+ go test -v -race -cover -tags=debug -coverprofile=./coverage-ci/memcached.txt -covermode=atomic ./tests/plugins/kv/memcached
+ docker-compose -f ./tests/docker-compose.yaml down
cat ./coverage-ci/*.txt > ./coverage-ci/summary.txt
- uses: codecov/codecov-action@v1 # Docs: <https://github.com/codecov/codecov-action>
+ if: ${{ matrix.os == 'ubuntu-latest' }} # codecov only from linux
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./coverage-ci/summary.txt
fail_ci_if_error: false
-
golangci-lint:
name: Golang-CI (lint)
runs-on: ubuntu-latest
@@ -141,23 +143,5 @@ jobs:
- name: Run linter
uses: golangci/golangci-lint-action@v2 # Action page: <https://github.com/golangci/golangci-lint-action>
with:
- version: v1.35 # without patch version
+ version: v1.34 # without patch version
only-new-issues: false # show only new issues if it's a pull request
-
- image:
- name: Build docker image
- runs-on: ubuntu-20.04
- timeout-minutes: 10
- steps:
- - name: Check out code
- uses: actions/checkout@v2
-
- - name: Build image
- run: docker build -t roadrunner:local -f Dockerfile .
-
- - name: Scan image
- uses: anchore/scan-action@v2 # action page: <https://github.com/anchore/scan-action>
- with:
- image: roadrunner:local
- fail-build: true
- severity-cutoff: low # negligible, low, medium, high or critical
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b1cd83ae..785f40ad 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -28,7 +28,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
- go-version: 1.15.5
+ go-version: 1.15.6
- name: Check out code
uses: actions/checkout@v2
@@ -55,10 +55,10 @@ jobs:
CGO_ENABLED: 0
LDFLAGS: >-
-s
- -X github.com/spiral/roadrunner/cmd/rr/cmd.Version=${{ steps.values.outputs.version }}
- -X github.com/spiral/roadrunner/cmd/rr/cmd.BuildTime=${{ steps.values.outputs.timestamp }}
+ -X github.com/spiral/roadrunner/cmd/cli.Version=${{ steps.values.outputs.version }}
+ -X github.com/spiral/roadrunner/cmd/cli.BuildTime=${{ steps.values.outputs.timestamp }}
run: |
- go build -trimpath -ldflags "$LDFLAGS" -o "./${{ steps.values.outputs.binary-name }}" ./cmd/rr/main.go
+ go build -trimpath -ldflags "$LDFLAGS" -o "./${{ steps.values.outputs.binary-name }}" ./cmd/main.go
stat "./${{ steps.values.outputs.binary-name }}"
- name: Generate distributive directory name
diff --git a/.gitignore b/.gitignore
index 8a2f7d68..78c1ee17 100644..100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,20 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
.idea
composer.lock
vendor
@@ -5,4 +22,6 @@ vendor_php
builds/
tests/vendor/
.rr-sample.yaml
-psr-worker.php
+unit_tests
+unit_tests_copied
+dir1 \ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100755
index 00000000..3ba99efc
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,61 @@
+run:
+ skip-files:
+ - plugins/http/tests/http_test.go
+ - plugins/http/tests/plugin_test_old.go
+ - plugins/http/tests/rpc_test_old.go
+ - plugins/http/tests/config_test.go
+ - plugins/static/tests/static_plugin_test.go
+ - plugins/headers/tests/old.go
+linters:
+ disable-all: true
+ enable:
+ - bodyclose
+ - deadcode
+ - depguard
+ - dogsled
+ # - dupl
+ - errcheck
+ - exhaustive
+ # - funlen
+ # - gochecknoinits
+ # - goconst
+ - gocritic
+ - gocyclo
+ - gofmt
+ - goimports
+ - golint
+ # - gomnd
+ - goprintffuncname
+ - gosec
+ # - gosimple
+ - govet
+ - ineffassign
+ - interfacer
+ # - lll
+ - misspell
+ - nakedret
+ # - noctx
+ - nolintlint
+ - rowserrcheck
+ - scopelint
+ - staticcheck
+ - structcheck
+ - stylecheck
+ - unconvert
+ # - unparam
+ # - unused
+ - varcheck
+ - whitespace
+
+ # don't enable:
+ # - typecheck
+ # - gochecknoglobals
+ # - gocognit
+ # - godot
+ # - godox
+ # - goerr113
+ # - maligned
+ # - nestif
+ # - prealloc
+ # - testpackage
+ # - wsl
diff --git a/.rr.yaml b/.rr.yaml
index 7fa191a2..18087c2d 100644..100755
--- a/.rr.yaml
+++ b/.rr.yaml
@@ -1,201 +1,147 @@
-# defines environment variables for all underlying php processes
-env:
- key: value
-
-# rpc bus allows php application and external clients to talk to rr services.
rpc:
- # enable rpc server
- enable: true
-
- # rpc connection DSN. Supported TCP and Unix sockets.
listen: tcp://127.0.0.1:6001
+server:
+ command: "php tests/psr-worker-bench.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ "RR_RPC": "tcp://127.0.0.1:6001"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+logs:
+ mode: development
+ level: error
+
+http:
+ address: 127.0.0.1:44933
+ max_request_size: 1024
+ middleware: ["gzip", "headers"]
+ uploads:
+ forbid: [".php", ".exe", ".bat"]
+ trusted_subnets:
+ [
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ ]
+ pool:
+ num_workers: 6
+ max_jobs: 0
+ allocate_timeout: 60s
+ destroy_timeout: 60s
+ supervisor:
+ # WatchTick defines how often to check the state of worker (seconds)
+ watch_tick: 10
+ # TTL defines maximum time worker is allowed to live (seconds)
+ ttl: 10
+ # IdleTTL defines maximum duration worker can spend in idle mode. Disabled when 0 (seconds)
+ idle_ttl: 10
+ # ExecTTL defines maximum lifetime per job (seconds)
+ exec_ttl: 10
+ # MaxWorkerMemory limits memory per worker (MB)
+ max_worker_memory: 100
+
+ ssl:
+ port: 8892
+ redirect: false
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://0.0.0.0:7921
+ http2:
+ enabled: false
+ h2c: false
+ max_concurrent_streams: 128
+
+redis:
+ # UniversalClient is an abstract client which - based on the provided options -
+ # can connect to either clusters, or sentinel-backed failover instances
+ # or simple single-instance servers. This can be useful for testing
+ # cluster-specific applications locally.
+ # if the number of addrs is 1 and master_name is empty, a single-node redis Client will be returned
+
+ # if the number of Addrs is two or more, a ClusterClient will be returned
+ addrs:
+ - "localhost:6379"
+ # if a MasterName is passed a sentinel-backed FailoverClient will be returned
+ master_name: ""
+ username: ""
+ password: ""
+ db: 0
+ sentinel_password: ""
+ route_by_latency: false
+ route_randomly: false
+ dial_timeout: 0 # accepted values [1s, 5m, 3h]
+ max_retries: 1
+ min_retry_backoff: 0 # accepted values [1s, 5m, 3h]
+ max_retry_backoff: 0 # accepted values [1s, 5m, 3h]
+ pool_size: 0
+ min_idle_conns: 0
+ max_conn_age: 0 # accepted values [1s, 5m, 3h]
+ read_timeout: 0 # accepted values [1s, 5m, 3h]
+ write_timeout: 0 # accepted values [1s, 5m, 3h]
+ pool_timeout: 0 # accepted values [1s, 5m, 3h]
+ idle_timeout: 0 # accepted values [1s, 5m, 3h]
+ idle_check_freq: 0 # accepted values [1s, 5m, 3h]
+ read_only: false
+
+# boltdb simple driver
+boltdb:
+ dir: "."
+ file: "rr"
+ bucket: "test"
+ permissions: 0777
+ # keys ttl check interval
+ TTL: 60 # seconds
+
+ # memcached driver
+memcached:
+ addr:
+ - "localhost:11211"
+
+# in memory KV driver
+memory:
+ enabled: true
+ # keys ttl check interval
+ interval: 60
+
metrics:
# prometheus client address (path /metrics added automatically)
address: localhost:2112
-
- # list of metrics to collect from application
collect:
- # metric name
app_metric:
- # type [gauge, counter, histogram, summary]
type: histogram
-
- # short description
help: "Custom application metric"
-
- # metric groups/tags
- labels: [ "type" ]
-
- # for histogram only
- buckets: [ 0.1, 0.2, 0.3, 1.0 ]
-
+ labels: ["type"]
+ buckets: [0.1, 0.2, 0.3, 1.0]
# objectives defines the quantile rank estimates with their respective
# absolute error [ for summary only ]
objectives:
- 1.4: 2.3
- 2.0: 1.4
-
-# http service configuration.
-http:
- # http host to listen.
- address: 0.0.0.0:8080
- # override http error code for the application errors (default 500)
- appErrorCode: 505
- # override http error code for the internal RR errors (default 500)
- internalErrorCode: 505
-
- ssl:
- # custom https port (default 443)
- port: 443
- # force redirect to https connection
- redirect: true
- # ssl cert
- cert: server.crt
- # ssl private key
- key: server.key
- # rootCA certificate
- rootCa: root.crt
-
-
- # HTTP service provides FastCGI as frontend
- fcgi:
- # FastCGI connection DSN. Supported TCP and Unix sockets.
- address: tcp://0.0.0.0:6920
-
- # HTTP service provides HTTP2 transport
- http2:
- # enable HTTP/2, only with TSL
- enabled: true
-
- # enable H2C on TCP connections
- h2c: true
-
- # max transfer channels
- maxConcurrentStreams: 128
-
- # max POST request size, including file uploads in MB.
- maxRequestSize: 200
-
- # file upload configuration.
- uploads:
- # list of file extensions which are forbidden for uploading.
- forbid: [ ".php", ".exe", ".bat" ]
-
- # cidr blocks which can set ip using X-Real-Ip or X-Forwarded-For
- trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
-
- # http worker pool configuration.
- workers:
- # php worker command.
- command: "php psr-worker.php pipes"
-
- # connection method (pipes, tcp://:9000, unix://socket.unix). default "pipes"
- relay: "pipes"
-
- # user under which process will be started
- user: ""
-
- # worker pool configuration.
- pool:
- # number of workers to be serving.
- numWorkers: 4
-
- # maximum jobs per worker, 0 - unlimited.
- maxJobs: 0
-
- # for how long worker is allowed to be bootstrapped.
- allocateTimeout: 60
-
- # amount of time given to worker to gracefully destruct itself.
- destroyTimeout: 60
-
-# Additional HTTP headers and CORS control.
-headers:
- # Middleware to handle CORS requests, https://www.w3.org/TR/cors/
- cors:
- # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin
- allowedOrigin: "*"
-
- # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers
- allowedHeaders: "*"
-
- # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods
- allowedMethods: "GET,POST,PUT,DELETE"
-
- # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
- allowCredentials: true
-
- # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
- exposedHeaders: "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma"
-
- # Max allowed age in seconds
- maxAge: 600
-
- # Automatically add headers to every request passed to PHP.
- request:
- "Example-Request-Header": "Value"
-
- # Automatically add headers to every response.
- response:
- "X-Powered-By": "RoadRunner"
-
-# monitors rr server(s)
-limit:
- # check worker state each second
- interval: 1
-
- # custom watch configuration for each service
- services:
- # monitor http workers
- http:
- # maximum allowed memory consumption per worker (soft)
- maxMemory: 100
-
- # maximum time to live for the worker (soft)
- TTL: 0
-
- # maximum allowed amount of time worker can spend in idle before being removed (for weak db connections, soft)
- idleTTL: 0
-
- # max_execution_time (brutal)
- execTTL: 60
-
-# static file serving. remove this section to disable static file serving.
-static:
- # root directory for static file (http would not serve .php and .htaccess files).
- dir: "public"
-
- # list of extensions for forbid for serving.
- forbid: [ ".php", ".htaccess" ]
-
- # Automatically add headers to every request.
- request:
- "Example-Request-Header": "Value"
-
- # Automatically add headers to every response.
- response:
- "X-Powered-By": "RoadRunner"
-
-# health service configuration
-health:
- # http host to serve health requests.
- address: localhost:2113
-
-# reload can reset rr servers when files change
reload:
- # refresh internval (default 1s)
+ # sync interval
interval: 1s
-
- # file extensions to watch, defaults to [.php]
- patterns: [ ".php" ]
-
- # list of services to watch
+ # global patterns to sync
+ patterns: [".go"]
+ # list of included for sync services
services:
http:
- # list of dirs, "" root
- dirs: [ "" ]
-
- # include sub directories
- recursive: true \ No newline at end of file
+ # recursive search for file patterns to add
+ recursive: true
+ # ignored folders
+ ignore: ["vendor"]
+ # service specific file pattens to sync
+ patterns: [".php", ".go", ".md"]
+ # directories to sync. If recursive is set to true,
+ # recursive sync will be applied only to the directories in `dirs` section
+ dirs: ["."]
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 00000000..f43ef860
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,25 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Launch test file",
+ "type": "go",
+ "request": "launch",
+ "mode": "test",
+ "program": "${file}"
+ },
+ {
+ "name": "Launch main debug, race",
+ "type": "go",
+ "request": "launch",
+ "mode": "auto",
+ "showLog": true,
+ "buildFlags": "-race",
+ "args": ["serve", "-c", "../.rr.yaml"],
+ "program": "${workspaceFolder}/cmd/main.go"
+ }
+ ]
+} \ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000..78560788
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,15 @@
+{
+ "workbench.editor.enablePreview": false,
+ "go.testFlags": ["-v", "-tags=debug", "-race"],
+ "go.lintTool": "golangci-lint",
+ "go.lintFlags": [
+ "--fast"
+ ],
+ "cSpell.words": [
+ "asdf",
+ "bbolt",
+ "gofiber",
+ "stopc",
+ "treshholdc"
+ ]
+} \ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 28063ba5..a3f4a6cd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,12 +1,6 @@
CHANGELOG
=========
-v1.9.1 (21.12.2020)
-----------
-- Add `rr --version` flag support (thanks @tarampampam)
-- Update `reload` plugin, remove unnecessary goroutines and locks.
-- Add objectives to the metrics summary.
-- Automated releases build.
v1.9.0 (02.12.2020)
-------------------
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index ae0b283a..ae0b283a 100644..100755
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
diff --git a/Dockerfile b/Dockerfile
index 3c9ce76a..ed143753 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# Image page: <https://hub.docker.com/_/golang>
-FROM golang:1.15.5 as builder
+FROM golang:1.15.6 as builder
# app version and build date must be passed during image building (version without any prefix).
# e.g.: `docker build --build-arg "APP_VERSION=1.2.3" --build-arg "BUILD_TIME=$(date +%FT%T%z)" .`
@@ -26,7 +26,7 @@ RUN set -x \
COPY . .
# compile binary file
-RUN CGO_ENABLED=0 go build -trimpath -ldflags "$LDFLAGS" -o ./rr ./cmd/rr/main.go
+RUN CGO_ENABLED=0 go build -trimpath -ldflags "$LDFLAGS" -o ./rr ./cmd/main.go
# Image page: <https://hub.docker.com/_/alpine>
FROM alpine:3.12
diff --git a/LICENSE b/LICENSE
index d968467f..d968467f 100644..100755
--- a/LICENSE
+++ b/LICENSE
diff --git a/Makefile b/Makefile
index 9ad158ba..665c2143 100755
--- a/Makefile
+++ b/Makefile
@@ -12,7 +12,7 @@ help: ## Show this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {printf " \033[32m%-14s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
build: ## Build RR binary file for local os/arch
- CGO_ENABLED=0 go build -trimpath -ldflags "-s" -o ./rr ./cmd/rr/main.go
+ CGO_ENABLED=0 go build -trimpath -ldflags "-s" -o ./rr ./cmd/main.go
clean: ## Make some clean
rm ./rr
@@ -24,21 +24,43 @@ uninstall: ## Uninstall locally installed RR
rm -f /usr/local/bin/rr
test: ## Run application tests
- test -d ./vendor_php || composer update --prefer-dist --ansi
- go test -v -race -cover
- go test -v -race -cover ./util
- go test -v -race -cover ./service
- go test -v -race -cover ./service/env
- go test -v -race -cover ./service/rpc
- go test -v -race -cover ./service/http
- go test -v -race -cover ./service/static
- go test -v -race -cover ./service/limit
- go test -v -race -cover ./service/headers
- go test -v -race -cover ./service/metrics
- go test -v -race -cover ./service/health
- go test -v -race -cover ./service/gzip
- go test -v -race -cover ./service/reload
+ go clean -testcache
+ docker-compose -f tests/docker-compose.yaml up -d
+ go test -v -race -cover -tags=debug -covermode=atomic ./utils
+ go test -v -race -cover -tags=debug -covermode=atomic ./pkg/pipe
+ go test -v -race -cover -tags=debug -covermode=atomic ./pkg/pool
+ go test -v -race -cover -tags=debug -covermode=atomic ./pkg/socket
+ go test -v -race -cover -tags=debug -covermode=atomic ./pkg/worker
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/http
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/informer
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/reload
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/server
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/checker
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/config
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/gzip
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/headers
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/logger
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/metrics
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/redis
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/resetter
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/rpc
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/static
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/memory
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/memcached
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/memory
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/memcached
+ docker-compose -f tests/docker-compose.yaml down
lint: ## Run application linters
- go fmt ./...
- golint ./...
+ golangci-lint run
+kv:
+ docker-compose -f tests/docker-compose.yaml up -d
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/memory
+ go test -v -race -cover -tags=debug -covermode=atomic ./plugins/kv/memcached
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/boltdb
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/memory
+ go test -v -race -cover -tags=debug -covermode=atomic ./tests/plugins/kv/memcached
+ docker-compose -f tests/docker-compose.yaml down
diff --git a/README.md b/README.md
index c8776329..73a98693 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+[WIP]
+
<p align="center">
<img src="https://user-images.githubusercontent.com/796136/50286124-6f7f3780-046f-11e9-9f45-e8fedd4f786d.png" height="75px" alt="RoadRunner">
</p>
@@ -14,7 +16,7 @@
</p>
RoadRunner is an open-source (MIT licensed) high-performance PHP application server, load balancer, and process manager.
-It supports running as a service with the ability to extend its functionality on a per-project basis.
+It supports running as a service with the ability to extend its functionality on a per-project basis.
RoadRunner includes PSR-7/PSR-17 compatible HTTP and HTTP/2 server and can be used to replace classic Nginx+FPM setup with much greater performance and flexibility.
@@ -29,7 +31,7 @@ Features:
- PCI DSS compliant
- PSR-7 HTTP server (file uploads, error handling, static files, hot reload, middlewares, event listeners)
- HTTPS and HTTP/2 support (including HTTP/2 Push, H2C)
-- Fully customizable server, FastCGI support
+- A Fully customizable server, FastCGI support
- Flexible environment configuration
- No external PHP dependencies (64bit version required), drop-in (based on [Goridge](https://github.com/spiral/goridge))
- Load balancer, process manager and task pipeline
@@ -39,11 +41,11 @@ Features:
- Automatic worker replacement and safe PHP process destruction
- Worker create/allocate/destroy timeouts
- Max jobs per worker
-- Worker lifecycle management (controller)
+- Worker lifecycle management (controller)
- maxMemory (graceful stop)
- TTL (graceful stop)
- idleTTL (graceful stop)
- - execTTL (brute, max_execution_time)
+ - execTTL (brute, max_execution_time)
- Payload context and body
- Protocol, worker and job level error management (including PHP errors)
- Very fast (~250k rpc calls per second on Ryzen 1700X using 16 threads)
@@ -54,48 +56,13 @@ Features:
Installation:
--------
-To install:
```
-$ composer require spiral/roadrunner
-$ ./vendor/bin/rr get-binary
+go get -u github.com/spiral/roadrunner/v2
```
> For getting roadrunner binary file you can use our docker image: `spiralscout/roadrunner:X.X.X` (more information about image and tags can be found [here](https://hub.docker.com/r/spiralscout/roadrunner/))
-Extensions:
---------
-| Extension | Current Status
-| --- | ---
-spiral/jobs | [![Latest Stable Version](https://poser.pugx.org/spiral/jobs/version)](https://packagist.org/packages/spiral/jobs) [![Build Status](https://travis-ci.org/spiral/jobs.svg?branch=master)](https://travis-ci.org/spiral/jobs) [![Codecov](https://codecov.io/gh/spiral/jobs/branch/master/graph/badge.svg)](https://codecov.io/gh/spiral/jobs/)
-spiral/php-grpc | [![Latest Stable Version](https://poser.pugx.org/spiral/php-grpc/version)](https://packagist.org/packages/spiral/php-grpc) [![Build Status](https://travis-ci.org/spiral/php-grpc.svg?branch=master)](https://travis-ci.org/spiral/php-grpc) [![Codecov](https://codecov.io/gh/spiral/php-grpc/branch/master/graph/badge.svg)](https://codecov.io/gh/spiral/php-grpc/)
-spiral/broadcast | [![Latest Stable Version](https://poser.pugx.org/spiral/broadcast/version)](https://packagist.org/packages/spiral/broadcast) [![Build Status](https://travis-ci.org/spiral/broadcast.svg?branch=master)](https://travis-ci.org/spiral/broadcast) [![Codecov](https://codecov.io/gh/spiral/broadcast/branch/master/graph/badge.svg)](https://codecov.io/gh/spiral/broadcast/)
-spiral/broadcast-ws | [![Latest Stable Version](https://poser.pugx.org/spiral/broadcast-ws/version)](https://packagist.org/packages/spiral/broadcast-ws) [![Build Status](https://travis-ci.org/spiral/broadcast-ws.svg?branch=master)](https://travis-ci.org/spiral/broadcast-ws) [![Codecov](https://codecov.io/gh/spiral/broadcast-ws/branch/master/graph/badge.svg)](https://codecov.io/gh/spiral/broadcast-ws/)
-
-Example:
---------
-
-```php
-<?php
-// worker.php
-ini_set('display_errors', 'stderr');
-include "vendor/autoload.php";
-
-$relay = new Spiral\Goridge\StreamRelay(STDIN, STDOUT);
-$psr7 = new Spiral\RoadRunner\PSR7Client(new Spiral\RoadRunner\Worker($relay));
-
-while ($req = $psr7->acceptRequest()) {
- try {
- $resp = new \Zend\Diactoros\Response();
- $resp->getBody()->write("hello world");
-
- $psr7->respond($resp);
- } catch (\Throwable $e) {
- $psr7->getWorker()->error((string)$e);
- }
-}
-```
-
Configuration can be located in `.rr.yaml` file ([full sample](https://github.com/spiral/roadrunner/blob/master/.rr.yaml)):
```yaml
@@ -116,4 +83,4 @@ $ ./rr serve -v -d
License:
--------
-The MIT License (MIT). Please see [`LICENSE`](./LICENSE) for more information. Maintained by [Spiral Scout](https://spiralscout.com).
+The MIT License (MIT). Please see [`LICENSE`](./LICENSE) for more information. Maintained by [Spiral Scout](https://spiralscout.com). \ No newline at end of file
diff --git a/bin/rr b/bin/rr
deleted file mode 100755
index d7d0b4f2..00000000
--- a/bin/rr
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/env php
-<?php
-/**
- * RoadRunner
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * This file responsive for cli commands
- */
-declare(strict_types=1);
-
-foreach ([
- __DIR__ . '/../../../autoload.php',
- __DIR__ . '/../vendor/autoload.php',
- __DIR__ . '/vendor/autoload.php',
- __DIR__ . '/../vendor_php/autoload.php'
- ] as $file) {
- if (file_exists($file)) {
- define('RR_COMPOSER_INSTALL', $file);
-
- break;
- }
-}
-
-unset($file);
-
-if (!defined('RR_COMPOSER_INSTALL')) {
- fwrite(
- STDERR,
- 'You need to set up the project dependencies using Composer:' . PHP_EOL . PHP_EOL .
- ' composer install' . PHP_EOL . PHP_EOL .
- 'You can learn all about Composer on https://getcomposer.org/.' . PHP_EOL
- );
-
- die(1);
-}
-
-if (RRHelper::getOSType() !== 'linux' && !class_exists('ZipArchive')) {
- fwrite(STDERR, 'Extension `php-zip` is required.' . PHP_EOL);
- die(1);
-}
-
-if (!function_exists('curl_init')) {
- fwrite(STDERR, 'Extension `php-curl` is required.' . PHP_EOL);
- die(1);
-}
-
-require RR_COMPOSER_INSTALL;
-
-use Symfony\Component\Console\Application;
-use Symfony\Component\Console\Helper\ProgressBar;
-use Symfony\Component\Console\Input\InputArgument;
-use Symfony\Component\Console\Input\InputInterface;
-use Symfony\Component\Console\Output\OutputInterface;
-use Symfony\Component\Console\Question\ConfirmationQuestion;
-use function Couchbase\defaultDecoder;
-
-class RRHelper
-{
- /**
- * @var string
- */
- public const SELF_PACKAGE_NAME = 'spiral/roadrunner';
-
- /**
- * Returns version of RoadRunner based on current package version.
- *
- * @return string Version of RoadRunner (eg.: `1.8.0`)
- */
- public static function getVersion(): string
- {
- $version = \PackageVersions\Versions::getVersion(self::SELF_PACKAGE_NAME);
-
- if (\is_int($delimiter_position = \mb_strpos($version, '@'))) {
- $version = \mb_substr($version, 0, (int) $delimiter_position);
- }
-
- return \ltrim($version, 'vV');
- }
-
- /**
- * Returns OS Type for filename
- *
- * @return string OS Type
- */
- public static function getOSType(): string
- {
- switch (PHP_OS) {
- case 'Darwin':
- return 'darwin';
- case 'Linux':
- return 'linux';
- case 'FreeBSD':
- return 'freebsd';
- case 'WIN32':
- case 'WINNT':
- case 'Windows':
- return 'windows';
- default:
- return 'linux';
- }
- }
-
- /**
- * @return string
- * @throws Exception
- */
- public static function getSignature(): string
- {
- return 'roadrunner-' . self::getVersion() . '-' . self::getOSType() . '-amd64';
- }
-
- /**
- * Returns generated URL to zip file on GitHub with binary file
- *
- * @return string URL
- * @throws Exception
- */
- public static function getBinaryDownloadUrl(): string
- {
- $ext = '.zip';
- if (self::getOSType() == 'linux') {
- $ext = '.tar.gz';
- }
-
- return 'https://github.com/spiral/roadrunner/releases/download/v'
- . static::getVersion() . '/' . self::getSignature()
- . $ext;
- }
-
- /**
- * Extracts the roadrunner RR binary into given location.
- *
- * @param string $archive
- * @param string $target
- * @throws Exception
- */
- public static function extractBinary(string $archive, string $target)
- {
- if (self::getOSType() !== 'linux') {
- self::extractZIP($archive, $target);
- } else {
- self::extractTAR($archive, $target);
- }
- }
-
- /**
- * @param string $archive
- * @param string $target
- * @throws Exception
- */
- protected static function extractZIP(string $archive, string $target)
- {
- $zip = new ZipArchive();
- $zip->open($archive);
-
- $name = self::getSignature() . '/rr';
- if (self::getOSType() == 'windows') {
- $name .= '.exe';
- }
-
- $stream = $zip->getStream($name);
- if (!is_resource($stream)) {
- return;
- }
-
- $to = fopen($target, 'w');
- stream_copy_to_stream($stream, $to);
- fclose($to);
-
- $zip->close();
- }
-
- /**
- * @param string $archive
- * @param string $target
- * @throws Exception
- */
- protected static function extractTAR(string $archive, string $target)
- {
- $arch = new PharData($archive);
- $arch->extractTo('./', self::getSignature() . '/rr');
-
- copy('./' . self::getSignature() . '/rr', $target);
- unlink('./' . self::getSignature() . '/rr');
- rmdir('./' . self::getSignature());
- }
-}
-
-(new Application('RoadRunner', RRHelper::getVersion()))
- ->register('get-binary')
- ->setDescription("Install or update RoadRunner binaries in specified folder (current folder by default)")
- ->addOption('location', 'l', InputArgument::OPTIONAL, 'destination folder', '.')
- ->setCode(function (InputInterface $input, OutputInterface $output) {
- $output->writeln('<info>Updating binary file of RoadRunner</info>');
-
- $finalFile = $input->getOption('location') . DIRECTORY_SEPARATOR . 'rr';
- if (RRHelper::getOSType() == 'windows') {
- $finalFile .= '.exe';
- }
-
- if (is_file($finalFile)) {
- $version = RRHelper::getVersion();
-
- $previousVersion = preg_match(
- '#Version:.+(\d+\.\d+\.\d+)#',
- (string)shell_exec($finalFile),
- $matches
- ) ? $matches[1] : "";
-
- $output->writeln('<error>RoadRunner binary file already exists!</error>');
- $helper = $this->getHelper('question');
-
- if (version_compare($previousVersion, $version) === 0) {
- $output->writeln(sprintf('<info>Current version: %s</info>', $previousVersion));
- $question = new ConfirmationQuestion(
- sprintf('Skip update to the same version: %s ? [Y/n]', $version)
- );
- if ($helper->ask($input, $output, $question)) {
- return;
- }
- } else {
- $question = new ConfirmationQuestion('Do you want overwrite it? [Y/n]');
- if (!$helper->ask($input, $output, $question)) {
- return;
- }
- }
- }
-
- $output->writeln('<info>Downloading RoadRunner archive for <fg=cyan>' . ucfirst(RRHelper::getOSType()) . '</fg=cyan></info>');
-
- $progressBar = new ProgressBar($output);
- $progressBar->setFormat('verbose');
-
- $zipFileName = 'rr_zip_'.random_int(0, 10000);
- if (RRHelper::getOSType() == 'linux') {
- $zipFileName .= '.tar.gz';
- }
-
- $zipFile = fopen($zipFileName, "w+");
- $curlResource = curl_init();
-
- curl_setopt($curlResource, CURLOPT_URL, RRHelper::getBinaryDownloadUrl());
- curl_setopt($curlResource, CURLOPT_RETURNTRANSFER, true);
- curl_setopt($curlResource, CURLOPT_BINARYTRANSFER, true);
- curl_setopt($curlResource, CURLOPT_SSL_VERIFYPEER, false);
- curl_setopt($curlResource, CURLOPT_FOLLOWLOCATION, true);
- curl_setopt($curlResource, CURLOPT_FILE, $zipFile);
- curl_setopt($curlResource, CURLOPT_PROGRESSFUNCTION,
- function ($resource, $download_size, $downloaded, $upload_size, $uploaded) use (&$progressBar, $output) {
- if ($download_size == 0) {
- return;
- }
-
- if ($progressBar->getStartTime() === 0) {
- $progressBar->start();
- }
-
- if ($progressBar->getMaxSteps() != $download_size) {
- /**
- * Workaround for symfony < 4.1.x, for example PHP 7.0 will use 3.x
- * feature #26449 Make ProgressBar::setMaxSteps public (ostrolucky)
- */
- $progressBar = new ProgressBar($output, $download_size);
- }
-
- $progressBar->setFormat('[%bar%] %percent:3s%% %elapsed:6s%/%estimated:-6s% ' . intval($download_size / 1024) . 'KB');
- $progressBar->setProgress($downloaded);
- });
- curl_setopt($curlResource, CURLOPT_NOPROGRESS, false); // needed to make progress function work
- curl_setopt($curlResource, CURLOPT_HEADER, 0);
- curl_exec($curlResource);
- curl_close($curlResource);
- fclose($zipFile);
-
- $progressBar->finish();
- $output->writeln("");
-
- $output->writeln('<info>Unpacking <comment>' . basename(RRHelper::getBinaryDownloadUrl()) . '</comment></info>');
-
- RRHelper::extractBinary($zipFileName, $finalFile);
- unlink($zipFileName);
-
- if (!file_exists($finalFile) || filesize($finalFile) === 0) {
- throw new Exception('Unable to extract the file.');
- }
-
- chmod($finalFile, 0755);
- $output->writeln('<info>Binary file updated!</info>');
- })
- ->getApplication()
- ->register("init-config")
- ->setDescription("Inits default .rr.yaml config in specified folder (current folder by default)")
- ->addOption('location', 'l', InputArgument::OPTIONAL, 'destination folder', '.')
- ->setCode(function (InputInterface $input, OutputInterface $output) {
- if (is_file($input->getOption('location') . DIRECTORY_SEPARATOR . '.rr.yaml')) {
- $output->writeln('<error>Config file already exists!</error>');
- $helper = $this->getHelper('question');
- $question = new ConfirmationQuestion('Do you want overwrite it? [Y/n] ');
-
- if (!$helper->ask($input, $output, $question)) {
- return;
- }
- }
-
- copy(
- __DIR__ . DIRECTORY_SEPARATOR . '..' . DIRECTORY_SEPARATOR . '.rr.yaml',
- $input->getOption('location') . DIRECTORY_SEPARATOR . '.rr.yaml'
- );
- $output->writeln('<info>Config file created!</info>');
- })
- ->getApplication()
- ->run();
diff --git a/bors.toml b/bors.toml
index 2da84249..e35b52a3 100644..100755
--- a/bors.toml
+++ b/bors.toml
@@ -1,18 +1,16 @@
status = [
- 'Build (PHP 7.2, basic setup)',
- 'Build (PHP 7.2, lowest setup)',
- 'Build (PHP 7.3, basic setup)',
- 'Build (PHP 7.3, lowest setup)',
- 'Build (PHP 8.0, basic setup)',
- 'Build (PHP 8.0, lowest setup)',
- 'Build (Go 1.14, PHP 7.2)',
- 'Build (Go 1.15, PHP 7.2)',
- 'Build (Go 1.14, PHP 7.3)',
- 'Build (Go 1.15, PHP 7.3)',
- 'Build (Go 1.14, PHP 7.4)',
- 'Build (Go 1.15, PHP 7.4)',
- 'Build (Go 1.14, PHP 8.0)',
- 'Build (Go 1.15, PHP 8.0)',
+ 'Build (Go 1.14, PHP 7.4, OS ubuntu-latest)',
+ 'Build (Go 1.14, PHP 7.4, OS windows-latest)',
+ 'Build (Go 1.14, PHP 7.4, OS macos-latest)',
+ 'Build (Go 1.15, PHP 7.4, OS ubuntu-latest)',
+ 'Build (Go 1.15, PHP 7.4, OS windows-latest)',
+ 'Build (Go 1.15, PHP 7.4, OS macos-latest)',
+ 'Build (Go 1.14, PHP 8.0, OS ubuntu-latest)',
+ 'Build (Go 1.14, PHP 8.0, OS windows-latest)',
+ 'Build (Go 1.14, PHP 8.0, OS macos-latest)',
+ 'Build (Go 1.15, PHP 8.0, OS ubuntu-latest)',
+ 'Build (Go 1.15, PHP 8.0, OS windows-latest)',
+ 'Build (Go 1.15, PHP 8.0, OS macos-latest)',
'Golang-CI (lint)',
'Build docker image',
]
diff --git a/cmd/cli/reset.go b/cmd/cli/reset.go
new file mode 100644
index 00000000..504d88ad
--- /dev/null
+++ b/cmd/cli/reset.go
@@ -0,0 +1,107 @@
+package cli
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/fatih/color"
+ "github.com/mattn/go-runewidth"
+ "github.com/spf13/cobra"
+ "github.com/spiral/errors"
+ "github.com/vbauerster/mpb/v5"
+ "github.com/vbauerster/mpb/v5/decor"
+)
+
+// List is the resetter.List RPC method
+const List string = "resetter.List"
+
+// Reset is the resetter.Reset RPC method
+const Reset string = "resetter.Reset"
+
+func init() {
+ root.AddCommand(&cobra.Command{
+ Use: "reset",
+ Short: "Reset workers of all or specific RoadRunner service",
+ RunE: resetHandler,
+ })
+}
+
+func resetHandler(cmd *cobra.Command, args []string) error {
+ const op = errors.Op("reset handler")
+ client, err := RPCClient()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = client.Close()
+ }()
+
+ var services []string
+ if len(args) != 0 {
+ services = args
+ } else {
+ err = client.Call(List, true, &services)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ }
+
+ var wg sync.WaitGroup
+ pr := mpb.New(mpb.WithWaitGroup(&wg), mpb.WithWidth(6))
+ wg.Add(len(services))
+
+ for _, service := range services {
+ var (
+ bar *mpb.Bar
+ name = runewidth.FillRight(fmt.Sprintf("Resetting plugin: [%s]", color.HiYellowString(service)), 27)
+ result = make(chan interface{})
+ )
+
+ bar = pr.AddSpinner(
+ 1,
+ mpb.SpinnerOnMiddle,
+ mpb.SpinnerStyle([]string{"∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"}),
+ mpb.PrependDecorators(decor.Name(name)),
+ mpb.AppendDecorators(onComplete(result)),
+ )
+
+		// reset the plugin over RPC; the outcome is reported to the spinner decorator
+ go func(service string, result chan interface{}) {
+ defer wg.Done()
+ defer bar.Increment()
+
+ var done bool
+ err = client.Call(Reset, service, &done)
+ if err != nil {
+ result <- errors.E(op, err)
+ return
+ }
+ result <- nil
+ }(service, result)
+ }
+
+ pr.Wait()
+ return nil
+}
+
+func onComplete(result chan interface{}) decor.Decorator {
+ var (
+ msg = ""
+ fn = func(s decor.Statistics) string {
+ select {
+ case r := <-result:
+ if err, ok := r.(error); ok {
+ msg = color.HiRedString(err.Error())
+ return msg
+ }
+
+ msg = color.HiGreenString("done")
+ return msg
+ default:
+ return msg
+ }
+ }
+ )
+
+ return decor.Any(fn)
+}
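For reference, the reset command above is only a progress-bar wrapper around two RPC methods served over the goridge codec. Below is a minimal standalone sketch of calling them directly; the TCP address is an assumption and has to match the rpc section of the target .rr.yaml.

    package main

    import (
        "fmt"
        "log"
        "net"
        "net/rpc"

        goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
    )

    func main() {
        // assumed address; use whatever the rpc plugin listens on in .rr.yaml
        conn, err := net.Dial("tcp", "127.0.0.1:6001")
        if err != nil {
            log.Fatal(err)
        }
        client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
        defer func() { _ = client.Close() }()

        // resetter.List returns the plugins that support resetting
        var services []string
        if err := client.Call("resetter.List", true, &services); err != nil {
            log.Fatal(err)
        }

        // resetter.Reset recreates the worker pool of each plugin
        for _, svc := range services {
            var done bool
            if err := client.Call("resetter.Reset", svc, &done); err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s: reset=%v\n", svc, done)
        }
    }

Run it against a server started with rr serve: resetter.List reports the resettable plugins and resetter.Reset rebuilds the worker pool of each one.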
diff --git a/cmd/cli/root.go b/cmd/cli/root.go
new file mode 100644
index 00000000..06a84a82
--- /dev/null
+++ b/cmd/cli/root.go
@@ -0,0 +1,101 @@
+package cli
+
+import (
+ "log"
+ "net/rpc"
+ "os"
+ "path/filepath"
+
+ "github.com/spiral/errors"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+
+ "github.com/spiral/roadrunner/v2/plugins/config"
+
+ "github.com/spf13/cobra"
+ "github.com/spiral/endure"
+)
+
+var (
+	// WorkDir is the working directory
+ WorkDir string
+	// CfgFile is the path to the .rr.yaml config file
+ CfgFile string
+ // Container is the pointer to the Endure container
+ Container *endure.Endure
+ cfg *config.Viper
+ root = &cobra.Command{
+ Use: "rr",
+ SilenceErrors: true,
+ SilenceUsage: true,
+ Version: Version,
+ }
+)
+
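+// Execute runs the root command; on error it logs the error and exits with a non-zero code.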
+func Execute() {
+ if err := root.Execute(); err != nil {
+		// exit with a non-zero code; log.Fatal invokes os.Exit(1)
+ log.Fatal(err)
+ }
+}
+
+func init() {
+ root.PersistentFlags().StringVarP(&CfgFile, "config", "c", ".rr.yaml", "config file (default is .rr.yaml)")
+ root.PersistentFlags().StringVarP(&WorkDir, "WorkDir", "w", "", "work directory")
+
+ cobra.OnInitialize(func() {
+ if CfgFile != "" {
+ if absPath, err := filepath.Abs(CfgFile); err == nil {
+ CfgFile = absPath
+
+				// force the working directory to the directory of the config file
+ if err := os.Chdir(filepath.Dir(absPath)); err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ if WorkDir != "" {
+ if err := os.Chdir(WorkDir); err != nil {
+ panic(err)
+ }
+ }
+
+ cfg = &config.Viper{}
+ cfg.Path = CfgFile
+ cfg.Prefix = "rr"
+
+ // register config
+ err := Container.Register(cfg)
+ if err != nil {
+ panic(err)
+ }
+ })
+}
+
+// RPCClient creates the RPC client used by the ./rr reset and ./rr workers commands
+func RPCClient() (*rpc.Client, error) {
+ rpcConfig := &rpcPlugin.Config{}
+
+ err := cfg.Init()
+ if err != nil {
+ return nil, err
+ }
+
+ if !cfg.Has(rpcPlugin.PluginName) {
+ return nil, errors.E("rpc service disabled")
+ }
+
+ err = cfg.UnmarshalKey(rpcPlugin.PluginName, rpcConfig)
+ if err != nil {
+ return nil, err
+ }
+ rpcConfig.InitDefaults()
+
+ conn, err := rpcConfig.Dialer()
+ if err != nil {
+ return nil, err
+ }
+
+ return rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)), nil
+}
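Because RPCClient and the root command live in the same package, new subcommands can reuse the helper directly. The sketch below is hypothetical: the plugins command name and its plain-text output are illustrative, and only RPCClient and the informer.List endpoint come from the code above.

    package cli

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func init() {
        // hypothetical subcommand: prints the plugins known to the informer
        root.AddCommand(&cobra.Command{
            Use:   "plugins",
            Short: "List plugins that expose workers",
            RunE: func(cmd *cobra.Command, args []string) error {
                client, err := RPCClient()
                if err != nil {
                    return err
                }
                defer func() { _ = client.Close() }()

                var plugins []string
                if err := client.Call("informer.List", true, &plugins); err != nil {
                    return err
                }
                for _, p := range plugins {
                    fmt.Println(p)
                }
                return nil
            },
        })
    }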
diff --git a/cmd/cli/serve.go b/cmd/cli/serve.go
new file mode 100644
index 00000000..2fe54932
--- /dev/null
+++ b/cmd/cli/serve.go
@@ -0,0 +1,63 @@
+package cli
+
+import (
+ "log"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/spf13/cobra"
+ "github.com/spiral/errors"
+ "go.uber.org/multierr"
+)
+
+func init() {
+ root.AddCommand(&cobra.Command{
+ Use: "serve",
+ Short: "Start RoadRunner server",
+ RunE: handler,
+ })
+}
+
+func handler(cmd *cobra.Command, args []string) error {
+ const op = errors.Op("handle serve command")
+ /*
+		The path to the config has to be available at the RegisterTarget stage,
+		but only after cobra.Execute, because cobra fills the CLI variables at that stage.
+ */
+
+ err := Container.Init()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ errCh, err := Container.Serve()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // https://golang.org/pkg/os/signal/#Notify
+ // should be of buffer size at least 1
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
+
+ for {
+ select {
+ case e := <-errCh:
+ err = multierr.Append(err, e.Error)
+ log.Printf("error occurred: %v, service: %s", e.Error.Error(), e.VertexID)
+ er := Container.Stop()
+ if er != nil {
+ err = multierr.Append(err, er)
+ return errors.E(op, err)
+ }
+ return errors.E(op, err)
+ case <-c:
+ err = Container.Stop()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ }
+ }
+}
diff --git a/cmd/rr/cmd/version.go b/cmd/cli/version.go
index a550c682..89728bd2 100644
--- a/cmd/rr/cmd/version.go
+++ b/cmd/cli/version.go
@@ -1,9 +1,9 @@
-package cmd
+package cli
var (
// Version - defines build version.
- Version = "local"
+ Version string = "local"
// BuildTime - defined build time.
- BuildTime = "development"
+ BuildTime string = "development"
)
diff --git a/cmd/cli/workers.go b/cmd/cli/workers.go
new file mode 100644
index 00000000..03639aa4
--- /dev/null
+++ b/cmd/cli/workers.go
@@ -0,0 +1,110 @@
+package cli
+
+import (
+ "fmt"
+ "log"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ tm "github.com/buger/goterm"
+ "github.com/fatih/color"
+ "github.com/spf13/cobra"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/informer"
+ "github.com/spiral/roadrunner/v2/tools"
+)
+
+var (
+ interactive bool
+)
+
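+// InformerList is the informer.List RPC method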
+const InformerList string = "informer.List"
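+// InformerWorkers is the informer.Workers RPC method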
+const InformerWorkers string = "informer.Workers"
+
+func init() {
+ workersCommand := &cobra.Command{
+ Use: "workers",
+ Short: "Show information about active roadrunner workers",
+ RunE: workersHandler,
+ }
+
+ workersCommand.Flags().BoolVarP(
+ &interactive,
+ "interactive",
+ "i",
+ false,
+ "render interactive workers table",
+ )
+
+ root.AddCommand(workersCommand)
+}
+
+func workersHandler(cmd *cobra.Command, args []string) error {
+ const op = errors.Op("workers cobra handler")
+ // get RPC client
+ client, err := RPCClient()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err := client.Close()
+ if err != nil {
+ log.Printf("error when closing RPCClient: error %v", err)
+ }
+ }()
+
+ var plugins []string
+	// if plugin names were passed as arguments, show workers only for those plugins
+ if len(args) != 0 {
+ plugins = args
+ } else {
+ err = client.Call(InformerList, true, &plugins)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ }
+
+ if !interactive {
+ return showWorkers(plugins, client)
+ }
+
+ // https://golang.org/pkg/os/signal/#Notify
+ // should be of buffer size at least 1
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
+
+ tm.Clear()
+ tt := time.NewTicker(time.Second)
+ defer tt.Stop()
+ for {
+ select {
+ case <-c:
+ return nil
+ case <-tt.C:
+ tm.MoveCursor(1, 1)
+ err := showWorkers(plugins, client)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ tm.Flush()
+ }
+ }
+}
+
+func showWorkers(plugins []string, client *rpc.Client) error {
+ const op = errors.Op("show workers")
+ for _, plugin := range plugins {
+ list := &informer.WorkerList{}
+ err := client.Call(InformerWorkers, plugin, &list)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ fmt.Printf("Workers of [%s]:\n", color.HiYellowString(plugin))
+ tools.WorkerTable(os.Stdout, list.Workers).Render()
+ }
+ return nil
+}
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 00000000..56422e82
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "log"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/cmd/cli"
+ "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/informer"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv/memcached"
+ "github.com/spiral/roadrunner/v2/plugins/kv/memory"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/metrics"
+ "github.com/spiral/roadrunner/v2/plugins/redis"
+ "github.com/spiral/roadrunner/v2/plugins/reload"
+ "github.com/spiral/roadrunner/v2/plugins/resetter"
+ "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
+func main() {
+ var err error
+ cli.Container, err = endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.RetryOnFail(false))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ err = cli.Container.RegisterAll(
+ // logger plugin
+ &logger.ZapLogger{},
+ // metrics plugin
+ &metrics.Plugin{},
+ // redis plugin (internal)
+ &redis.Plugin{},
+ // http server plugin
+ &http.Plugin{},
+ // reload plugin
+ &reload.Plugin{},
+ // informer plugin (./rr workers, ./rr workers -i)
+ &informer.Plugin{},
+ // resetter plugin (./rr reset)
+ &resetter.Plugin{},
+ // rpc plugin (workers, reset)
+ &rpc.Plugin{},
+ // server plugin (NewWorker, NewWorkerPool)
+ &server.Plugin{},
+ // memcached kv plugin
+ &memcached.Plugin{},
+ // in-memory kv plugin
+ &memory.Plugin{},
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ cli.Execute()
+}
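Since the plugin set is fixed only in this file, a slimmer custom binary can be produced by registering a subset of plugins before handing control to the CLI. The sketch below assumes an HTTP-only build; the selection is an example, not a recommendation.

    package main

    import (
        "log"

        "github.com/spiral/endure"
        "github.com/spiral/roadrunner/v2/cmd/cli"
        "github.com/spiral/roadrunner/v2/plugins/http"
        "github.com/spiral/roadrunner/v2/plugins/informer"
        "github.com/spiral/roadrunner/v2/plugins/logger"
        "github.com/spiral/roadrunner/v2/plugins/resetter"
        "github.com/spiral/roadrunner/v2/plugins/rpc"
        "github.com/spiral/roadrunner/v2/plugins/server"
    )

    func main() {
        var err error
        cli.Container, err = endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.RetryOnFail(false))
        if err != nil {
            log.Fatal(err)
        }

        // HTTP-only build: logger, server and http, plus the RPC-backed tooling
        err = cli.Container.RegisterAll(
            &logger.ZapLogger{},
            &server.Plugin{},
            &http.Plugin{},
            &rpc.Plugin{},
            &informer.Plugin{},
            &resetter.Plugin{},
        )
        if err != nil {
            log.Fatal(err)
        }

        cli.Execute()
    }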
diff --git a/cmd/rr/LICENSE b/cmd/rr/LICENSE
deleted file mode 100644
index efb98c87..00000000
--- a/cmd/rr/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2018 SpiralScout
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE. \ No newline at end of file
diff --git a/cmd/rr/cmd/root.go b/cmd/rr/cmd/root.go
deleted file mode 100644
index 13d74d25..00000000
--- a/cmd/rr/cmd/root.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package cmd
-
-import (
- "log"
- "net/http"
- "net/http/pprof"
- "os"
-
- "github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
- "github.com/spiral/roadrunner/cmd/util"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/limit"
-)
-
-// Services bus for all the commands.
-var (
- cfgFile, workDir, logFormat string
- override []string
- mergeJson string
-
- // Verbose enables verbosity mode (container specific).
- Verbose bool
-
- // Debug enables debug mode (service specific).
- Debug bool
-
- // Logger - shared logger.
- Logger = logrus.New()
-
- // Container - shared service bus.
- Container = service.NewContainer(Logger)
-
- // CLI is application endpoint.
- CLI = &cobra.Command{
- Use: "rr",
- SilenceErrors: true,
- SilenceUsage: true,
- Version: Version, // allows to use `--version` flag
- Short: util.Sprintf(
- "<green>RoadRunner</reset>, PHP Application Server\nVersion: <yellow+hb>%s</reset>, %s",
- Version,
- BuildTime,
- ),
- }
-)
-
-// Execute adds all child commands to the CLI command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the CLI.
-func Execute() {
- if err := CLI.Execute(); err != nil {
- util.ExitWithError(err)
- }
-}
-
-func init() {
- CLI.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
- CLI.PersistentFlags().BoolVarP(&Debug, "debug", "d", false, "debug mode")
- CLI.PersistentFlags().StringVarP(&logFormat, "logFormat", "l", "color", "select log formatter (color, json, plain)")
- CLI.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is .rr.yaml)")
- CLI.PersistentFlags().StringVarP(&workDir, "workDir", "w", "", "work directory")
- CLI.PersistentFlags().StringVarP(&mergeJson, "jsonConfig", "j", "", "merge json configuration")
-
- CLI.PersistentFlags().StringArrayVarP(
- &override,
- "override",
- "o",
- nil,
- "override config value (dot.notation=value)",
- )
-
- cobra.OnInitialize(func() {
- if Verbose {
- Logger.SetLevel(logrus.DebugLevel)
- }
-
- configureLogger(logFormat)
-
- cfg, err := util.LoadConfig(cfgFile, []string{"."}, ".rr", override, mergeJson)
- if err != nil {
- Logger.Warnf("config: %s", err)
- return
- }
-
- if workDir != "" {
- if err := os.Chdir(workDir); err != nil {
- util.ExitWithError(err)
- }
- }
-
- if err := Container.Init(cfg); err != nil {
- util.ExitWithError(err)
- }
-
- // global watcher config
- if Verbose {
- wcv, _ := Container.Get(limit.ID)
- if wcv, ok := wcv.(*limit.Service); ok {
- wcv.AddListener(func(event int, ctx interface{}) {
- util.LogEvent(Logger, event, ctx)
- })
- }
- }
-
- // if debug --> also run pprof service
- if Debug {
- go runDebugServer()
- }
- })
-}
-func runDebugServer() {
- mux := http.NewServeMux()
- mux.HandleFunc("/debug/pprof/", pprof.Index)
- mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
- mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
- mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
- mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
- srv := http.Server{
- Addr: ":6061",
- Handler: mux,
- }
-
- if err := srv.ListenAndServe(); err != nil {
- log.Fatal(err)
- }
-}
-
-func configureLogger(format string) {
- util.Colorize = false
- switch format {
- case "color", "default":
- util.Colorize = true
- Logger.Formatter = &logrus.TextFormatter{ForceColors: true}
- case "plain":
- Logger.Formatter = &logrus.TextFormatter{DisableColors: true}
- case "json":
- Logger.Formatter = &logrus.JSONFormatter{}
- }
-}
diff --git a/cmd/rr/cmd/serve.go b/cmd/rr/cmd/serve.go
deleted file mode 100644
index 70682780..00000000
--- a/cmd/rr/cmd/serve.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package cmd
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
-
- "github.com/spf13/cobra"
-)
-
-func init() {
- CLI.AddCommand(&cobra.Command{
- Use: "serve",
- Short: "Serve RoadRunner service(s)",
- RunE: serveHandler,
- })
-}
-
-func serveHandler(cmd *cobra.Command, args []string) error {
- // https://golang.org/pkg/os/signal/#Notify
- // should be of buffer size at least 1
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
-
- wg := &sync.WaitGroup{}
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- // get the signal
- <-c
- Container.Stop()
- }()
-
- // blocking operation
- if err := Container.Serve(); err != nil {
- return err
- }
-
- wg.Wait()
-
- return nil
-}
diff --git a/cmd/rr/cmd/stop.go b/cmd/rr/cmd/stop.go
deleted file mode 100644
index 7b4794e7..00000000
--- a/cmd/rr/cmd/stop.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package cmd
-
-import (
- "github.com/spf13/cobra"
- "github.com/spiral/roadrunner/cmd/util"
-)
-
-func init() {
- CLI.AddCommand(&cobra.Command{
- Use: "stop",
- Short: "Stop RoadRunner server",
- RunE: stopHandler,
- })
-}
-
-func stopHandler(cmd *cobra.Command, args []string) error {
- client, err := util.RPCClient(Container)
- if err != nil {
- return err
- }
-
- util.Printf("<green>Stopping RoadRunner</reset>: ")
-
- var r string
- if err := client.Call("system.Stop", true, &r); err != nil {
- return err
- }
-
- util.Printf("<green+hb>done</reset>\n")
- return client.Close()
-}
diff --git a/cmd/rr/http/debug.go b/cmd/rr/http/debug.go
deleted file mode 100644
index 02023e36..00000000
--- a/cmd/rr/http/debug.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package http
-
-import (
- "fmt"
- "net"
- "net/http"
- "strings"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
- "github.com/spiral/roadrunner"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- "github.com/spiral/roadrunner/cmd/util"
- rrhttp "github.com/spiral/roadrunner/service/http"
-)
-
-func init() {
- cobra.OnInitialize(func() {
- if rr.Debug {
- svc, _ := rr.Container.Get(rrhttp.ID)
- if svc, ok := svc.(*rrhttp.Service); ok {
- svc.AddListener((&debugger{logger: rr.Logger}).listener)
- }
- }
- })
-}
-
-// listener provide debug callback for system events. With colors!
-type debugger struct{ logger *logrus.Logger }
-
-// listener listens to http events and generates nice looking output.
-func (s *debugger) listener(event int, ctx interface{}) {
- if util.LogEvent(s.logger, event, ctx) {
- // handler by default debug package
- return
- }
-
- // http events
- switch event {
- case rrhttp.EventResponse:
- e := ctx.(*rrhttp.ResponseEvent)
- s.logger.Info(util.Sprintf(
- "<cyan+h>%s</reset> %s %s <white+hb>%s</reset> %s",
- e.Request.RemoteAddr,
- elapsed(e.Elapsed()),
- statusColor(e.Response.Status),
- e.Request.Method,
- e.Request.URI,
- ))
-
- case rrhttp.EventError:
- e := ctx.(*rrhttp.ErrorEvent)
-
- if _, ok := e.Error.(roadrunner.JobError); ok {
- s.logger.Info(util.Sprintf(
- "<cyan+h>%s</reset> %s %s <white+hb>%s</reset> %s",
- addr(e.Request.RemoteAddr),
- elapsed(e.Elapsed()),
- statusColor(500),
- e.Request.Method,
- uri(e.Request),
- ))
- } else {
- s.logger.Info(util.Sprintf(
- "<cyan+h>%s</reset> %s %s <white+hb>%s</reset> %s <red>%s</reset>",
- addr(e.Request.RemoteAddr),
- elapsed(e.Elapsed()),
- statusColor(500),
- e.Request.Method,
- uri(e.Request),
- e.Error,
- ))
- }
- }
-}
-
-func statusColor(status int) string {
- if status < 300 {
- return util.Sprintf("<green>%v</reset>", status)
- }
-
- if status < 400 {
- return util.Sprintf("<cyan>%v</reset>", status)
- }
-
- if status < 500 {
- return util.Sprintf("<yellow>%v</reset>", status)
- }
-
- return util.Sprintf("<red>%v</reset>", status)
-}
-
-func uri(r *http.Request) string {
- if r.TLS != nil {
- return fmt.Sprintf("https://%s%s", r.Host, r.URL.String())
- }
-
- return fmt.Sprintf("http://%s%s", r.Host, r.URL.String())
-}
-
-// fits duration into 5 characters
-func elapsed(d time.Duration) string {
- var v string
- switch {
- case d > 100*time.Second:
- v = fmt.Sprintf("%.1fs", d.Seconds())
- case d > 10*time.Second:
- v = fmt.Sprintf("%.2fs", d.Seconds())
- case d > time.Second:
- v = fmt.Sprintf("%.3fs", d.Seconds())
- case d > 100*time.Millisecond:
- v = fmt.Sprintf("%.0fms", d.Seconds()*1000)
- case d > 10*time.Millisecond:
- v = fmt.Sprintf("%.1fms", d.Seconds()*1000)
- default:
- v = fmt.Sprintf("%.2fms", d.Seconds()*1000)
- }
-
- if d > time.Second {
- return util.Sprintf("<red>{%v}</reset>", v)
- }
-
- if d > time.Millisecond*500 {
- return util.Sprintf("<yellow>{%v}</reset>", v)
- }
-
- return util.Sprintf("<gray+hb>{%v}</reset>", v)
-}
-
-func addr(addr string) string {
- // otherwise, return remote address as is
- if !strings.ContainsRune(addr, ':') {
- return addr
- }
-
- addr, _, _ = net.SplitHostPort(addr)
- return addr
-}
diff --git a/cmd/rr/http/metrics.go b/cmd/rr/http/metrics.go
deleted file mode 100644
index 6aad560e..00000000
--- a/cmd/rr/http/metrics.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package http
-
-import (
- "strconv"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/spf13/cobra"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/spiral/roadrunner/service/metrics"
- "github.com/spiral/roadrunner/util"
-)
-
-func init() {
- cobra.OnInitialize(func() {
- svc, _ := rr.Container.Get(metrics.ID)
- mtr, ok := svc.(*metrics.Service)
- if !ok || !mtr.Enabled() {
- return
- }
-
- ht, _ := rr.Container.Get(rrhttp.ID)
- if ht, ok := ht.(*rrhttp.Service); ok {
- collector := newCollector()
-
- // register metrics
- mtr.MustRegister(collector.requestCounter)
- mtr.MustRegister(collector.requestDuration)
- mtr.MustRegister(collector.workersMemory)
-
- // collect events
- ht.AddListener(collector.listener)
-
- // update memory usage every 10 seconds
- go collector.collectMemory(ht, time.Second*10)
- }
- })
-}
-
-// listener provide debug callback for system events. With colors!
-type metricCollector struct {
- requestCounter *prometheus.CounterVec
- requestDuration *prometheus.HistogramVec
- workersMemory prometheus.Gauge
-}
-
-func newCollector() *metricCollector {
- return &metricCollector{
- requestCounter: prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "rr_http_request_total",
- Help: "Total number of handled http requests after server restart.",
- },
- []string{"status"},
- ),
- requestDuration: prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Name: "rr_http_request_duration_seconds",
- Help: "HTTP request duration.",
- },
- []string{"status"},
- ),
- workersMemory: prometheus.NewGauge(
- prometheus.GaugeOpts{
- Name: "rr_http_workers_memory_bytes",
- Help: "Memory usage by HTTP workers.",
- },
- ),
- }
-}
-
-// listener listens to http events and generates nice looking output.
-func (c *metricCollector) listener(event int, ctx interface{}) {
- // http events
- switch event {
- case rrhttp.EventResponse:
- e := ctx.(*rrhttp.ResponseEvent)
-
- c.requestCounter.With(prometheus.Labels{
- "status": strconv.Itoa(e.Response.Status),
- }).Inc()
-
- c.requestDuration.With(prometheus.Labels{
- "status": strconv.Itoa(e.Response.Status),
- }).Observe(e.Elapsed().Seconds())
-
- case rrhttp.EventError:
- e := ctx.(*rrhttp.ErrorEvent)
-
- c.requestCounter.With(prometheus.Labels{
- "status": "500",
- }).Inc()
-
- c.requestDuration.With(prometheus.Labels{
- "status": "500",
- }).Observe(e.Elapsed().Seconds())
- }
-}
-
-// collect memory usage by server workers
-func (c *metricCollector) collectMemory(service *rrhttp.Service, tick time.Duration) {
- started := false
- for {
- server := service.Server()
- if server == nil && started {
- // stopped
- return
- }
-
- started = true
-
- if workers, err := util.ServerState(server); err == nil {
- sum := 0.0
- for _, w := range workers {
- sum = sum + float64(w.MemoryUsage)
- }
-
- c.workersMemory.Set(sum)
- }
-
- time.Sleep(tick)
- }
-}
diff --git a/cmd/rr/http/reset.go b/cmd/rr/http/reset.go
deleted file mode 100644
index 3008848a..00000000
--- a/cmd/rr/http/reset.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package http
-
-import (
- "github.com/spf13/cobra"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- "github.com/spiral/roadrunner/cmd/util"
-)
-
-func init() {
- rr.CLI.AddCommand(&cobra.Command{
- Use: "http:reset",
- Short: "Reload RoadRunner worker pool for the HTTP service",
- RunE: reloadHandler,
- })
-}
-
-func reloadHandler(cmd *cobra.Command, args []string) error {
- client, err := util.RPCClient(rr.Container)
- if err != nil {
- return err
- }
- defer client.Close()
-
- util.Printf("<green>Restarting http worker pool</reset>: ")
-
- var r string
- if err := client.Call("http.Reset", true, &r); err != nil {
- return err
- }
-
- util.Printf("<green+hb>done</reset>\n")
- return nil
-}
diff --git a/cmd/rr/http/workers.go b/cmd/rr/http/workers.go
deleted file mode 100644
index be6d4038..00000000
--- a/cmd/rr/http/workers.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package http
-
-import (
- "net/rpc"
- "os"
- "os/signal"
- "syscall"
- "time"
-
- tm "github.com/buger/goterm"
- "github.com/spf13/cobra"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- "github.com/spiral/roadrunner/cmd/util"
- "github.com/spiral/roadrunner/service/http"
-)
-
-var (
- interactive bool
- stopSignal = make(chan os.Signal, 1)
-)
-
-func init() {
- workersCommand := &cobra.Command{
- Use: "http:workers",
- Short: "List workers associated with RoadRunner HTTP service",
- RunE: workersHandler,
- }
-
- workersCommand.Flags().BoolVarP(
- &interactive,
- "interactive",
- "i",
- false,
- "render interactive workers table",
- )
-
- rr.CLI.AddCommand(workersCommand)
-
- signal.Notify(stopSignal, syscall.SIGTERM)
- signal.Notify(stopSignal, syscall.SIGINT)
-}
-
-func workersHandler(cmd *cobra.Command, args []string) (err error) {
- defer func() {
- if r, ok := recover().(error); ok {
- err = r
- }
- }()
-
- client, err := util.RPCClient(rr.Container)
- if err != nil {
- return err
- }
- defer client.Close()
-
- if !interactive {
- showWorkers(client)
- return nil
- }
-
- tm.Clear()
- for {
- select {
- case <-stopSignal:
- return nil
- case <-time.NewTicker(time.Millisecond * 500).C:
- tm.MoveCursor(1, 1)
- showWorkers(client)
- tm.Flush()
- }
- }
-}
-
-func showWorkers(client *rpc.Client) {
- var r http.WorkerList
- if err := client.Call("http.Workers", true, &r); err != nil {
- panic(err)
- }
-
- util.WorkerTable(r.Workers).Render()
-}
diff --git a/cmd/rr/limit/debug.go b/cmd/rr/limit/debug.go
deleted file mode 100644
index b9d919dc..00000000
--- a/cmd/rr/limit/debug.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package limit
-
-import (
- "github.com/sirupsen/logrus"
- "github.com/spf13/cobra"
- "github.com/spiral/roadrunner"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- "github.com/spiral/roadrunner/cmd/util"
- "github.com/spiral/roadrunner/service/limit"
-)
-
-func init() {
- cobra.OnInitialize(func() {
- if rr.Debug {
- svc, _ := rr.Container.Get(limit.ID)
- if svc, ok := svc.(*limit.Service); ok {
- svc.AddListener((&debugger{logger: rr.Logger}).listener)
- }
- }
- })
-}
-
-// listener provide debug callback for system events. With colors!
-type debugger struct{ logger *logrus.Logger }
-
-// listener listens to http events and generates nice looking output.
-func (s *debugger) listener(event int, ctx interface{}) {
- if util.LogEvent(s.logger, event, ctx) {
- // handler by default debug package
- return
- }
-
- // watchers
- switch event {
- case limit.EventTTL:
- w := ctx.(roadrunner.WorkerError)
- s.logger.Debug(util.Sprintf(
- "<white+hb>worker.%v</reset> <yellow>%s</reset>",
- *w.Worker.Pid,
- w.Caused,
- ))
- return
-
- case limit.EventIdleTTL:
- w := ctx.(roadrunner.WorkerError)
- s.logger.Debug(util.Sprintf(
- "<white+hb>worker.%v</reset> <yellow>%s</reset>",
- *w.Worker.Pid,
- w.Caused,
- ))
- return
-
- case limit.EventMaxMemory:
- w := ctx.(roadrunner.WorkerError)
- s.logger.Error(util.Sprintf(
- "<white+hb>worker.%v</reset> <red>%s</reset>",
- *w.Worker.Pid,
- w.Caused,
- ))
- return
-
- case limit.EventExecTTL:
- w := ctx.(roadrunner.WorkerError)
- s.logger.Error(util.Sprintf(
- "<white+hb>worker.%v</reset> <red>%s</reset>",
- *w.Worker.Pid,
- w.Caused,
- ))
- return
- }
-}
diff --git a/cmd/rr/limit/metrics.go b/cmd/rr/limit/metrics.go
deleted file mode 100644
index 947f53fe..00000000
--- a/cmd/rr/limit/metrics.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package limit
-
-import (
- "github.com/prometheus/client_golang/prometheus"
- "github.com/spf13/cobra"
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
- rrlimit "github.com/spiral/roadrunner/service/limit"
- "github.com/spiral/roadrunner/service/metrics"
-)
-
-func init() {
- cobra.OnInitialize(func() {
- svc, _ := rr.Container.Get(metrics.ID)
- mtr, ok := svc.(*metrics.Service)
- if !ok || !mtr.Enabled() {
- return
- }
-
- ht, _ := rr.Container.Get(rrlimit.ID)
- if ht, ok := ht.(*rrlimit.Service); ok {
- collector := newCollector()
-
- // register metrics
- mtr.MustRegister(collector.maxMemory)
-
- // collect events
- ht.AddListener(collector.listener)
- }
- })
-}
-
-// listener provide debug callback for system events. With colors!
-type metricCollector struct {
- maxMemory prometheus.Counter
- maxExecutionTime prometheus.Counter
-}
-
-func newCollector() *metricCollector {
- return &metricCollector{
- maxMemory: prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "rr_limit_max_memory",
- Help: "Total number of workers that was killed because they reached max memory limit.",
- },
- ),
- maxExecutionTime: prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "rr_limit_max_execution_time",
- Help: "Total number of workers that was killed because they reached max execution time limit.",
- },
- ),
- }
-}
-
-// listener listens to http events and generates nice looking output.
-func (c *metricCollector) listener(event int, ctx interface{}) {
- switch event {
- case rrlimit.EventMaxMemory:
- c.maxMemory.Inc()
- case rrlimit.EventExecTTL:
- c.maxExecutionTime.Inc()
- }
-}
diff --git a/cmd/rr/main.go b/cmd/rr/main.go
deleted file mode 100644
index 54a1f060..00000000
--- a/cmd/rr/main.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package main
-
-import (
- rr "github.com/spiral/roadrunner/cmd/rr/cmd"
-
- // services (plugins)
- "github.com/spiral/roadrunner/service/env"
- "github.com/spiral/roadrunner/service/gzip"
- "github.com/spiral/roadrunner/service/headers"
- "github.com/spiral/roadrunner/service/health"
- "github.com/spiral/roadrunner/service/http"
- "github.com/spiral/roadrunner/service/limit"
- "github.com/spiral/roadrunner/service/metrics"
- "github.com/spiral/roadrunner/service/reload"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/spiral/roadrunner/service/static"
-
- // additional commands and debug handlers
- _ "github.com/spiral/roadrunner/cmd/rr/http"
- _ "github.com/spiral/roadrunner/cmd/rr/limit"
-)
-
-func main() {
- rr.Container.Register(env.ID, &env.Service{})
- rr.Container.Register(rpc.ID, &rpc.Service{})
- rr.Container.Register(http.ID, &http.Service{})
- rr.Container.Register(metrics.ID, &metrics.Service{})
- rr.Container.Register(headers.ID, &headers.Service{})
- rr.Container.Register(static.ID, &static.Service{})
- rr.Container.Register(limit.ID, &limit.Service{})
- rr.Container.Register(health.ID, &health.Service{})
- rr.Container.Register(gzip.ID, &gzip.Service{})
- rr.Container.Register(reload.ID, &reload.Service{})
-
- // you can register additional commands using cmd.CLI
- rr.Execute()
-}
diff --git a/cmd/util/config.go b/cmd/util/config.go
deleted file mode 100644
index 674260a8..00000000
--- a/cmd/util/config.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package util
-
-import (
- "bytes"
- "fmt"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/spf13/viper"
- "github.com/spiral/roadrunner/service"
-)
-
-// ConfigWrapper provides interface bridge between v configs and service.Config.
-type ConfigWrapper struct {
- v *viper.Viper
-}
-
-// Get nested config section (sub-map), returns nil if section not found.
-func (w *ConfigWrapper) Get(key string) service.Config {
- sub := w.v.Sub(key)
- if sub == nil {
- return nil
- }
-
- return &ConfigWrapper{sub}
-}
-
-// Unmarshal unmarshal config data into given struct.
-func (w *ConfigWrapper) Unmarshal(out interface{}) error {
- return w.v.Unmarshal(out)
-}
-
-// LoadConfig config and merge it's values with set of flags.
-func LoadConfig(cfgFile string, path []string, name string, flags []string, jsonConfig string) (*ConfigWrapper, error) {
- cfg := viper.New()
-
- if cfgFile != "" {
- if absPath, err := filepath.Abs(cfgFile); err == nil {
- cfgFile = absPath
-
- // force working absPath related to config file
- if err := os.Chdir(filepath.Dir(absPath)); err != nil {
- return nil, err
- }
- }
-
- // Use cfg file from the flag.
- cfg.SetConfigFile(cfgFile)
-
- if dir, err := filepath.Abs(cfgFile); err == nil {
- // force working absPath related to config file
- if err := os.Chdir(filepath.Dir(dir)); err != nil {
- return nil, err
- }
- }
- } else {
- // automatic location
- for _, p := range path {
- cfg.AddConfigPath(p)
- }
-
- cfg.SetConfigName(name)
- }
-
- // read in environment variables that match
- cfg.AutomaticEnv()
- cfg.SetEnvPrefix("rr")
- cfg.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
-
- // If a cfg file is found, read it in.
- if err := cfg.ReadInConfig(); err != nil {
- if len(flags) == 0 && jsonConfig == "" {
- return nil, err
- }
- }
-
- // merge included configs
- if include, ok := cfg.Get("include").([]interface{}); ok {
- for _, file := range include {
- filename, ok := file.(string)
- if !ok {
- continue
- }
-
- partial := viper.New()
- partial.AutomaticEnv()
- partial.SetEnvPrefix("rr")
- partial.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
- partial.SetConfigFile(filename)
-
- if err := partial.ReadInConfig(); err != nil {
- return nil, err
- }
-
- // merging
- if err := cfg.MergeConfigMap(partial.AllSettings()); err != nil {
- return nil, err
- }
- }
- }
-
- // automatically inject ENV variables using ${ENV} pattern
- for _, key := range cfg.AllKeys() {
- val := cfg.Get(key)
- cfg.Set(key, parseEnv(val))
- }
-
- // merge with console flags
- if len(flags) != 0 {
- for _, f := range flags {
- k, v, err := parseFlag(f)
- if err != nil {
- return nil, err
- }
-
- cfg.Set(k, v)
- }
- }
-
- if jsonConfig != "" {
- jConfig := viper.New()
- jConfig.AutomaticEnv()
- jConfig.SetEnvPrefix("rr")
- jConfig.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
-
- jConfig.SetConfigType("json")
- if err := jConfig.ReadConfig(bytes.NewBufferString(jsonConfig)); err != nil {
- return nil, err
- }
-
- // merging
- if err := cfg.MergeConfigMap(jConfig.AllSettings()); err != nil {
- return nil, err
- }
- }
-
- merged := viper.New()
-
- // we have to copy all the merged values into new config in order normalize it (viper bug?)
- if err := merged.MergeConfigMap(cfg.AllSettings()); err != nil {
- return nil, err
- }
-
- return &ConfigWrapper{merged}, nil
-}
-
-func parseFlag(flag string) (string, string, error) {
- if !strings.Contains(flag, "=") {
- return "", "", fmt.Errorf("invalid flag `%s`", flag)
- }
-
- parts := strings.SplitN(strings.TrimLeft(flag, " \"'`"), "=", 2)
-
- return strings.Trim(parts[0], " \n\t"), parseValue(strings.Trim(parts[1], " \n\t")), nil
-}
-
-func parseValue(value string) string {
- escape := []rune(value)[0]
-
- if escape == '"' || escape == '\'' || escape == '`' {
- value = strings.Trim(value, string(escape))
- value = strings.Replace(value, fmt.Sprintf("\\%s", string(escape)), string(escape), -1)
- }
-
- return value
-}
-
-func parseEnv(value interface{}) interface{} {
- str, ok := value.(string)
- if !ok || len(str) <= 3 {
- return value
- }
-
- if str[0:2] == "${" && str[len(str)-1:] == "}" {
- if v, ok := os.LookupEnv(str[2 : len(str)-1]); ok {
- return v
- }
- }
-
- return str
-}
diff --git a/cmd/util/cprint.go b/cmd/util/cprint.go
deleted file mode 100644
index 37cb0bc5..00000000
--- a/cmd/util/cprint.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package util
-
-import (
- "fmt"
- "os"
- "regexp"
- "strings"
-
- "github.com/mgutz/ansi"
-)
-
-var (
- reg *regexp.Regexp
-
- // Colorize enables colors support.
- Colorize = true
-)
-
-func init() {
- reg, _ = regexp.Compile(`<([^>]+)>`)
-}
-
-// Printf works identically to fmt.Print but adds `<white+hb>color formatting support for CLI</reset>`.
-func Printf(format string, args ...interface{}) {
- fmt.Print(Sprintf(format, args...))
-}
-
-// Sprintf works identically to fmt.Sprintf but adds `<white+hb>color formatting support for CLI</reset>`.
-func Sprintf(format string, args ...interface{}) string {
- format = reg.ReplaceAllStringFunc(format, func(s string) string {
- if !Colorize {
- return ""
- }
-
- return ansi.ColorCode(strings.Trim(s, "<>/"))
- })
-
- return fmt.Sprintf(format, args...)
-}
-
-// Panicf prints `<white+hb>color formatted message to STDERR</reset>`.
-func Panicf(format string, args ...interface{}) error {
- _, err := fmt.Fprint(os.Stderr, Sprintf(format, args...))
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/cmd/util/debug.go b/cmd/util/debug.go
deleted file mode 100644
index c5cf68bb..00000000
--- a/cmd/util/debug.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package util
-
-import (
- "strings"
-
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner"
-)
-
-// LogEvent outputs rr event into given logger and return false if event was not handled.
-func LogEvent(logger *logrus.Logger, event int, ctx interface{}) bool {
- switch event {
- case roadrunner.EventWorkerKill:
- w := ctx.(*roadrunner.Worker)
- logger.Warning(Sprintf(
- "<white+hb>worker.%v</reset> <yellow>killed</reset>",
- *w.Pid,
- ))
- return true
- case roadrunner.EventWorkerError:
- err := ctx.(roadrunner.WorkerError)
- logger.Error(Sprintf(
- "<white+hb>worker.%v</reset> <red>%s</reset>",
- *err.Worker.Pid,
- err.Caused,
- ))
- return true
- }
-
- // outputs
- switch event {
- case roadrunner.EventStderrOutput:
- for _, line := range strings.Split(string(ctx.([]byte)), "\n") {
- if line == "" {
- continue
- }
-
- logger.Warning(strings.Trim(line, "\r\n"))
- }
-
- return true
- }
-
- // rr server events
- switch event {
- case roadrunner.EventServerFailure:
- logger.Error(Sprintf("<red>server is dead</reset>"))
- return true
- }
-
- // pool events
- switch event {
- case roadrunner.EventPoolConstruct:
- logger.Debug(Sprintf("<cyan>new worker pool</reset>"))
- return true
- case roadrunner.EventPoolError:
- logger.Error(Sprintf("<red>%s</reset>", ctx))
- return true
- }
-
- return false
-}
diff --git a/cmd/util/exit.go b/cmd/util/exit.go
deleted file mode 100644
index 8871a483..00000000
--- a/cmd/util/exit.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package util
-
-import (
- "os"
-)
-
-// ExitWithError prints error and exits with error code`.
-func ExitWithError(err error) {
- errP := Panicf("<red+hb>Error:</reset> <red>%s</reset>\n", err)
- if errP != nil {
- // in case of error during Panicf, print this error via build-int print function
- println("error occurred during fmt.Fprint: " + err.Error())
- }
- os.Exit(1)
-}
diff --git a/cmd/util/rpc.go b/cmd/util/rpc.go
deleted file mode 100644
index cb88943e..00000000
--- a/cmd/util/rpc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package util
-
-import (
- "errors"
- "net/rpc"
-
- "github.com/spiral/roadrunner/service"
- rrpc "github.com/spiral/roadrunner/service/rpc"
-)
-
-// RPCClient returns RPC client associated with given rr service container.
-func RPCClient(container service.Container) (*rpc.Client, error) {
- svc, st := container.Get(rrpc.ID)
- if st < service.StatusOK {
- return nil, errors.New("RPC service is not configured")
- }
-
- return svc.(*rrpc.Service).Client()
-}
diff --git a/codecov.yml b/codecov.yml
index 672717e3..eb499c3a 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -10,3 +10,10 @@ coverage:
target: auto
threshold: 0%
informational: true
+
+# do not include test folders or unit-test files
+ignore:
+ - "tests"
+ - "plugins/kv/boltdb/plugin_unit_test.go"
+ - "plugins/kv/memcached/plugin_unit_test.go"
+ - "plugins/kv/memory/plugin_unit_test.go" \ No newline at end of file
diff --git a/composer.json b/composer.json
deleted file mode 100644
index f64a9263..00000000
--- a/composer.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "name": "spiral/roadrunner",
- "type": "server",
- "description": "High-performance PHP application server, load-balancer and process manager written in Golang",
- "license": "MIT",
- "authors": [
- {
- "name": "Anton Titov / Wolfy-J",
- "email": "[email protected]"
- },
- {
- "name": "RoadRunner Community",
- "homepage": "https://github.com/spiral/roadrunner/graphs/contributors"
- }
- ],
- "require": {
- "php": "^7.2 || ^8.0",
- "ext-json": "*",
- "ext-curl": "*",
- "spiral/goridge": "^2.4.2",
- "psr/http-factory": "^1.0.1",
- "psr/http-message": "^1.0.1",
- "symfony/console": "^2.5.0 || ^3.0.0 || ^4.0.0 || ^5.0.0",
- "laminas/laminas-diactoros": "^1.3.6 || ^2.0",
- "composer/package-versions-deprecated": "^1.8"
- },
- "config": {
- "vendor-dir": "vendor_php"
- },
- "require-dev": {
- "phpstan/phpstan": "~0.12.34"
- },
- "scripts": {
- "analyze": "phpstan analyze -c ./phpstan.neon.dist --no-progress --ansi"
- },
- "autoload": {
- "psr-4": {
- "Spiral\\RoadRunner\\": "src/"
- }
- },
- "bin": [
- "bin/rr"
- ]
-}
diff --git a/config.go b/config.go
deleted file mode 100644
index 1a1d0a0a..00000000
--- a/config.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "runtime"
- "time"
-)
-
-// Config defines basic behaviour of worker creation and handling process.
-type Config struct {
- // Error code for pool errors in the http handler (default 500)
- PoolErrorCode uint64
-
- // NumWorkers defines how many sub-processes can be run at once. This value
- // might be doubled by Swapper while hot-swap.
- NumWorkers int64
-
- // MaxJobs defines how many executions is allowed for the worker until
- // it's destruction. set 1 to create new process for each new task, 0 to let
- // worker handle as many tasks as it can.
- MaxJobs int64
-
- // AllocateTimeout defines for how long pool will be waiting for a worker to
- // be freed to handle the task.
- AllocateTimeout time.Duration
-
- // DestroyTimeout defines for how long pool should be waiting for worker to
- // properly stop, if timeout reached worker will be killed.
- DestroyTimeout time.Duration
-}
-
-// InitDefaults allows to init blank config with pre-defined set of default values.
-func (cfg *Config) InitDefaults() error {
- cfg.AllocateTimeout = time.Minute
- cfg.DestroyTimeout = time.Minute
- cfg.NumWorkers = int64(runtime.NumCPU())
-
- return nil
-}
-
-// Valid returns error if config not valid.
-func (cfg *Config) Valid() error {
- if cfg.NumWorkers == 0 {
- return fmt.Errorf("pool.NumWorkers must be set")
- }
-
- if cfg.AllocateTimeout == 0 {
- return fmt.Errorf("pool.AllocateTimeout must be set")
- }
-
- if cfg.DestroyTimeout == 0 {
- return fmt.Errorf("pool.DestroyTimeout must be set")
- }
-
- return nil
-}
diff --git a/config_test.go b/config_test.go
deleted file mode 100644
index 41e2059d..00000000
--- a/config_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package roadrunner
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_NumWorkers(t *testing.T) {
- cfg := Config{
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second * 10,
- }
- err := cfg.Valid()
-
- assert.NotNil(t, err)
- assert.Equal(t, "pool.NumWorkers must be set", err.Error())
-}
-
-func Test_NumWorkers_Default(t *testing.T) {
- cfg := Config{
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second * 10,
- }
-
- assert.NoError(t, cfg.InitDefaults())
- err := cfg.Valid()
- assert.Nil(t, err)
-}
-
-func Test_AllocateTimeout(t *testing.T) {
- cfg := Config{
- NumWorkers: 10,
- DestroyTimeout: time.Second * 10,
- }
- err := cfg.Valid()
-
- assert.NotNil(t, err)
- assert.Equal(t, "pool.AllocateTimeout must be set", err.Error())
-}
-
-func Test_DestroyTimeout(t *testing.T) {
- cfg := Config{
- NumWorkers: 10,
- AllocateTimeout: time.Second,
- }
- err := cfg.Valid()
-
- assert.NotNil(t, err)
- assert.Equal(t, "pool.DestroyTimeout must be set", err.Error())
-}
diff --git a/controller.go b/controller.go
deleted file mode 100644
index 2079f052..00000000
--- a/controller.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package roadrunner
-
-// Controller observes pool state and decides if any worker must be destroyed.
-type Controller interface {
- // Lock controller on given pool instance.
- Attach(p Pool) Controller
-
- // Detach pool watching.
- Detach()
-}
-
-// Attacher defines the ability to attach rr controller.
-type Attacher interface {
- // Attach attaches controller to the service.
- Attach(c Controller)
-}
diff --git a/controller_test.go b/controller_test.go
deleted file mode 100644
index 75b4d33e..00000000
--- a/controller_test.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "runtime"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-type eWatcher struct {
- p Pool
- onAttach func(p Pool)
- onDetach func(p Pool)
-}
-
-func (w *eWatcher) Attach(p Pool) Controller {
- wp := &eWatcher{p: p, onAttach: w.onAttach, onDetach: w.onDetach}
-
- if wp.onAttach != nil {
- wp.onAttach(p)
- }
-
- return wp
-}
-
-func (w *eWatcher) Detach() {
- if w.onDetach != nil {
- w.onDetach(w.p)
- }
-}
-
-func (w *eWatcher) remove(wr *Worker, err error) {
- w.p.Remove(wr, err)
-}
-
-func Test_WatcherWatch(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- rr.Attach(&eWatcher{})
- assert.NoError(t, rr.Start())
-
- assert.NotNil(t, rr.pController)
- assert.Equal(t, rr.pController.(*eWatcher).p, rr.pool)
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_WatcherReattach(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- rr.Attach(&eWatcher{})
- assert.NoError(t, rr.Start())
-
- assert.NotNil(t, rr.pController)
- assert.Equal(t, rr.pController.(*eWatcher).p, rr.pool)
-
- oldWatcher := rr.pController
-
- assert.NoError(t, rr.Reset())
-
- assert.NotNil(t, rr.pController)
- assert.Equal(t, rr.pController.(*eWatcher).p, rr.pool)
- assert.NotEqual(t, oldWatcher, rr.pController)
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_WatcherAttachDetachSequence(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- var attachedPool Pool
-
- rr.Attach(&eWatcher{
- onAttach: func(p Pool) {
- attachedPool = p
- },
- onDetach: func(p Pool) {
- assert.Equal(t, attachedPool, p)
- },
- })
- assert.NoError(t, rr.Start())
-
- assert.NotNil(t, rr.pController)
- assert.Equal(t, rr.pController.(*eWatcher).p, rr.pool)
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_RemoveWorkerOnAllocation(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php pid pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- rr.Attach(&eWatcher{})
- assert.NoError(t, rr.Start())
-
- wr := rr.Workers()[0]
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
- assert.NoError(t, err)
- assert.Equal(t, fmt.Sprintf("%v", *wr.Pid), res.String())
- lastPid := res.String()
-
- rr.pController.(*eWatcher).remove(wr, nil)
-
- res, err = rr.Exec(&Payload{Body: []byte("hello")})
- assert.NoError(t, err)
- assert.NotEqual(t, lastPid, res.String())
-
- assert.NotEqual(t, StateReady, wr.state.Value())
-
- _, ok := rr.pool.(*StaticPool).remove.Load(wr)
- assert.False(t, ok)
-}
-
-func Test_RemoveWorkerAfterTask(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php slow-pid pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- rr.Attach(&eWatcher{})
- assert.NoError(t, rr.Start())
-
- wr := rr.Workers()[0]
- lastPid := ""
-
- wait := make(chan interface{})
- go func() {
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
- assert.NoError(t, err)
- assert.Equal(t, fmt.Sprintf("%v", *wr.Pid), res.String())
- lastPid = res.String()
-
- close(wait)
- }()
-
- // wait for worker execution to be in progress
- time.Sleep(time.Millisecond * 250)
- rr.pController.(*eWatcher).remove(wr, nil)
-
- <-wait
-
- // must be replaced
- assert.NotEqual(t, lastPid, fmt.Sprintf("%v", rr.Workers()[0]))
-
- // must not be registered within the pool
- rr.pController.(*eWatcher).remove(wr, nil)
-}
diff --git a/dput.cf b/dput.cf
new file mode 100644
index 00000000..d784a825
--- /dev/null
+++ b/dput.cf
@@ -0,0 +1,5 @@
+[roadrunner]
+fqdn = ppa.launchpad.net
+method = ftp
+incoming = 48d90782/ubuntu/roadrunner
+login = anonymous
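
With this stanza in place, a Debian source package targeting the PPA would typically be uploaded with dput's standard invocation, dput roadrunner <package>_source.changes; the exact .changes filename comes from the packaging step and is not part of this change.
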
diff --git a/error_buffer.go b/error_buffer.go
deleted file mode 100644
index 0fc020c7..00000000
--- a/error_buffer.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package roadrunner
-
-import (
- "sync"
- "time"
-)
-
-const (
- // EventStderrOutput - triggered when the worker sends data to stderr. The context
- // is the error message ([]byte).
- EventStderrOutput = 1900
-
- // WaitDuration - how long the error buffer keeps aggregating error messages after the
- // last update before flushing the merged output (keeps related error lines together).
- WaitDuration = 100 * time.Millisecond
-)
-
-// errBuffer is a thread-safe buffer that aggregates worker stderr output.
-type errBuffer struct {
- mu sync.Mutex
- buf []byte
- last int
- wait *time.Timer
- update chan interface{}
- stop chan interface{}
- lsn func(event int, ctx interface{})
-}
-
-func newErrBuffer() *errBuffer {
- eb := &errBuffer{
- buf: make([]byte, 0),
- update: make(chan interface{}),
- wait: time.NewTimer(WaitDuration),
- stop: make(chan interface{}),
- }
-
- go func() {
- for {
- select {
- case <-eb.update:
- eb.wait.Reset(WaitDuration)
- case <-eb.wait.C:
- eb.mu.Lock()
- if len(eb.buf) > eb.last {
- if eb.lsn != nil {
- eb.lsn(EventStderrOutput, eb.buf[eb.last:])
- eb.buf = eb.buf[0:0]
- }
-
- eb.last = len(eb.buf)
- }
- eb.mu.Unlock()
- case <-eb.stop:
- eb.wait.Stop()
-
- eb.mu.Lock()
- if len(eb.buf) > eb.last {
- if eb.lsn != nil {
- eb.lsn(EventStderrOutput, eb.buf[eb.last:])
- }
-
- eb.last = len(eb.buf)
- }
- eb.mu.Unlock()
- return
- }
- }
- }()
-
- return eb
-}
-
-// Listen attaches an error stream event listener.
-func (eb *errBuffer) Listen(l func(event int, ctx interface{})) {
- eb.mu.Lock()
- eb.lsn = l
- eb.mu.Unlock()
-}
-
-// Len returns the number of bytes in the unread portion of the errBuffer;
-// buf.Len() == len(buf.Bytes()).
-func (eb *errBuffer) Len() int {
- eb.mu.Lock()
- defer eb.mu.Unlock()
-
- // currently active message
- return len(eb.buf)
-}
-
-// Write appends the contents of p to the errBuffer, growing the errBuffer as
-// needed. The return value n is the length of p; err is always nil.
-func (eb *errBuffer) Write(p []byte) (int, error) {
- eb.mu.Lock()
- eb.buf = append(eb.buf, p...)
- eb.mu.Unlock()
- eb.update <- nil
-
- return len(p), nil
-}
-
-// String fetches all errBuffer data as a string.
-func (eb *errBuffer) String() string {
- eb.mu.Lock()
- defer eb.mu.Unlock()
-
- return string(eb.buf)
-}
-
-// Close stops the aggregation timer.
-func (eb *errBuffer) Close() error {
- close(eb.stop)
- return nil
-}
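
The removed errBuffer implements a debounce: every Write resets WaitDuration, and only once the timer fires is the accumulated stderr chunk handed to the listener (this is what the Write_Event_Separated test below relies on). A stripped-down, self-contained sketch of the same pattern, with illustrative names that are not part of the codebase:

package main

import (
	"fmt"
	"sync"
	"time"
)

// debouncer collects writes and flushes them as a single chunk once no new
// write has arrived for the quiet period.
type debouncer struct {
	mu    sync.Mutex
	buf   []byte
	timer *time.Timer
	quiet time.Duration
	flush func([]byte)
}

func newDebouncer(quiet time.Duration, flush func([]byte)) *debouncer {
	return &debouncer{quiet: quiet, flush: flush}
}

// Write buffers p and (re)arms the flush timer, mirroring how the removed
// errBuffer reset WaitDuration on every stderr chunk.
func (d *debouncer) Write(p []byte) (int, error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.buf = append(d.buf, p...)
	if d.timer != nil {
		d.timer.Stop()
	}
	d.timer = time.AfterFunc(d.quiet, func() {
		d.mu.Lock()
		out := d.buf
		d.buf = nil
		d.mu.Unlock()
		if len(out) > 0 {
			d.flush(out)
		}
	})
	return len(p), nil
}

func main() {
	d := newDebouncer(100*time.Millisecond, func(b []byte) { fmt.Printf("stderr chunk: %q\n", b) })
	_, _ = d.Write([]byte("hel"))
	_, _ = d.Write([]byte("lo\n"))
	time.Sleep(200 * time.Millisecond) // give the timer a chance to fire and flush "hello\n"
}
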
diff --git a/error_buffer_test.go b/error_buffer_test.go
deleted file mode 100644
index c112159f..00000000
--- a/error_buffer_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package roadrunner
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestErrBuffer_Write_Len(t *testing.T) {
- buf := newErrBuffer()
- defer func() {
- err := buf.Close()
- if err != nil {
- t.Errorf("error during closing the buffer: error %v", err)
- }
- }()
-
- _, err := buf.Write([]byte("hello"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
- assert.Equal(t, 5, buf.Len())
- assert.Equal(t, "hello", buf.String())
-}
-
-func TestErrBuffer_Write_Event(t *testing.T) {
- buf := newErrBuffer()
- defer func() {
- err := buf.Close()
- if err != nil {
- t.Errorf("error during closing the buffer: error %v", err)
- }
- }()
-
- tr := make(chan interface{})
- buf.Listen(func(event int, ctx interface{}) {
- assert.Equal(t, EventStderrOutput, event)
- assert.Equal(t, []byte("hello\n"), ctx)
- close(tr)
- })
-
- _, err := buf.Write([]byte("hello\n"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
- <-tr
-
- // messages are read
- assert.Equal(t, 0, buf.Len())
-}
-
-func TestErrBuffer_Write_Event_Separated(t *testing.T) {
- buf := newErrBuffer()
- defer func() {
- err := buf.Close()
- if err != nil {
- t.Errorf("error during closing the buffer: error %v", err)
- }
- }()
-
- tr := make(chan interface{})
- buf.Listen(func(event int, ctx interface{}) {
- assert.Equal(t, EventStderrOutput, event)
- assert.Equal(t, []byte("hello\nending"), ctx)
- close(tr)
- })
-
- _, err := buf.Write([]byte("hel"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- _, err = buf.Write([]byte("lo\n"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- _, err = buf.Write([]byte("ending"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- <-tr
- assert.Equal(t, 0, buf.Len())
- assert.Equal(t, "", buf.String())
-}
-
-func TestErrBuffer_Write_Event_Separated_NoListener(t *testing.T) {
- buf := newErrBuffer()
- defer func() {
- err := buf.Close()
- if err != nil {
- t.Errorf("error during closing the buffer: error %v", err)
- }
- }()
-
- _, err := buf.Write([]byte("hel"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- _, err = buf.Write([]byte("lo\n"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- _, err = buf.Write([]byte("ending"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- assert.Equal(t, 12, buf.Len())
- assert.Equal(t, "hello\nending", buf.String())
-}
-
-func TestErrBuffer_Write_Remaining(t *testing.T) {
- buf := newErrBuffer()
- defer func() {
- err := buf.Close()
- if err != nil {
- t.Errorf("error during closing the buffer: error %v", err)
- }
- }()
-
- _, err := buf.Write([]byte("hel"))
- if err != nil {
- t.Errorf("fail to write: error %v", err)
- }
-
- assert.Equal(t, 3, buf.Len())
- assert.Equal(t, "hel", buf.String())
-}
diff --git a/errors.go b/errors.go
deleted file mode 100644
index db995721..00000000
--- a/errors.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package roadrunner
-
-// JobError is a job-level error (no worker halt); it wraps the top
-// of the error context.
-type JobError []byte
-
-// Error converts error context to string
-func (je JobError) Error() string {
- return string(je)
-}
-
-// WorkerError is a worker-related error.
-type WorkerError struct {
- // Worker
- Worker *Worker
-
- // Caused error
- Caused error
-}
-
-// Error converts error context to string
-func (e WorkerError) Error() string {
- return e.Caused.Error()
-}
diff --git a/errors_test.go b/errors_test.go
deleted file mode 100644
index 75a86840..00000000
--- a/errors_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package roadrunner
-
-import (
- "errors"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_JobError_Error(t *testing.T) {
- e := JobError([]byte("error"))
- assert.Equal(t, "error", e.Error())
-}
-
-func Test_WorkerError_Error(t *testing.T) {
- e := WorkerError{Worker: nil, Caused: errors.New("error")}
- assert.Equal(t, "error", e.Error())
-}
diff --git a/factory.go b/factory.go
deleted file mode 100644
index 3c304824..00000000
--- a/factory.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package roadrunner
-
-import "os/exec"
-
-// Factory is responsible for wrapping a given command into a task worker.
-type Factory interface {
- // SpawnWorker creates a new worker process based on the given command.
- // The process must not be started.
- SpawnWorker(cmd *exec.Cmd) (w *Worker, err error)
-
- // Close the factory and underlying connections.
- Close() error
-}
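
The removed Factory contract leaves process startup to the caller: SpawnWorker only wraps the command, and the concrete pipe/socket factories attach a relay before the pool starts the process. A purely hypothetical stub, just to make the v1 contract shape concrete (stubFactory and its error text are illustrative, not part of the codebase):

package example

import (
	"errors"
	"os/exec"

	"github.com/spiral/roadrunner"
)

// stubFactory satisfies the (removed) v1 Factory signature but provides no
// transport, so SpawnWorker only demonstrates the shape of the contract.
type stubFactory struct{}

// SpawnWorker would normally construct the worker and attach a relay here,
// leaving the command unstarted as the interface requires.
func (f *stubFactory) SpawnWorker(cmd *exec.Cmd) (*roadrunner.Worker, error) {
	return nil, errors.New("stub factory: no transport attached")
}

// Close releases factory resources; the stub holds none.
func (f *stubFactory) Close() error { return nil }

// compile-time check that the stub matches the interface shape
var _ roadrunner.Factory = (*stubFactory)(nil)
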
diff --git a/go.mod b/go.mod
index ccaff45e..92c9953f 100644..100755
--- a/go.mod
+++ b/go.mod
@@ -1,28 +1,38 @@
-module github.com/spiral/roadrunner
+module github.com/spiral/roadrunner/v2
go 1.15
require (
github.com/NYTimes/gziphandler v1.1.1
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
- github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37
- github.com/cenkalti/backoff/v4 v4.0.0
+ github.com/alicebob/miniredis/v2 v2.14.1
+ github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
+ github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129
github.com/dustin/go-humanize v1.0.0
+ github.com/fatih/color v1.10.0
github.com/go-ole/go-ole v1.2.4 // indirect
+ github.com/go-redis/redis/v8 v8.4.4
+ github.com/gofiber/fiber/v2 v2.3.0
+ github.com/golang/mock v1.4.4
+ github.com/hashicorp/go-multierror v1.1.0
github.com/json-iterator/go v1.1.10
- github.com/mattn/go-colorable v0.1.7 // indirect
- github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d
+ github.com/mattn/go-runewidth v0.0.9
github.com/olekukonko/tablewriter v0.0.4
- github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1
- github.com/shirou/gopsutil v2.20.7+incompatible
- github.com/sirupsen/logrus v1.6.0
- github.com/spf13/cobra v1.0.0
+ github.com/shirou/gopsutil v3.20.11+incompatible
+ github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.1
- github.com/spiral/goridge/v2 v2.4.6
+ github.com/spiral/endure v1.0.0-beta20
+ github.com/spiral/errors v1.0.7
+ github.com/spiral/goridge/v3 v3.0.0-beta8
github.com/stretchr/testify v1.6.1
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
+ github.com/vbauerster/mpb/v5 v5.4.0
github.com/yookoala/gofast v0.4.0
- golang.org/x/net v0.0.0-20200222125558-5a598a2470a0
- golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1
+ go.etcd.io/bbolt v1.3.5
+ go.uber.org/multierr v1.6.0
+ go.uber.org/zap v1.16.0
+ golang.org/x/net v0.0.0-20201216054612-986b41b23924
+ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
+ golang.org/x/sys v0.0.0-20201221093633-bc327ba9c2f0
)
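
Note that the module path bump to github.com/spiral/roadrunner/v2 follows Go's semantic import versioning: downstream code must import the /v2 path explicitly, while the suffix-less github.com/spiral/roadrunner path continues to resolve to the v1.x release line.
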
diff --git a/go.sum b/go.sum
index b91bd265..c4f1f187 100644..100755
--- a/go.sum
+++ b/go.sum
@@ -19,10 +19,20 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
+github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis/v2 v2.14.1 h1:GjlbSeoJ24bzdLRs13HoMEeaRZx9kg5nHoRW7QV/nCs=
+github.com/alicebob/miniredis/v2 v2.14.1/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg=
+github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
+github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -34,15 +44,24 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37 h1:uxxtrnACqI9zK4ENDMf0WpXfUsHP5V8liuq5QdgDISU=
+github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
+github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
github.com/buger/goterm v0.0.0-20181115115552-c206103e1f37/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U=
-github.com/cenkalti/backoff/v4 v4.0.0 h1:6VeaLF9aI+MAUQ95106HwWzYZgJJpZ4stumjj6RFYAU=
+github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4=
+github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U=
github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
+github.com/cenkalti/backoff/v4 v4.1.0 h1:c8LkOFQTzuO0WBM/ae5HdGQuZPfPxp7lqBRwQRm4fSc=
+github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -55,12 +74,30 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
+github.com/docker/docker v20.10.2+incompatible h1:vFgEHPqWBTp4pTjdLwjAA4bSo3gvIGOYwuJTlEjVBCw=
+github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
@@ -70,17 +107,24 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-redis/redis/v8 v8.4.4 h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc=
+github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY=
github.com/go-restit/lzjson v0.0.0-20161206095556-efe3c53acc68/go.mod h1:7vXSKQt83WmbPeyVjCfNT9YDJ5BUFmcwFsEjI9SCvYM=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gofiber/fiber/v2 v2.3.0 h1:82ufvLne0cxzdkDOeLkUmteA+z1uve9JQ/ZFsMOnkzc=
+github.com/gofiber/fiber/v2 v2.3.0/go.mod h1:f8BRRIMjMdRyt2qmJ/0Sea3j3rwwfufPrh9WNBRiVZ0=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -96,8 +140,9 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -115,11 +160,15 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@@ -134,6 +183,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -147,9 +197,10 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -157,21 +208,23 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -190,9 +243,23 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -205,8 +272,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
@@ -216,14 +283,14 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -233,9 +300,11 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shirou/gopsutil v2.20.7+incompatible h1:Ymv4OD12d6zm+2yONe39VSmp2XooJe8za7ngOLW/o/w=
github.com/shirou/gopsutil v2.20.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto=
+github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -253,23 +322,41 @@ github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spiral/goridge/v2 v2.4.6 h1:9u/mrxCtOSy0lnumrpPCSOlGBX/Vprid/hFsnzWrd6k=
+github.com/spiral/endure v1.0.0-beta20 h1:QD3EJ6CRLgeo/6trfnlUcQhH3vrK8Hvf9ceDpde+yss=
+github.com/spiral/endure v1.0.0-beta20/go.mod h1:qCU2/4gAItVESzUK0yPExmUTlTcpRLqJUgcV+nqxn+o=
+github.com/spiral/errors v1.0.4/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o=
+github.com/spiral/errors v1.0.5 h1:TwlR9cZtTgnZrSngcEUpyiMO9yJ45gdQ+XcrCRoCCAM=
+github.com/spiral/errors v1.0.5/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o=
+github.com/spiral/errors v1.0.6 h1:berk5ShEILSw6DplUVv9Ea1wGdk2WlVKQpuvDngll0U=
+github.com/spiral/errors v1.0.6/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o=
+github.com/spiral/errors v1.0.7 h1:GRN7Sjk4yVavD2W+1fUWBjqoivWQsnbsXbX7xyhZhbU=
+github.com/spiral/errors v1.0.7/go.mod h1:SwMSZVdZkkJVgXNNafccqOaxWg0XPzVU/dEdUEInE0o=
+github.com/spiral/goridge v1.0.4 h1:qnYtI84H0tcYjcbFdFl/VUFQZ0YUE9p+VuU8In4kC/8=
+github.com/spiral/goridge v2.1.4+incompatible h1:L15TKrbPEp/G6JfS3jjuvY6whkhfD292XX+1iy9mO2k=
github.com/spiral/goridge/v2 v2.4.6/go.mod h1:mYjL+Ny7nVfLqjRwIYV2pUSQ61eazvVclHII6FfZfYc=
+github.com/spiral/goridge/v3 v3.0.0-beta8 h1:x8uXCdhY49U1LEvmehnTaD2El6J9ZHAefRdh/QIZ6A4=
+github.com/spiral/goridge/v3 v3.0.0-beta8/go.mod h1:XFQGc42KNzo/hPIXPki7mEkFTf9v/T7qFk/TYJjMtzE=
+github.com/spiral/kv v0.0.0-20200601133055-3397db7fc998 h1:TtGWRjpF7iQy1IA7nuJXVfhnJb9m39pf7YuBXNPcKMc=
+github.com/spiral/roadrunner v1.9.1 h1:905qx8bIQN/XBz+ScOqrPeKdqf0lqm9rXwO//b5N4C4=
+github.com/spiral/roadrunner v1.9.1/go.mod h1:Q1al1YGjs7ZHVkAA7+gUKM0rwk6XWG07G0UjyjjuK+0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -277,23 +364,50 @@ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.18.0 h1:IV0DdMlatq9QO1Cr6wGJPVW1sV1Q8HvZXAIcjorylyM=
+github.com/valyala/fasthttp v1.18.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
+github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
+github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
+github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yookoala/gofast v0.4.0 h1:dLBjghcsbbZNOEHN8N1X/gh9S6srmJed4WQfG7DlKwo=
github.com/yookoala/gofast v0.4.0/go.mod h1:rfbkoKaQG1bnuTUZcmV3vAlnfpF4FTq8WbQJf2vcpg8=
+github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
+github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
+go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
+go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -307,6 +421,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -314,8 +429,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -328,9 +443,14 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0 h1:MsuvTghUPjX762sGLnGsxC3HM0B5r83wEtYcYR8/vRs=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201216054612-986b41b23924 h1:QsnDpLLOKwHBBDa8nDws4DYNc/ryVW2vCpxCs09d4PY=
+golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -338,15 +458,19 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -354,18 +478,33 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201210223839-7e3030f88018/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ=
+golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201221093633-bc327ba9c2f0 h1:n+DPcgTwkgWzIFpLmoimYR2K2b0Ga5+Os4kayIN0vGo=
+golang.org/x/sys v0.0.0-20201221093633-bc327ba9c2f0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -376,7 +515,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -386,6 +524,8 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc h1:NCy3Ohtk6Iny5V/reW2Ktypo4zIpWBdRJ1uFMjBxdg8=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -399,6 +539,7 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -408,10 +549,12 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -427,21 +570,28 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/interfaces/events/handler.go b/interfaces/events/handler.go
new file mode 100644
index 00000000..ac6c15a4
--- /dev/null
+++ b/interfaces/events/handler.go
@@ -0,0 +1,14 @@
+package events
+
+// Handler interface
+type Handler interface {
+ // Return number of active listeners
+ NumListeners() int
+ // AddListener adds listener to the publisher
+ AddListener(listener Listener)
+ // Push pushes event to the listeners
+ Push(e interface{})
+}
+
+// Event listener listens for the events produced by worker, worker pool or other service.
+type Listener func(event interface{})
diff --git a/interfaces/events/pool_events.go b/interfaces/events/pool_events.go
new file mode 100644
index 00000000..d1464e1a
--- /dev/null
+++ b/interfaces/events/pool_events.go
@@ -0,0 +1,65 @@
+package events
+
+const (
+ // EventWorkerConstruct thrown when new worker is spawned.
+ EventWorkerConstruct P = iota + 7800
+
+ // EventWorkerDestruct thrown after worker destruction.
+ EventWorkerDestruct
+
+ // EventPoolError caused on pool wide errors.
+ EventPoolError
+
+ // EventSupervisorError triggered when supervisor can not complete work.
+ EventSupervisorError
+
+ // EventNoFreeWorkers triggered when there are no free workers in the stack and the worker allocation timeout has elapsed
+ EventNoFreeWorkers
+
+ // EventMaxMemory caused when worker consumes more memory than allowed.
+ EventMaxMemory
+
+ // EventTTL thrown when worker is removed due to TTL being reached. Context is rr.WorkerError
+ EventTTL
+
+ // EventIdleTTL triggered when worker spends too much time at rest.
+ EventIdleTTL
+
+ // EventExecTTL triggered when worker spends too much time doing the task (max_execution_time).
+ EventExecTTL
+)
+
+type P int64
+
+func (ev P) String() string {
+ switch ev {
+ case EventWorkerConstruct:
+ return "EventWorkerConstruct"
+ case EventWorkerDestruct:
+ return "EventWorkerDestruct"
+ case EventPoolError:
+ return "EventPoolError"
+ case EventSupervisorError:
+ return "EventSupervisorError"
+ case EventNoFreeWorkers:
+ return "EventNoFreeWorkers"
+ case EventMaxMemory:
+ return "EventMaxMemory"
+ case EventTTL:
+ return "EventTTL"
+ case EventIdleTTL:
+ return "EventIdleTTL"
+ case EventExecTTL:
+ return "EventExecTTL"
+ }
+ return "Unknown event type"
+}
+
+// PoolEvent triggered by pool on different events. Pool can also trigger WorkerEvent in case of log.
+type PoolEvent struct {
+ // Event type, see below.
+ Event P
+
+ // Payload depends on event type, typically it's worker or error.
+ Payload interface{}
+}
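A hedged sketch of a listener reacting to the pool event codes above; it uses only events.PoolEvent and the P constants defined in this file and is illustrative rather than part of the commit:

package main

import (
	"log"

	"github.com/spiral/roadrunner/v2/interfaces/events"
)

func poolListener(event interface{}) {
	pe, ok := event.(events.PoolEvent)
	if !ok {
		return
	}
	switch pe.Event {
	case events.EventNoFreeWorkers:
		log.Println("allocate timeout elapsed, no free workers:", pe.Payload)
	case events.EventMaxMemory, events.EventTTL, events.EventIdleTTL, events.EventExecTTL:
		log.Println("supervisor limit reached:", pe.Event.String())
	default:
		log.Println("pool event:", pe.Event.String(), pe.Payload)
	}
}

func main() {
	poolListener(events.PoolEvent{Event: events.EventPoolError, Payload: "example"})
}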
diff --git a/interfaces/events/worker_events.go b/interfaces/events/worker_events.go
new file mode 100644
index 00000000..2bff1811
--- /dev/null
+++ b/interfaces/events/worker_events.go
@@ -0,0 +1,33 @@
+package events
+
+const (
+ // EventWorkerError triggered after WorkerProcess. Expect the payload to be an error.
+ EventWorkerError W = iota + 200
+
+ // EventWorkerLog triggered on every write to WorkerProcess StdErr pipe (batched). Expect the payload to be a []byte string.
+ EventWorkerLog
+)
+
+type W int64
+
+func (ev W) String() string {
+ switch ev {
+ case EventWorkerError:
+ return "EventWorkerError"
+ case EventWorkerLog:
+ return "EventWorkerLog"
+ }
+ return "Unknown event type"
+}
+
+// WorkerEvent wraps worker events.
+type WorkerEvent struct {
+ // Event id, see below.
+ Event W
+
+ // Worker triggered the event.
+ Worker interface{}
+
+ // Event specific payload.
+ Payload interface{}
+}
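The payload type differs by event code: EventWorkerLog carries the batched stderr bytes, EventWorkerError carries an error. An illustrative listener under the same assumptions as the sketches above:

package main

import (
	"log"

	"github.com/spiral/roadrunner/v2/interfaces/events"
)

func workerListener(event interface{}) {
	we, ok := event.(events.WorkerEvent)
	if !ok {
		return
	}
	switch we.Event {
	case events.EventWorkerLog:
		// stderr output arrives as []byte
		if b, ok := we.Payload.([]byte); ok {
			log.Printf("worker stderr: %s", b)
		}
	case events.EventWorkerError:
		if err, ok := we.Payload.(error); ok {
			log.Printf("worker error: %v", err)
		}
	}
}

func main() {
	workerListener(events.WorkerEvent{Event: events.EventWorkerLog, Payload: []byte("notice: warming up")})
}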
diff --git a/interfaces/pool/pool.go b/interfaces/pool/pool.go
new file mode 100644
index 00000000..97cc945c
--- /dev/null
+++ b/interfaces/pool/pool.go
@@ -0,0 +1,100 @@
+package pool
+
+import (
+ "context"
+ "runtime"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+)
+
+// Pool manages a set of inner worker processes.
+type Pool interface {
+ // GetConfig returns pool configuration.
+ GetConfig() interface{}
+
+ // Exec executes task with payload
+ Exec(rqs payload.Payload) (payload.Payload, error)
+
+ // ExecWithContext executes task with context which is used with timeout
+ ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error)
+
+ // Workers returns worker list associated with the pool.
+ Workers() (workers []worker.BaseProcess)
+
+ // Remove worker from the pool.
+ RemoveWorker(worker worker.BaseProcess) error
+
+ // Destroy all underlying stack (but let them to complete the task).
+ Destroy(ctx context.Context)
+}
+
+// Configures the pool behaviour.
+type Config struct {
+ // Debug flag creates new fresh worker before every request.
+ Debug bool
+
+ // NumWorkers defines how many sub-processes can be run at once. This value
+ // might be doubled by Swapper while hot-swap. Defaults to number of CPU cores.
+ NumWorkers int64
+
+ // MaxJobs defines how many executions are allowed for the worker until
+ // its destruction. Set 1 to create a new process for each new task, 0 to let
+ // the worker handle as many tasks as it can.
+ MaxJobs int64
+
+ // AllocateTimeout defines for how long pool will be waiting for a worker to
+ // be freed to handle the task. Defaults to 60s.
+ AllocateTimeout time.Duration
+
+ // DestroyTimeout defines for how long pool should be waiting for worker to
+ // properly destroy, if timeout reached worker will be killed. Defaults to 60s.
+ DestroyTimeout time.Duration
+
+ // Supervision config to limit worker and pool memory usage.
+ Supervisor *SupervisorConfig
+}
+
+// InitDefaults enables default config values.
+func (cfg *Config) InitDefaults() {
+ if cfg.NumWorkers == 0 {
+ cfg.NumWorkers = int64(runtime.NumCPU())
+ }
+
+ if cfg.AllocateTimeout == 0 {
+ cfg.AllocateTimeout = time.Minute
+ }
+
+ if cfg.DestroyTimeout == 0 {
+ cfg.DestroyTimeout = time.Minute
+ }
+ if cfg.Supervisor == nil {
+ return
+ }
+ cfg.Supervisor.InitDefaults()
+}
+
+type SupervisorConfig struct {
+ // WatchTick defines how often to check the state of worker.
+ WatchTick uint64
+
+ // TTL defines maximum time worker is allowed to live.
+ TTL uint64
+
+ // IdleTTL defines maximum duration worker can spend in idle mode. Disabled when 0.
+ IdleTTL uint64
+
+ // ExecTTL defines maximum lifetime per job.
+ ExecTTL uint64
+
+ // MaxWorkerMemory limits memory per worker.
+ MaxWorkerMemory uint64
+}
+
+// InitDefaults enables default config values.
+func (cfg *SupervisorConfig) InitDefaults() {
+ if cfg.WatchTick == 0 {
+ cfg.WatchTick = 1
+ }
+}
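InitDefaults only fills zero-valued fields, so a partially populated Config is safe to pass in. A minimal sketch (not part of the commit) showing the resulting defaults:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/interfaces/pool"
)

func main() {
	cfg := pool.Config{
		Supervisor: &pool.SupervisorConfig{ExecTTL: 60},
	}
	cfg.InitDefaults()

	// NumWorkers defaults to runtime.NumCPU(), both timeouts to one minute,
	// and Supervisor.WatchTick to 1.
	fmt.Println(cfg.NumWorkers, cfg.AllocateTimeout, cfg.DestroyTimeout, cfg.Supervisor.WatchTick)
}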
diff --git a/interfaces/worker/factory.go b/interfaces/worker/factory.go
new file mode 100644
index 00000000..376303df
--- /dev/null
+++ b/interfaces/worker/factory.go
@@ -0,0 +1,20 @@
+package worker
+
+import (
+ "context"
+ "os/exec"
+
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+)
+
+// Factory is responsible for wrapping the given command into a WorkerProcess.
+type Factory interface {
+ // SpawnWorkerWithTimeout creates a new WorkerProcess based on the given command, bounded by the provided context.
+ // Process must not be started.
+ SpawnWorkerWithTimeout(context.Context, *exec.Cmd, ...events.Listener) (BaseProcess, error)
+ // SpawnWorker creates a new WorkerProcess based on the given command.
+ // Process must not be started.
+ SpawnWorker(*exec.Cmd, ...events.Listener) (BaseProcess, error)
+ // Close the factory and underlying connections.
+ Close() error
+}
diff --git a/interfaces/worker/watcher.go b/interfaces/worker/watcher.go
new file mode 100644
index 00000000..ce2c1c5a
--- /dev/null
+++ b/interfaces/worker/watcher.go
@@ -0,0 +1,26 @@
+package worker
+
+import "context"
+
+type Watcher interface {
+ // AddToWatch adds the given workers to the watcher so their state is tracked
+ AddToWatch(workers []BaseProcess) error
+
+ // GetFreeWorker provides the first free worker
+ GetFreeWorker(ctx context.Context) (BaseProcess, error)
+
+ // PushWorker enqueues the worker back
+ PushWorker(w BaseProcess)
+
+ // AllocateNew allocates a new worker and puts it into the WorkerWatcher
+ AllocateNew() error
+
+ // Destroy destroys the underlying stack
+ Destroy(ctx context.Context)
+
+ // WorkersList returns all workers without removing them from internal storage
+ WorkersList() []BaseProcess
+
+ // RemoveWorker removes the worker from the stack
+ RemoveWorker(wb BaseProcess) error
+}
diff --git a/interfaces/worker/worker.go b/interfaces/worker/worker.go
new file mode 100644
index 00000000..0ac82158
--- /dev/null
+++ b/interfaces/worker/worker.go
@@ -0,0 +1,59 @@
+package worker
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spiral/goridge/v3/interfaces/relay"
+ "github.com/spiral/roadrunner/v2/internal"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+)
+
+// Allocator is responsible for worker allocation in the pool
+type Allocator func() (BaseProcess, error)
+
+type BaseProcess interface {
+ fmt.Stringer
+
+ // Pid returns worker pid.
+ Pid() int64
+
+ // Created returns time worker was created at.
+ Created() time.Time
+
+ // State returns a receive-only WorkerProcess state object, which can be used to safely access
+ // the WorkerProcess status, the time the status changed and the number of WorkerProcess executions.
+ State() internal.State
+
+ // Start runs the Cmd and returns immediately
+ Start() error
+
+ // Wait must be called once for each WorkerProcess; the call is released once the WorkerProcess is
+ // complete and returns the process error (if any). If stderr is present, its value
+ // will be wrapped as WorkerError. The method returns an error code if the php process fails
+ // to find or Start the script.
+ Wait() error
+
+ // Stop sends a soft termination command to the WorkerProcess and waits for process completion.
+ Stop() error
+
+ // Kill kills the underlying process; make sure to call Wait() to gather
+ // the error log from stderr. Does not wait for process completion!
+ Kill() error
+
+ // Relay returns the goridge relay attached to the worker
+ Relay() relay.Relay
+
+ // AttachRelay attaches a goridge relay to the worker process
+ AttachRelay(rl relay.Relay)
+}
+
+type SyncWorker interface {
+ // BaseProcess provides basic functionality for the SyncWorker
+ BaseProcess
+ // Exec executes the payload on the SyncWorker; there are no timeouts
+ Exec(rqs payload.Payload) (payload.Payload, error)
+ // ExecWithTimeout executes the payload with a TTL enforced via the context
+ ExecWithTimeout(ctx context.Context, p payload.Payload) (payload.Payload, error)
+}
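The BaseProcess/SyncWorker split means a freshly spawned worker handles only lifecycle; to execute payloads it has to be wrapped, which the tests later in this diff do via workerImpl.From. A hedged sketch of that pattern; the php script path assumes the repository's test fixtures and running from the repo root:

package main

import (
	"context"
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
	workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
)

func main() {
	cmd := exec.Command("php", "tests/client.php", "echo", "pipes")

	w, err := pipe.NewPipeFactory().SpawnWorkerWithTimeout(context.Background(), cmd)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = w.Stop() }()

	// wrap the BaseProcess into a SyncWorker to gain Exec/ExecWithTimeout
	sw, err := workerImpl.From(w)
	if err != nil {
		log.Fatal(err)
	}

	res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(res.String())
}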
diff --git a/internal/protocol.go b/internal/protocol.go
new file mode 100755
index 00000000..a099ce4d
--- /dev/null
+++ b/internal/protocol.go
@@ -0,0 +1,94 @@
+package internal
+
+import (
+ "os"
+
+ j "github.com/json-iterator/go"
+ "github.com/spiral/errors"
+ "github.com/spiral/goridge/v3/interfaces/relay"
+ "github.com/spiral/goridge/v3/pkg/frame"
+)
+
+var json = j.ConfigCompatibleWithStandardLibrary
+
+type StopCommand struct {
+ Stop bool `json:"stop"`
+}
+
+type pidCommand struct {
+ Pid int `json:"pid"`
+}
+
+func SendControl(rl relay.Relay, payload interface{}) error {
+ const op = errors.Op("send control frame")
+ fr := frame.NewFrame()
+ fr.WriteVersion(frame.VERSION_1)
+ fr.WriteFlags(frame.CONTROL)
+
+ if data, ok := payload.([]byte); ok {
+ // check that the payload is no larger than 4GB
+ if uint64(len(data)) > uint64(^uint32(0)) {
+ return errors.E(op, errors.Str("payload is larger than 4GB"))
+ }
+
+ fr.WritePayloadLen(uint32(len(data)))
+ fr.WritePayload(data)
+ fr.WriteCRC()
+
+ err := rl.Send(fr)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ }
+
+ data, err := json.Marshal(payload)
+ if err != nil {
+ return errors.E(op, errors.Errorf("invalid payload: %s", err))
+ }
+
+ fr.WritePayloadLen(uint32(len(data)))
+ fr.WritePayload(data)
+ fr.WriteCRC()
+
+ err = rl.Send(fr)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
+
+func FetchPID(rl relay.Relay) (int64, error) {
+ const op = errors.Op("fetchPID")
+ err := SendControl(rl, pidCommand{Pid: os.Getpid()})
+ if err != nil {
+ return 0, errors.E(op, err)
+ }
+
+ frameR := frame.NewFrame()
+ err = rl.Receive(frameR)
+ if err != nil {
+ return 0, errors.E(op, err)
+ }
+ if frameR == nil {
+ return 0, errors.E(op, errors.Str("nil frame received"))
+ }
+ if !frameR.VerifyCRC() {
+ return 0, errors.E(op, errors.Str("CRC mismatch"))
+ }
+
+ flags := frameR.ReadFlags()
+
+ if flags&(byte(frame.CONTROL)) == 0 {
+ return 0, errors.E(op, errors.Str("unexpected response, header is missing, no CONTROL flag"))
+ }
+
+ link := &pidCommand{}
+ err = json.Unmarshal(frameR.Payload(), link)
+ if err != nil {
+ return 0, errors.E(op, err)
+ }
+
+ return int64(link.Pid), nil
+}
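The control frames carry small JSON commands; the stop command, for instance, serializes to exactly the StopRequest string the static pool compares against later in this diff. A tiny sketch with a locally redeclared struct (internal packages cannot be imported from outside the module, so the type is mirrored here purely for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

// mirrors internal.StopCommand for illustration only
type stopCommand struct {
	Stop bool `json:"stop"`
}

func main() {
	b, err := json.Marshal(stopCommand{Stop: true})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints {"stop":true}, the same bytes as the pool's StopRequest constant
}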
diff --git a/internal/state.go b/internal/state.go
new file mode 100755
index 00000000..8f7d939b
--- /dev/null
+++ b/internal/state.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+ "fmt"
+ "sync/atomic"
+)
+
+// State represents WorkerProcess status and updated time.
+type State interface {
+ fmt.Stringer
+ // Value returns WorkerState value
+ Value() int64
+ // Set sets the WorkerState
+ Set(value int64)
+ // NumExecs shows how many times WorkerProcess was invoked
+ NumExecs() int64
+ // IsActive returns true if WorkerProcess is not Inactive or Stopped
+ IsActive() bool
+ // RegisterExec registers a php execution
+ RegisterExec()
+ // SetLastUsed sets worker last used time
+ SetLastUsed(lu uint64)
+ // LastUsed return worker last used time
+ LastUsed() uint64
+}
+
+const (
+ // StateInactive - no associated process
+ StateInactive int64 = iota
+
+ // StateReady - ready for job.
+ StateReady
+
+ // StateWorking - working on given payload.
+ StateWorking
+
+ // StateInvalid - indicates that WorkerProcess is being disabled and will be removed.
+ StateInvalid
+
+ // StateStopping - process is being softly stopped.
+ StateStopping
+
+ StateKilling
+
+ // StateDestroyed - worker state when there is no need to allocate a new one
+ StateDestroyed
+
+ // StateStopped - process has been terminated.
+ StateStopped
+
+ // StateErrored - error WorkerState (can't be used).
+ StateErrored
+
+ StateRemove
+)
+
+type WorkerState struct {
+ value int64
+ numExecs int64
+ // to be lightweight, use UnixNano
+ lastUsed uint64
+}
+
+// Thread safe
+func NewWorkerState(value int64) *WorkerState {
+ return &WorkerState{value: value}
+}
+
+// String returns current WorkerState as string.
+func (s *WorkerState) String() string {
+ switch s.Value() {
+ case StateInactive:
+ return "inactive"
+ case StateReady:
+ return "ready"
+ case StateWorking:
+ return "working"
+ case StateInvalid:
+ return "invalid"
+ case StateStopped:
+ return "stopped"
+ case StateErrored:
+ return "errored"
+ }
+
+ return "undefined"
+}
+
+// NumExecs returns number of registered WorkerProcess execs.
+func (s *WorkerState) NumExecs() int64 {
+ return atomic.LoadInt64(&s.numExecs)
+}
+
+// Value returns WorkerState value
+func (s *WorkerState) Value() int64 {
+ return atomic.LoadInt64(&s.value)
+}
+
+// IsActive returns true if WorkerProcess is not Inactive or Stopped
+func (s *WorkerState) IsActive() bool {
+ val := s.Value()
+ return val == StateWorking || val == StateReady
+}
+
+// change WorkerState value (status)
+func (s *WorkerState) Set(value int64) {
+ atomic.StoreInt64(&s.value, value)
+}
+
+// register new execution atomically
+func (s *WorkerState) RegisterExec() {
+ atomic.AddInt64(&s.numExecs, 1)
+}
+
+// Update last used time
+func (s *WorkerState) SetLastUsed(lu uint64) {
+ atomic.StoreUint64(&s.lastUsed, lu)
+}
+
+func (s *WorkerState) LastUsed() uint64 {
+ return atomic.LoadUint64(&s.lastUsed)
+}
diff --git a/internal/state_test.go b/internal/state_test.go
new file mode 100755
index 00000000..bdb05825
--- /dev/null
+++ b/internal/state_test.go
@@ -0,0 +1,27 @@
+package internal
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_NewState(t *testing.T) {
+ st := NewWorkerState(StateErrored)
+
+ assert.Equal(t, "errored", st.String())
+
+ assert.Equal(t, "inactive", NewWorkerState(StateInactive).String())
+ assert.Equal(t, "ready", NewWorkerState(StateReady).String())
+ assert.Equal(t, "working", NewWorkerState(StateWorking).String())
+ assert.Equal(t, "stopped", NewWorkerState(StateStopped).String())
+ assert.Equal(t, "undefined", NewWorkerState(1000).String())
+}
+
+func Test_IsActive(t *testing.T) {
+ assert.False(t, NewWorkerState(StateInactive).IsActive())
+ assert.True(t, NewWorkerState(StateReady).IsActive())
+ assert.True(t, NewWorkerState(StateWorking).IsActive())
+ assert.False(t, NewWorkerState(StateStopped).IsActive())
+ assert.False(t, NewWorkerState(StateErrored).IsActive())
+}
diff --git a/phpstan.neon.dist b/phpstan.neon.dist
deleted file mode 100644
index a6ef3b8a..00000000
--- a/phpstan.neon.dist
+++ /dev/null
@@ -1,4 +0,0 @@
-parameters:
- level: 'max'
- paths:
- - src
diff --git a/pipe_factory.go b/pipe_factory.go
deleted file mode 100644
index e9e750c4..00000000
--- a/pipe_factory.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "io"
- "os/exec"
-
- "github.com/pkg/errors"
- "github.com/spiral/goridge/v2"
-)
-
-// PipeFactory connects to workers using standard
-// streams (STDIN, STDOUT pipes).
-type PipeFactory struct {
-}
-
-// NewPipeFactory returns new factory instance and starts
-// listening
-func NewPipeFactory() *PipeFactory {
- return &PipeFactory{}
-}
-
-// SpawnWorker creates new worker and connects it to goridge relay,
-// method Wait() must be handled on level above.
-func (f *PipeFactory) SpawnWorker(cmd *exec.Cmd) (w *Worker, err error) {
- if w, err = newWorker(cmd); err != nil {
- return nil, err
- }
-
- var (
- in io.ReadCloser
- out io.WriteCloser
- )
-
- if in, err = cmd.StdoutPipe(); err != nil {
- return nil, err
- }
-
- if out, err = cmd.StdinPipe(); err != nil {
- return nil, err
- }
-
- w.rl = goridge.NewPipeRelay(in, out)
-
- if err := w.start(); err != nil {
- return nil, errors.Wrap(err, "process error")
- }
-
- if pid, err := fetchPID(w.rl); pid != *w.Pid {
- go func(w *Worker) {
- err := w.Kill()
- if err != nil {
- // there is no logger here, how to handle error in goroutines ?
- fmt.Printf("error killing the worker with PID number %d, Created: %s", w.Pid, w.Created)
- }
- }(w)
-
- if wErr := w.Wait(); wErr != nil {
- if _, ok := wErr.(*exec.ExitError); ok {
- // error might be nil here
- if err != nil {
- err = errors.Wrap(wErr, err.Error())
- }
- } else {
- err = wErr
- }
- }
-
- return nil, errors.Wrap(err, "unable to connect to worker")
- }
-
- w.state.set(StateReady)
- return w, nil
-}
-
-// Close the factory.
-func (f *PipeFactory) Close() error {
- return nil
-}
diff --git a/pipe_factory_test.go b/pipe_factory_test.go
deleted file mode 100644
index 378cb637..00000000
--- a/pipe_factory_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package roadrunner
-
-import (
- "os/exec"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Pipe_Start(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- assert.NoError(t, err)
- assert.NotNil(t, w)
-
- go func() {
- assert.NoError(t, w.Wait())
- }()
-
- assert.NoError(t, w.Stop())
-}
-
-func Test_Pipe_StartError(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
- err := cmd.Start()
- if err != nil {
- t.Errorf("error running the command: error %v", err)
- }
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- assert.Error(t, err)
- assert.Nil(t, w)
-}
-
-func Test_Pipe_PipeError(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
- _, err := cmd.StdinPipe()
- if err != nil {
- t.Errorf("error creating the STDIN pipe: error %v", err)
- }
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- assert.Error(t, err)
- assert.Nil(t, w)
-}
-
-func Test_Pipe_PipeError2(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
- _, err := cmd.StdinPipe()
- if err != nil {
- t.Errorf("error creating the STDIN pipe: error %v", err)
- }
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- assert.Error(t, err)
- assert.Nil(t, w)
-}
-
-func Test_Pipe_Failboot(t *testing.T) {
- cmd := exec.Command("php", "tests/failboot.php")
- w, err := NewPipeFactory().SpawnWorker(cmd)
-
- assert.Nil(t, w)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failboot")
-}
-
-func Test_Pipe_Invalid(t *testing.T) {
- cmd := exec.Command("php", "tests/invalid.php")
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- assert.Error(t, err)
- assert.Nil(t, w)
-}
-
-func Test_Pipe_Echo(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_Pipe_Broken(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "broken", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- err := w.Wait()
-
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "undefined_function()")
- }()
- defer func() {
- time.Sleep(time.Second)
- err := w.Stop()
- assert.NoError(t, err)
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
-
- assert.Error(t, err)
- assert.Nil(t, res)
-}
-
-func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) {
- f := NewPipeFactory()
- for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
- w, _ := f.SpawnWorker(cmd)
- go func() {
- if w.Wait() != nil {
- b.Fail()
- }
- }()
-
- err := w.Stop()
- if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
- }
- }
-}
-
-func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- err := w.Wait()
- if err != nil {
- b.Errorf("error waiting the worker: error %v", err)
- }
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- for n := 0; n < b.N; n++ {
- if _, err := w.Exec(&Payload{Body: []byte("hello")}); err != nil {
- b.Fail()
- }
- }
-}
diff --git a/pkg/events/events.go b/pkg/events/events.go
new file mode 100755
index 00000000..226a0c91
--- /dev/null
+++ b/pkg/events/events.go
@@ -0,0 +1,41 @@
+package events
+
+import (
+ "sync"
+
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+)
+
+// HandlerImpl helps to broadcast events to multiple listeners.
+type HandlerImpl struct {
+ listeners []events.Listener
+ sync.RWMutex // all receivers should be pointers
+}
+
+func NewEventsHandler() events.Handler {
+ return &HandlerImpl{listeners: make([]events.Listener, 0, 2)}
+}
+
+// NumListeners returns number of event listeners.
+func (eb *HandlerImpl) NumListeners() int {
+ eb.Lock()
+ defer eb.Unlock()
+ return len(eb.listeners)
+}
+
+// AddListener registers new event listener.
+func (eb *HandlerImpl) AddListener(listener events.Listener) {
+ eb.Lock()
+ defer eb.Unlock()
+ eb.listeners = append(eb.listeners, listener)
+}
+
+// Push broadcasts the event across all event listeners.
+func (eb *HandlerImpl) Push(e interface{}) {
+ // ReadLock here because we are not changing listeners
+ eb.RLock()
+ defer eb.RUnlock()
+ for k := range eb.listeners {
+ eb.listeners[k](e)
+ }
+}
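A short usage sketch (not part of the commit): the handler fans every pushed event out to all registered listeners under a read lock.

package main

import (
	"fmt"

	eventsIface "github.com/spiral/roadrunner/v2/interfaces/events"
	eventsPkg "github.com/spiral/roadrunner/v2/pkg/events"
)

func main() {
	h := eventsPkg.NewEventsHandler()

	h.AddListener(func(e interface{}) { fmt.Println("listener A:", e) })
	h.AddListener(func(e interface{}) { fmt.Println("listener B:", e) })

	fmt.Println("listeners:", h.NumListeners()) // 2

	h.Push(eventsIface.WorkerEvent{Event: eventsIface.EventWorkerLog, Payload: []byte("hi")})
}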
diff --git a/payload.go b/pkg/payload/payload.go
index 154cec95..fac36852 100644..100755
--- a/payload.go
+++ b/pkg/payload/payload.go
@@ -1,12 +1,12 @@
-package roadrunner
+package payload
-// Payload carries binary header and body to workers and
+// Payload carries binary header and body to stack and
// back to the server.
type Payload struct {
// Context represent payload context, might be omitted.
Context []byte
- // body contains binary payload to be processed by worker.
+ // body contains binary payload to be processed by WorkerProcess.
Body []byte
}
diff --git a/pkg/pipe/pipe_factory.go b/pkg/pipe/pipe_factory.go
new file mode 100755
index 00000000..c36c13e2
--- /dev/null
+++ b/pkg/pipe/pipe_factory.go
@@ -0,0 +1,163 @@
+package pipe
+
+import (
+ "context"
+ "os/exec"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/goridge/v3/pkg/pipe"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
+ "go.uber.org/multierr"
+)
+
+// Factory connects to stack using standard
+// streams (STDIN, STDOUT pipes).
+type Factory struct{}
+
+// NewPipeFactory returns a new factory instance for workers
+// communicating over standard streams (pipes)
+func NewPipeFactory() worker.Factory {
+ return &Factory{}
+}
+
+type SpawnResult struct {
+ w worker.BaseProcess
+ err error
+}
+
+// SpawnWorkerWithTimeout creates a new Process and connects it to the goridge relay;
+// the Wait() method must be handled on the level above.
+func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (worker.BaseProcess, error) {
+ c := make(chan SpawnResult)
+ const op = errors.Op("spawn worker with context")
+ go func() {
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(listeners...))
+ if err != nil {
+ c <- SpawnResult{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ // TODO why out is in?
+ in, err := cmd.StdoutPipe()
+ if err != nil {
+ c <- SpawnResult{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ // TODO why in is out?
+ out, err := cmd.StdinPipe()
+ if err != nil {
+ c <- SpawnResult{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ // Init new PIPE relay
+ relay := pipe.NewPipeRelay(in, out)
+ w.AttachRelay(relay)
+
+ // Start the worker
+ err = w.Start()
+ if err != nil {
+ c <- SpawnResult{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ // errors bundle
+ pid, err := internal.FetchPID(relay)
+ if pid != w.Pid() || err != nil {
+ err = multierr.Combine(
+ err,
+ w.Kill(),
+ w.Wait(),
+ )
+ c <- SpawnResult{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ // everything ok, set ready state
+ w.State().Set(internal.StateReady)
+
+ // return worker
+ c <- SpawnResult{
+ w: w,
+ err: nil,
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case res := <-c:
+ if res.err != nil {
+ return nil, res.err
+ }
+ return res.w, nil
+ }
+}
+
+func (f *Factory) SpawnWorker(cmd *exec.Cmd, listeners ...events.Listener) (worker.BaseProcess, error) {
+ const op = errors.Op("spawn worker")
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(listeners...))
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // TODO why out is in?
+ in, err := cmd.StdoutPipe()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // TODO why in is out?
+ out, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // Init new PIPE relay
+ relay := pipe.NewPipeRelay(in, out)
+ w.AttachRelay(relay)
+
+ // Start the worker
+ err = w.Start()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // errors bundle
+ if pid, err := internal.FetchPID(relay); pid != w.Pid() {
+ err = multierr.Combine(
+ err,
+ w.Kill(),
+ w.Wait(),
+ )
+ return nil, errors.E(op, err)
+ }
+
+ // everything ok, set ready state
+ w.State().Set(internal.StateReady)
+ return w, nil
+}
+
+// Close the factory.
+func (f *Factory) Close() error {
+ return nil
+}
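SpawnWorkerWithTimeout races the spawn-and-PID-handshake goroutine against the supplied context, so a deadline bounds the whole startup. A hedged usage sketch; the php script path again assumes the repository's test fixtures:

package main

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/pipe"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.Command("php", "tests/client.php", "echo", "pipes")

	w, err := pipe.NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
	if err != nil {
		// either a spawn/handshake failure or context.DeadlineExceeded
		log.Fatal(err)
	}

	log.Println("worker ready, pid:", w.Pid())
	_ = w.Stop()
}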
diff --git a/pkg/pipe/pipe_factory_test.go b/pkg/pipe/pipe_factory_test.go
new file mode 100755
index 00000000..a2731294
--- /dev/null
+++ b/pkg/pipe/pipe_factory_test.go
@@ -0,0 +1,510 @@
+package pipe
+
+import (
+ "context"
+ "os/exec"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/internal"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_GetState(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ go func() {
+ assert.NoError(t, w.Wait())
+ assert.Equal(t, internal.StateStopped, w.State().Value())
+ }()
+
+ assert.NoError(t, err)
+ assert.NotNil(t, w)
+
+ assert.Equal(t, internal.StateReady, w.State().Value())
+ err = w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+}
+
+func Test_Kill(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ assert.Error(t, w.Wait())
+ assert.Equal(t, internal.StateErrored, w.State().Value())
+ }()
+
+ assert.NoError(t, err)
+ assert.NotNil(t, w)
+
+ assert.Equal(t, internal.StateReady, w.State().Value())
+ err = w.Kill()
+ if err != nil {
+ t.Errorf("error killing the Process: error %v", err)
+ }
+ wg.Wait()
+}
+
+func Test_Pipe_Start(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ assert.NoError(t, err)
+ assert.NotNil(t, w)
+
+ go func() {
+ assert.NoError(t, w.Wait())
+ }()
+
+ assert.NoError(t, w.Stop())
+}
+
+func Test_Pipe_StartError(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ err := cmd.Start()
+ if err != nil {
+ t.Errorf("error running the command: error %v", err)
+ }
+
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ assert.Error(t, err)
+ assert.Nil(t, w)
+}
+
+func Test_Pipe_PipeError(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ _, err := cmd.StdinPipe()
+ if err != nil {
+ t.Errorf("error creating the STDIN pipe: error %v", err)
+ }
+
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ assert.Error(t, err)
+ assert.Nil(t, w)
+}
+
+func Test_Pipe_PipeError2(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ _, err := cmd.StdinPipe()
+ if err != nil {
+ t.Errorf("error creating the STDIN pipe: error %v", err)
+ }
+
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ assert.Error(t, err)
+ assert.Nil(t, w)
+}
+
+func Test_Pipe_Failboot(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/failboot.php")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+
+ assert.Nil(t, w)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), "failboot")
+}
+
+func Test_Pipe_Invalid(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/invalid.php")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ assert.Error(t, err)
+ assert.Nil(t, w)
+}
+
+func Test_Pipe_Echo(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ sw, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+}
+
+func Test_Pipe_Broken(t *testing.T) {
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ time.Sleep(time.Second)
+ err = w.Stop()
+ assert.Error(t, err)
+ }()
+
+ sw, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.Error(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+}
+
+func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) {
+ f := NewPipeFactory()
+ for n := 0; n < b.N; n++ {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ w, _ := f.SpawnWorkerWithTimeout(context.Background(), cmd)
+ go func() {
+ if w.Wait() != nil {
+ b.Fail()
+ }
+ }()
+
+ err := w.Stop()
+ if err != nil {
+ b.Errorf("error stopping the worker: error %v", err)
+ }
+ }
+}
+
+func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(context.Background(), cmd)
+ sw, err := workerImpl.From(w)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ReportAllocs()
+ b.ResetTimer()
+ go func() {
+ err := w.Wait()
+ if err != nil {
+ b.Errorf("error waiting the worker: error %v", err)
+ }
+ }()
+ defer func() {
+ err := w.Stop()
+ if err != nil {
+ b.Errorf("error stopping the worker: error %v", err)
+ }
+ }()
+
+ for n := 0; n < b.N; n++ {
+ if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ }
+ }
+}
+
+func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ err = w.Stop()
+ if err != nil {
+ b.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ sw, err := workerImpl.From(w)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for n := 0; n < b.N; n++ {
+ if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ }
+ }
+}
+
+func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) {
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+ ctx := context.Background()
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ defer func() {
+ err = w.Stop()
+ if err != nil {
+ b.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ sw, err := workerImpl.From(w)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ for n := 0; n < b.N; n++ {
+ if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ }
+ }
+}
+
+func Test_Echo(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func() {
+ assert.NoError(t, syncWorker.Wait())
+ }()
+ defer func() {
+ err := syncWorker.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.Nil(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+}
+
+func Test_BadPayload(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ assert.NoError(t, syncWorker.Wait())
+ }()
+ defer func() {
+ err := syncWorker.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ res, err := syncWorker.Exec(payload.Payload{})
+
+ assert.Error(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+
+ assert.Contains(t, err.Error(), "payload can not be empty")
+}
+
+func Test_String(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ go func() {
+ assert.NoError(t, w.Wait())
+ }()
+ defer func() {
+ err := w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ assert.Contains(t, w.String(), "php ../../tests/client.php echo pipes")
+ assert.Contains(t, w.String(), "ready")
+ assert.Contains(t, w.String(), "numExecs: 0")
+}
+
+func Test_Echo_Slow(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "pipes", "10", "10")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ go func() {
+ assert.NoError(t, w.Wait())
+ }()
+ defer func() {
+ err := w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.Nil(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+}
+
+func Test_Broken(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
+ data := ""
+ mu := &sync.Mutex{}
+ listener := func(event interface{}) {
+ if wev, ok := event.(events.WorkerEvent); ok {
+ mu.Lock()
+ data = string(wev.Payload.([]byte))
+ mu.Unlock()
+ }
+ }
+
+ w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd, listener)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+ assert.NotNil(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+
+ time.Sleep(time.Second * 3)
+ mu.Lock()
+ if strings.Contains(data, "undefined_function()") == false {
+ t.Fail()
+ }
+ mu.Unlock()
+ assert.Error(t, w.Stop())
+}
+
+func Test_Error(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "error", "pipes")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ go func() {
+ assert.NoError(t, w.Wait())
+ }()
+
+ defer func() {
+ err := w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+ assert.NotNil(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+
+ if errors.Is(errors.SoftJob, err) == false {
+ t.Fatal("error should be of type errors.ErrSoftJob")
+ }
+ assert.Contains(t, err.Error(), "hello")
+}
+
+func Test_NumExecs(t *testing.T) {
+ ctx := context.Background()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
+
+ w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
+ go func() {
+ assert.NoError(t, w.Wait())
+ }()
+ defer func() {
+ err := w.Stop()
+ if err != nil {
+ t.Errorf("error stopping the Process: error %v", err)
+ }
+ }()
+
+ syncWorker, err := workerImpl.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+ if err != nil {
+ t.Errorf("fail to execute payload: error %v", err)
+ }
+ assert.Equal(t, int64(1), w.State().NumExecs())
+
+ _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+ if err != nil {
+ t.Errorf("fail to execute payload: error %v", err)
+ }
+ assert.Equal(t, int64(2), w.State().NumExecs())
+
+ _, err = syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+ if err != nil {
+ t.Errorf("fail to execute payload: error %v", err)
+ }
+ assert.Equal(t, int64(3), w.State().NumExecs())
+}
diff --git a/pkg/pool/config.go b/pkg/pool/config.go
new file mode 100644
index 00000000..acdd3d6f
--- /dev/null
+++ b/pkg/pool/config.go
@@ -0,0 +1,75 @@
+package pool
+
+import (
+ "runtime"
+ "time"
+)
+
+// Configures the pool behaviour.
+type Config struct {
+ // Debug flag creates new fresh worker before every request.
+ Debug bool
+
+ // NumWorkers defines how many sub-processes can be run at once. This value
+ // might be doubled by Swapper while hot-swap. Defaults to number of CPU cores.
+ NumWorkers int64 `yaml:"num_workers"`
+
+ // MaxJobs defines how many executions are allowed for the worker until
+ // its destruction. Set 1 to create a new process for each new task, 0 to let
+ // the worker handle as many tasks as it can.
+ MaxJobs int64 `yaml:"max_jobs"`
+
+ // AllocateTimeout defines for how long pool will be waiting for a worker to
+ // be freed to handle the task. Defaults to 60s.
+ AllocateTimeout time.Duration `yaml:"allocate_timeout"`
+
+ // DestroyTimeout defines for how long pool should be waiting for worker to
+ // properly destroy, if timeout reached worker will be killed. Defaults to 60s.
+ DestroyTimeout time.Duration `yaml:"destroy_timeout"`
+
+ // Supervision config to limit worker and pool memory usage.
+ Supervisor *SupervisorConfig `yaml:"supervisor"`
+}
+
+// InitDefaults enables default config values.
+func (cfg *Config) InitDefaults() {
+ if cfg.NumWorkers == 0 {
+ cfg.NumWorkers = int64(runtime.NumCPU())
+ }
+
+ if cfg.AllocateTimeout == 0 {
+ cfg.AllocateTimeout = time.Minute
+ }
+
+ if cfg.DestroyTimeout == 0 {
+ cfg.DestroyTimeout = time.Minute
+ }
+ if cfg.Supervisor == nil {
+ return
+ }
+ cfg.Supervisor.InitDefaults()
+}
+
+type SupervisorConfig struct {
+ // WatchTick defines how often to check the state of worker.
+ WatchTick uint64 `yaml:"watch_tick"`
+
+ // TTL defines maximum time worker is allowed to live.
+ TTL uint64 `yaml:"ttl"`
+
+ // IdleTTL defines maximum duration worker can spend in idle mode. Disabled when 0.
+ IdleTTL uint64 `yaml:"idle_ttl"`
+
+ // ExecTTL defines maximum lifetime per job.
+ ExecTTL uint64 `yaml:"exec_ttl"`
+
+ // MaxWorkerMemory limits memory per worker.
+ MaxWorkerMemory uint64 `yaml:"max_worker_memory"`
+}
+
+// InitDefaults enables default config values.
+func (cfg *SupervisorConfig) InitDefaults() {
+ if cfg.WatchTick == 0 {
+ cfg.WatchTick = 1
+ }
+}
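Unlike the copy in interfaces/pool, this Config carries yaml tags, so it can be hydrated from a pool section of a YAML config. A sketch using gopkg.in/yaml.v2 (already listed in go.sum above); the duration fields are left out and filled by InitDefaults, since plain yaml.v2 does not parse strings like "60s" into time.Duration:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/pkg/pool"
	"gopkg.in/yaml.v2"
)

const raw = `
num_workers: 4
max_jobs: 64
supervisor:
  ttl: 300
  exec_ttl: 60
  max_worker_memory: 128
`

func main() {
	var cfg pool.Config
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	cfg.InitDefaults() // fills AllocateTimeout, DestroyTimeout and WatchTick

	fmt.Println(cfg.NumWorkers, cfg.AllocateTimeout, cfg.Supervisor.ExecTTL, cfg.Supervisor.WatchTick)
}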
diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go
new file mode 100755
index 00000000..bb53e121
--- /dev/null
+++ b/pkg/pool/static_pool.go
@@ -0,0 +1,324 @@
+package pool
+
+import (
+ "context"
+ "os/exec"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ eventsPkg "github.com/spiral/roadrunner/v2/pkg/events"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ syncWorker "github.com/spiral/roadrunner/v2/pkg/worker"
+ workerWatcher "github.com/spiral/roadrunner/v2/pkg/worker_watcher"
+)
+
+// StopRequest can be sent by worker to indicate that restart is required.
+const StopRequest = "{\"stop\":true}"
+
+// ErrorEncoder encodes an error or makes a decision based on the error type
+type ErrorEncoder func(err error, w worker.BaseProcess) (payload.Payload, error)
+
+type Options func(p *StaticPool)
+
+type Command func() *exec.Cmd
+
+// StaticPool controls worker creation, destruction and task routing. The pool uses a fixed amount of stack (workers).
+type StaticPool struct {
+ cfg Config
+
+ // worker command creator
+ cmd Command
+
+ // creates and connects to stack
+ factory worker.Factory
+
+ // distributes the events
+ events events.Handler
+
+ // saved list of event listeners
+ listeners []events.Listener
+
+ // manages worker states and TTLs
+ ww worker.Watcher
+
+ // allocate new worker
+ allocator worker.Allocator
+
+ // errEncoder is the default Exec error encoder
+ errEncoder ErrorEncoder
+}
+
+// Initialize creates a new worker pool and task multiplexer. StaticPool will initiate with the configured number of workers.
+func Initialize(ctx context.Context, cmd Command, factory worker.Factory, cfg Config, options ...Options) (pool.Pool, error) {
+ const op = errors.Op("Initialize")
+ if factory == nil {
+ return nil, errors.E(op, errors.Str("no factory initialized"))
+ }
+ cfg.InitDefaults()
+
+ if cfg.Debug {
+ cfg.NumWorkers = 0
+ cfg.MaxJobs = 1
+ }
+
+ p := &StaticPool{
+ cfg: cfg,
+ cmd: cmd,
+ factory: factory,
+ events: eventsPkg.NewEventsHandler(),
+ }
+
+ // add pool options
+ for i := 0; i < len(options); i++ {
+ options[i](p)
+ }
+
+ p.allocator = p.newPoolAllocator(ctx, p.cfg.AllocateTimeout, factory, cmd)
+ p.ww = workerWatcher.NewWorkerWatcher(p.allocator, p.cfg.NumWorkers, p.events)
+
+ workers, err := p.allocateWorkers(p.cfg.NumWorkers)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // put stack in the pool
+ err = p.ww.AddToWatch(workers)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ p.errEncoder = defaultErrEncoder(p)
+
+ // if the supervisor config is not nil, assume that the pool should be supervised
+ if cfg.Supervisor != nil {
+ sp := supervisorWrapper(p, p.events, p.cfg.Supervisor)
+ // start watcher timer
+ sp.Start()
+ return sp, nil
+ }
+
+ return p, nil
+}
+
+func AddListeners(listeners ...events.Listener) Options {
+ return func(p *StaticPool) {
+ p.listeners = listeners
+ for i := 0; i < len(listeners); i++ {
+ p.addListener(listeners[i])
+ }
+ }
+}
+
+// addListener connects an event listener to the pool.
+func (sp *StaticPool) addListener(listener events.Listener) {
+ sp.events.AddListener(listener)
+}
+
+// GetConfig returns the associated pool configuration. Immutable.
+func (sp *StaticPool) GetConfig() interface{} {
+ return sp.cfg
+}
+
+// Workers returns worker list associated with the pool.
+func (sp *StaticPool) Workers() (workers []worker.BaseProcess) {
+ return sp.ww.WorkersList()
+}
+
+func (sp *StaticPool) RemoveWorker(wb worker.BaseProcess) error {
+ return sp.ww.RemoveWorker(wb)
+}
+
+func (sp *StaticPool) Exec(p payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("exec")
+ if sp.cfg.Debug {
+ return sp.execDebug(p)
+ }
+ ctxGetFree, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout)
+ defer cancel()
+ w, err := sp.getWorker(ctxGetFree, op)
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+
+ rsp, err := w.Exec(p)
+ if err != nil {
+ return sp.errEncoder(err, w)
+ }
+
+ // worker wants to be terminated
+ // TODO careful with string(rsp.Context)
+ if len(rsp.Body) == 0 && string(rsp.Context) == StopRequest {
+ w.State().Set(internal.StateInvalid)
+ err = w.Stop()
+ if err != nil {
+ sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)})
+ }
+
+ return sp.Exec(p)
+ }
+
+ if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs {
+ err = sp.ww.AllocateNew()
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+ } else {
+ sp.ww.PushWorker(w)
+ }
+
+ return rsp, nil
+}
+
+func (sp *StaticPool) ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("exec with context")
+ ctxGetFree, cancel := context.WithTimeout(ctx, sp.cfg.AllocateTimeout)
+ defer cancel()
+ w, err := sp.getWorker(ctxGetFree, op)
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+
+ rsp, err := w.ExecWithTimeout(ctx, rqs)
+ if err != nil {
+ return sp.errEncoder(err, w)
+ }
+
+ // worker wants to be terminated
+ if rsp.Body == nil && rsp.Context != nil && string(rsp.Context) == StopRequest {
+ w.State().Set(internal.StateInvalid)
+ err = w.Stop()
+ if err != nil {
+ sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)})
+ }
+
+ return sp.ExecWithContext(ctx, rqs)
+ }
+
+ if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs {
+ err = sp.ww.AllocateNew()
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+ } else {
+ sp.ww.PushWorker(w)
+ }
+
+ return rsp, nil
+}
+
+func (sp *StaticPool) getWorker(ctxGetFree context.Context, op errors.Op) (worker.SyncWorker, error) {
+ // GetFreeWorker function consumes context with timeout
+ w, err := sp.ww.GetFreeWorker(ctxGetFree)
+ if err != nil {
+ // if the error is of kind NoFreeWorkers, it means that we can't get a worker from the stack within the allocate timeout
+ if errors.Is(errors.NoFreeWorkers, err) {
+ sp.events.Push(events.PoolEvent{Event: events.EventNoFreeWorkers, Payload: errors.E(op, err)})
+ return nil, errors.E(op, err)
+ }
+ // else if err not nil - return error
+ return nil, errors.E(op, err)
+ }
+ return w.(worker.SyncWorker), nil
+}
+
+// Destroy all underlying stack (but let them to complete the task).
+func (sp *StaticPool) Destroy(ctx context.Context) {
+ sp.ww.Destroy(ctx)
+}
+
+func defaultErrEncoder(sp *StaticPool) ErrorEncoder {
+ return func(err error, w worker.BaseProcess) (payload.Payload, error) {
+ const op = errors.Op("error encoder")
+ // soft job errors are allowed
+ if errors.Is(errors.SoftJob, err) {
+ if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs {
+ err = sp.ww.AllocateNew()
+ if err != nil {
+ sp.events.Push(events.PoolEvent{Event: events.EventWorkerConstruct, Payload: errors.E(op, err)})
+ }
+
+ w.State().Set(internal.StateInvalid)
+ err = w.Stop()
+ if err != nil {
+ sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)})
+ }
+ } else {
+ sp.ww.PushWorker(w)
+ }
+
+ return payload.Payload{}, errors.E(op, err)
+ }
+
+ w.State().Set(internal.StateInvalid)
+ sp.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w})
+ errS := w.Stop()
+
+ if errS != nil {
+ return payload.Payload{}, errors.E(op, errors.Errorf("%v, %v", err, errS))
+ }
+
+ return payload.Payload{}, errors.E(op, err)
+ }
+}
+
+func (sp *StaticPool) newPoolAllocator(ctx context.Context, timeout time.Duration, factory worker.Factory, cmd func() *exec.Cmd) worker.Allocator {
+ return func() (worker.BaseProcess, error) {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+ w, err := factory.SpawnWorkerWithTimeout(ctx, cmd(), sp.listeners...)
+ if err != nil {
+ return nil, err
+ }
+
+ sw, err := syncWorker.From(w)
+ if err != nil {
+ return nil, err
+ }
+
+ sp.events.Push(events.PoolEvent{
+ Event: events.EventWorkerConstruct,
+ Payload: sw,
+ })
+ return sw, nil
+ }
+}
+
+func (sp *StaticPool) execDebug(p payload.Payload) (payload.Payload, error) {
+ sw, err := sp.allocator()
+ if err != nil {
+ return payload.Payload{}, err
+ }
+
+ r, err := sw.(worker.SyncWorker).Exec(p)
+
+ if stopErr := sw.Stop(); stopErr != nil {
+ sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: sw, Payload: err})
+ }
+
+ return r, err
+}
+
+// allocateWorkers allocates the required number of workers (stack)
+func (sp *StaticPool) allocateWorkers(numWorkers int64) ([]worker.BaseProcess, error) {
+ const op = errors.Op("allocate workers")
+ var workers []worker.BaseProcess
+
+ // a constant number of workers simplifies the logic
+ for i := int64(0); i < numWorkers; i++ {
+ w, err := sp.allocator()
+ if err != nil {
+ return nil, errors.E(op, errors.WorkerAllocate, err)
+ }
+
+ sw, err := syncWorker.From(w)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+ workers = append(workers, sw)
+ }
+ return workers, nil
+}
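Putting the pieces together: Initialize wires the command, factory, worker watcher and, when a Supervisor config is present, the supervised wrapper; the returned pool is then driven through Exec or ExecWithContext. An end-to-end sketch mirroring the tests below; the php worker script is an assumption from the repository's test fixtures:

package main

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pipe"
	poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
)

func main() {
	ctx := context.Background()

	p, err := poolImpl.Initialize(
		ctx,
		func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
		pipe.NewPipeFactory(),
		poolImpl.Config{
			NumWorkers:      4,
			AllocateTimeout: 5 * time.Second,
			DestroyTimeout:  5 * time.Second,
		},
		poolImpl.AddListeners(func(e interface{}) { log.Printf("event: %#v", e) }),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Destroy(ctx)

	res, err := p.Exec(payload.Payload{Body: []byte("hello")})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(res.String())
}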
diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go
new file mode 100755
index 00000000..53d6b191
--- /dev/null
+++ b/pkg/pool/static_pool_test.go
@@ -0,0 +1,570 @@
+package pool
+
+import (
+ "context"
+ "log"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/internal"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/pkg/pipe"
+ "github.com/stretchr/testify/assert"
+)
+
+var cfg = Config{
+ NumWorkers: int64(runtime.NumCPU()),
+ AllocateTimeout: time.Second * 5,
+ DestroyTimeout: time.Second * 5,
+}
+
+func Test_NewPool(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ assert.NoError(t, err)
+
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+}
+
+func Test_StaticPool_Invalid(t *testing.T) {
+ p, err := Initialize(
+ context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../tests/invalid.php") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+
+ assert.Nil(t, p)
+ assert.Error(t, err)
+}
+
+func Test_ConfigNoErrorInitDefaults(t *testing.T) {
+ p, err := Initialize(
+ context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+
+ assert.NotNil(t, p)
+ assert.NoError(t, err)
+}
+
+func Test_StaticPool_Echo(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ assert.NoError(t, err)
+
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+}
+
+func Test_StaticPool_Echo_NilContext(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ assert.NoError(t, err)
+
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: nil})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+}
+
+func Test_StaticPool_Echo_Context(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "head", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ assert.NoError(t, err)
+
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello"), Context: []byte("world")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.Empty(t, res.Body)
+ assert.NotNil(t, res.Context)
+
+ assert.Equal(t, "world", string(res.Context))
+}
+
+func Test_StaticPool_JobError(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "error", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.Error(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+
+ if !errors.Is(errors.SoftJob, err) {
+ t.Fatal("error should be of type errors.SoftJob")
+ }
+
+ assert.Contains(t, err.Error(), "hello")
+}
+
+func Test_StaticPool_Broken_Replace(t *testing.T) {
+ ctx := context.Background()
+ block := make(chan struct{}, 1)
+
+ listener := func(event interface{}) {
+ if wev, ok := event.(events.WorkerEvent); ok {
+ if wev.Event == events.EventWorkerLog {
+ e := string(wev.Payload.([]byte))
+ if strings.Contains(e, "undefined_function()") {
+ block <- struct{}{}
+ return
+ }
+ }
+ }
+ }
+
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "broken", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ AddListeners(listener),
+ )
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+
+ time.Sleep(time.Second)
+ res, err := p.ExecWithContext(ctx, payload.Payload{Body: []byte("hello")})
+ assert.Error(t, err)
+ assert.Nil(t, res.Context)
+ assert.Nil(t, res.Body)
+
+ <-block
+
+ p.Destroy(ctx)
+}
+
+func Test_StaticPool_Broken_FromOutside(t *testing.T) {
+ ctx := context.Background()
+ // Consume pool events
+ ev := make(chan struct{}, 1)
+ listener := func(event interface{}) {
+ if pe, ok := event.(events.PoolEvent); ok {
+ if pe.Event == events.EventWorkerConstruct {
+ ev <- struct{}{}
+ }
+ }
+ }
+
+ var cfg = Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 5,
+ DestroyTimeout: time.Second * 5,
+ }
+
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ AddListeners(listener),
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.Equal(t, "hello", res.String())
+ assert.Equal(t, 1, len(p.Workers()))
+
+ // first creation
+ <-ev
+ // killing random worker and expecting pool to replace it
+ err = p.Workers()[0].Kill()
+ if err != nil {
+ t.Errorf("error killing the process: error %v", err)
+ }
+
+ // re-creation
+ <-ev
+
+ list := p.Workers()
+ for _, w := range list {
+ assert.Equal(t, internal.StateReady, w.State().Value())
+ }
+}
+
+func Test_StaticPool_AllocateTimeout(t *testing.T) {
+ p, err := Initialize(
+ context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Nanosecond * 1,
+ DestroyTimeout: time.Second * 2,
+ },
+ )
+ assert.Error(t, err)
+ if !errors.Is(errors.WorkerAllocate, err) {
+ t.Fatal("error should be of type WorkerAllocate")
+ }
+ assert.Nil(t, p)
+}
+
+func Test_StaticPool_Replace_Worker(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ MaxJobs: 1,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ var lastPID string
+ lastPID = strconv.Itoa(int(p.Workers()[0].Pid()))
+
+ res, _ := p.Exec(payload.Payload{Body: []byte("hello")})
+ assert.Equal(t, lastPID, string(res.Body))
+
+ for i := 0; i < 10; i++ {
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.NotEqual(t, lastPID, string(res.Body))
+ lastPID = string(res.Body)
+ }
+}
+
+func Test_StaticPool_Debug_Worker(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ Debug: true,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ assert.Len(t, p.Workers(), 0)
+
+ var lastPID string
+ res, _ := p.Exec(payload.Payload{Body: []byte("hello")})
+ assert.NotEqual(t, lastPID, string(res.Body))
+
+ assert.Len(t, p.Workers(), 0)
+
+ for i := 0; i < 10; i++ {
+ assert.Len(t, p.Workers(), 0)
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.NotEqual(t, lastPID, string(res.Body))
+ lastPID = string(res.Body)
+ }
+}
+
+// identical to replace but controlled on worker side
+func Test_StaticPool_Stop_Worker(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ var lastPID string
+ lastPID = strconv.Itoa(int(p.Workers()[0].Pid()))
+
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, lastPID, string(res.Body))
+
+ for i := 0; i < 10; i++ {
+ res, err := p.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.NoError(t, err)
+ assert.NotNil(t, res)
+ assert.NotNil(t, res.Body)
+ assert.Empty(t, res.Context)
+
+ assert.NotEqual(t, lastPID, string(res.Body))
+ lastPID = string(res.Body)
+ }
+}
+
+// ensure that the pool rejects Exec calls after Destroy
+func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+
+ assert.NotNil(t, p)
+ assert.NoError(t, err)
+
+ p.Destroy(ctx)
+ _, err = p.Exec(payload.Payload{Body: []byte("100")})
+ assert.Error(t, err)
+}
+
+// ensure Destroy completes while a request is still in flight and subsequent Exec calls fail
+func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+
+ assert.NotNil(t, p)
+ assert.NoError(t, err)
+
+ go func() {
+ _, err := p.Exec(payload.Payload{Body: []byte("100")})
+ if err != nil {
+ t.Errorf("error executing payload: error %v", err)
+ }
+ }()
+ time.Sleep(time.Millisecond * 10)
+
+ p.Destroy(ctx)
+ _, err = p.Exec(payload.Payload{Body: []byte("100")})
+ assert.Error(t, err)
+}
+
+// ensure the pool returns an error when all workers are marked as errored
+func Test_Static_Pool_Handle_Dead(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 5,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(t, err)
+ defer p.Destroy(ctx)
+
+ assert.NotNil(t, p)
+
+ for _, w := range p.Workers() {
+ w.State().Set(internal.StateErrored)
+ }
+
+ _, err = p.Exec(payload.Payload{Body: []byte("hello")})
+ assert.Error(t, err)
+}
+
+// ensure Destroy completes even when workers are slow to shut down
+func Test_Static_Pool_Slow_Destroy(t *testing.T) {
+ p, err := Initialize(
+ context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 5,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+
+ p.Destroy(context.Background())
+}
+
+func Benchmark_Pool_Echo(b *testing.B) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ cfg,
+ )
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ for n := 0; n < b.N; n++ {
+ if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ }
+ }
+}
+
+func Benchmark_Pool_Echo_Batched(b *testing.B) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: int64(runtime.NumCPU()),
+ AllocateTimeout: time.Second * 100,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(b, err)
+ defer p.Destroy(ctx)
+
+ var wg sync.WaitGroup
+ for i := 0; i < b.N; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ log.Println(err)
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+func Benchmark_Pool_Echo_Replaced(b *testing.B) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ Config{
+ NumWorkers: 1,
+ MaxJobs: 1,
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ },
+ )
+ assert.NoError(b, err)
+ defer p.Destroy(ctx)
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for n := 0; n < b.N; n++ {
+ if _, err := p.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
+ b.Fail()
+ log.Println(err)
+ }
+ }
+}
diff --git a/pkg/pool/supervisor_pool.go b/pkg/pool/supervisor_pool.go
new file mode 100755
index 00000000..07fa7019
--- /dev/null
+++ b/pkg/pool/supervisor_pool.go
@@ -0,0 +1,223 @@
+package pool
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/tools"
+)
+
+const MB = 1024 * 1024
+
+// NSEC_IN_SEC is the number of nanoseconds in one second
+const NSEC_IN_SEC int64 = 1000000000 //nolint:golint,stylecheck
+
+type Supervised interface {
+ pool.Pool
+ // Start starts the supervision loop for all pool workers
+ Start()
+}
+
+type supervised struct {
+ cfg *SupervisorConfig
+ events events.Handler
+ pool pool.Pool
+ stopCh chan struct{}
+ mu *sync.RWMutex
+}
+
+func supervisorWrapper(pool pool.Pool, events events.Handler, cfg *SupervisorConfig) Supervised {
+ sp := &supervised{
+ cfg: cfg,
+ events: events,
+ pool: pool,
+ mu: &sync.RWMutex{},
+ stopCh: make(chan struct{}),
+ }
+
+ return sp
+}
+
+type ttlExec struct {
+ err error
+ p payload.Payload
+}
+
+func (sp *supervised) ExecWithContext(ctx context.Context, rqs payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("exec_supervised")
+ if sp.cfg.ExecTTL == 0 {
+ return sp.pool.Exec(rqs)
+ }
+
+ c := make(chan ttlExec, 1)
+ ctx, cancel := context.WithTimeout(ctx, time.Duration(sp.cfg.ExecTTL)*time.Second)
+ defer cancel()
+ go func() {
+ res, err := sp.pool.ExecWithContext(ctx, rqs)
+ if err != nil {
+ c <- ttlExec{
+ err: errors.E(op, err),
+ p: payload.Payload{},
+ }
+ return
+ }
+
+ c <- ttlExec{
+ err: nil,
+ p: res,
+ }
+ }()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return payload.Payload{}, errors.E(op, errors.TimeOut, ctx.Err())
+ case res := <-c:
+ if res.err != nil {
+ return payload.Payload{}, res.err
+ }
+
+ return res.p, nil
+ }
+ }
+}
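+
+// For example (informal): with SupervisorConfig{ExecTTL: 1} a request that the worker
+// does not answer within ~1 second is aborted and reported as errors.TimeOut together
+// with the context error, while ExecTTL: 0 falls through to the plain Exec path with
+// no deadline at all.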
+
+func (sp *supervised) Exec(p payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("supervised exec")
+ rsp, err := sp.pool.Exec(p)
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+ return rsp, nil
+}
+
+func (sp *supervised) GetConfig() interface{} {
+ return sp.pool.GetConfig()
+}
+
+func (sp *supervised) Workers() (workers []worker.BaseProcess) {
+ sp.mu.Lock()
+ defer sp.mu.Unlock()
+ return sp.pool.Workers()
+}
+
+func (sp *supervised) RemoveWorker(worker worker.BaseProcess) error {
+ return sp.pool.RemoveWorker(worker)
+}
+
+func (sp *supervised) Destroy(ctx context.Context) {
+ sp.pool.Destroy(ctx)
+}
+
+func (sp *supervised) Start() {
+ go func() {
+ watchTout := time.NewTicker(time.Duration(sp.cfg.WatchTick) * time.Second)
+ for {
+ select {
+ case <-sp.stopCh:
+ watchTout.Stop()
+ return
+ // stop here
+ case <-watchTout.C:
+ sp.mu.Lock()
+ sp.control()
+ sp.mu.Unlock()
+ }
+ }
+ }()
+}
+
+func (sp *supervised) Stop() {
+ sp.stopCh <- struct{}{}
+}
+
+func (sp *supervised) control() {
+ now := time.Now()
+ const op = errors.Op("supervised pool control tick")
+
+ // THIS IS A COPY OF WORKERS
+ workers := sp.pool.Workers()
+
+ for i := 0; i < len(workers); i++ {
+ if workers[i].State().Value() == internal.StateInvalid {
+ continue
+ }
+
+ s, err := tools.WorkerProcessState(workers[i])
+ if err != nil {
+ // worker is no longer valid for supervision
+ continue
+ }
+
+ if sp.cfg.TTL != 0 && now.Sub(workers[i].Created()).Seconds() >= float64(sp.cfg.TTL) {
+ err = sp.pool.RemoveWorker(workers[i])
+ if err != nil {
+ sp.events.Push(events.PoolEvent{Event: events.EventSupervisorError, Payload: errors.E(op, err)})
+ return
+ }
+ sp.events.Push(events.PoolEvent{Event: events.EventTTL, Payload: workers[i]})
+ continue
+ }
+
+ if sp.cfg.MaxWorkerMemory != 0 && s.MemoryUsage >= sp.cfg.MaxWorkerMemory*MB {
+ err = sp.pool.RemoveWorker(workers[i])
+ if err != nil {
+ sp.events.Push(events.PoolEvent{Event: events.EventSupervisorError, Payload: errors.E(op, err)})
+ return
+ }
+ sp.events.Push(events.PoolEvent{Event: events.EventMaxMemory, Payload: workers[i]})
+ continue
+ }
+
+ // first, check the worker idle time
+ if sp.cfg.IdleTTL != 0 {
+ // then check for the worker state
+ if workers[i].State().Value() != internal.StateReady {
+ continue
+ }
+
+ /*
+ Calculate the idle time:
+ 1. If the worker is in StateReady, read its LastUsed timestamp (UnixNano, uint64).
+ 2. If (time.Now - LastUsed) exceeds IdleTTL (for example, an IdleTTL of 5s and a worker
+ idle for longer than that), the worker is considered expired and has to be removed.
+ */
+
+ // e.g. lu = 1610530005534416045; lu - now = -7811150814 ns, i.e. ~7.8s idle
+ // get the last used timestamp (unix nano)
+ lu := workers[i].State().LastUsed()
+ // worker not used, skip
+ if lu == 0 {
+ continue
+ }
+
+ // convert the difference between last used and now to whole seconds;
+ // (lu - now) is negative because lu is always in the past, so flip the sign
+ res := ((int64(lu) - now.UnixNano()) / NSEC_IN_SEC) * -1
+
+ // remove the worker once its idle time exceeds IdleTTL.
+ // For example: after an exec the worker rests for 5 seconds while IdleTTL is 1 second;
+ // at the next control tick res is 5, IdleTTL - res = -4 <= 0, and the worker is removed.
+ if int64(sp.cfg.IdleTTL)-res <= 0 {
+ err = sp.pool.RemoveWorker(workers[i])
+ if err != nil {
+ sp.events.Push(events.PoolEvent{Event: events.EventSupervisorError, Payload: errors.E(op, err)})
+ return
+ }
+ sp.events.Push(events.PoolEvent{Event: events.EventIdleTTL, Payload: workers[i]})
+ }
+ }
+ }
+}
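+
+// Note: every control tick inspects, in order, the total worker lifetime (TTL),
+// the per-worker memory usage (MaxWorkerMemory, compared in MB) and the idle time
+// (IdleTTL); the first violated limit removes the worker and pushes the matching
+// pool event (EventTTL, EventMaxMemory or EventIdleTTL). Workers in StateInvalid
+// are skipped.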
diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go
new file mode 100644
index 00000000..72226bee
--- /dev/null
+++ b/pkg/pool/supervisor_test.go
@@ -0,0 +1,196 @@
+package pool
+
+import (
+ "context"
+ "os/exec"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/pkg/pipe"
+ "github.com/spiral/roadrunner/v2/tools"
+ "github.com/stretchr/testify/assert"
+)
+
+var cfgSupervised = Config{
+ NumWorkers: int64(1),
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ Supervisor: &SupervisorConfig{
+ WatchTick: 1,
+ TTL: 100,
+ IdleTTL: 100,
+ ExecTTL: 100,
+ MaxWorkerMemory: 100,
+ },
+}
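+
+// Note on the units used by the supervisor: WatchTick, TTL, IdleTTL and ExecTTL are
+// seconds, MaxWorkerMemory is megabytes.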
+
+func TestSupervisedPool_Exec(t *testing.T) {
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") },
+ pipe.NewPipeFactory(),
+ cfgSupervised,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ stopCh := make(chan struct{})
+ defer p.Destroy(context.Background())
+
+ go func() {
+ for {
+ select {
+ case <-stopCh:
+ return
+ default:
+ workers := p.Workers()
+ if len(workers) > 0 {
+ s, err := tools.WorkerProcessState(workers[0])
+ assert.NoError(t, err)
+ assert.NotNil(t, s)
+ // since this is a soft limit, watch for up to double the max memory limit
+ if (s.MemoryUsage / MB) > cfgSupervised.Supervisor.MaxWorkerMemory*2 {
+ assert.Fail(t, "max memory reached")
+ }
+ }
+ }
+ }
+ }()
+
+ for i := 0; i < 100; i++ {
+ time.Sleep(time.Millisecond * 50)
+ _, err = p.Exec(payload.Payload{
+ Context: []byte(""),
+ Body: []byte("foo"),
+ })
+ assert.NoError(t, err)
+ }
+
+ stopCh <- struct{}{}
+}
+
+func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
+ var cfgExecTTL = Config{
+ NumWorkers: int64(1),
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ Supervisor: &SupervisorConfig{
+ WatchTick: 1,
+ TTL: 100,
+ IdleTTL: 100,
+ ExecTTL: 1,
+ MaxWorkerMemory: 100,
+ },
+ }
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
+ pipe.NewPipeFactory(),
+ cfgExecTTL,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ defer p.Destroy(context.Background())
+
+ pid := p.Workers()[0].Pid()
+
+ resp, err := p.ExecWithContext(context.Background(), payload.Payload{
+ Context: []byte(""),
+ Body: []byte("foo"),
+ })
+
+ assert.Error(t, err)
+ assert.Empty(t, resp.Body)
+ assert.Empty(t, resp.Context)
+
+ time.Sleep(time.Second * 1)
+ // should be new worker with new pid
+ assert.NotEqual(t, pid, p.Workers()[0].Pid())
+}
+
+func TestSupervisedPool_Idle(t *testing.T) {
+ var cfgExecTTL = Config{
+ NumWorkers: int64(1),
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ Supervisor: &SupervisorConfig{
+ WatchTick: 1,
+ TTL: 100,
+ IdleTTL: 1,
+ ExecTTL: 100,
+ MaxWorkerMemory: 100,
+ },
+ }
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
+ pipe.NewPipeFactory(),
+ cfgExecTTL,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ defer p.Destroy(context.Background())
+
+ pid := p.Workers()[0].Pid()
+
+ resp, err := p.ExecWithContext(context.Background(), payload.Payload{
+ Context: []byte(""),
+ Body: []byte("foo"),
+ })
+
+ assert.Nil(t, err)
+ assert.Empty(t, resp.Body)
+ assert.Empty(t, resp.Context)
+
+ time.Sleep(time.Second * 5)
+ // should be new worker with new pid
+ assert.NotEqual(t, pid, p.Workers()[0].Pid())
+}
+
+func TestSupervisedPool_ExecTTL_OK(t *testing.T) {
+ var cfgExecTTL = Config{
+ NumWorkers: int64(1),
+ AllocateTimeout: time.Second,
+ DestroyTimeout: time.Second,
+ Supervisor: &SupervisorConfig{
+ WatchTick: 1,
+ TTL: 100,
+ IdleTTL: 100,
+ ExecTTL: 4,
+ MaxWorkerMemory: 100,
+ },
+ }
+ ctx := context.Background()
+ p, err := Initialize(
+ ctx,
+ func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
+ pipe.NewPipeFactory(),
+ cfgExecTTL,
+ )
+
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ defer p.Destroy(context.Background())
+
+ pid := p.Workers()[0].Pid()
+
+ time.Sleep(time.Millisecond * 100)
+ resp, err := p.Exec(payload.Payload{
+ Context: []byte(""),
+ Body: []byte("foo"),
+ })
+
+ assert.NoError(t, err)
+ assert.Empty(t, resp.Body)
+ assert.Empty(t, resp.Context)
+
+ time.Sleep(time.Second * 1)
+ // should be the same pid
+ assert.Equal(t, pid, p.Workers()[0].Pid())
+}
diff --git a/pkg/socket/socket_factory.go b/pkg/socket/socket_factory.go
new file mode 100755
index 00000000..ff882389
--- /dev/null
+++ b/pkg/socket/socket_factory.go
@@ -0,0 +1,229 @@
+package socket
+
+import (
+ "context"
+ "net"
+ "os/exec"
+ "sync"
+ "time"
+
+ "github.com/shirou/gopsutil/process"
+ "github.com/spiral/errors"
+ "github.com/spiral/goridge/v3/interfaces/relay"
+ "github.com/spiral/goridge/v3/pkg/socket"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ workerImpl "github.com/spiral/roadrunner/v2/pkg/worker"
+
+ "go.uber.org/multierr"
+ "golang.org/x/sync/errgroup"
+)
+
+// Factory connects to external processes using a socket server.
+type Factory struct {
+ // listens for incoming connections from underlying processes
+ ls net.Listener
+
+ // relay connection timeout
+ tout time.Duration
+
+ // sockets which are waiting for process association
+ relays sync.Map
+
+ ErrCh chan error
+}
+
+// todo: review
+
+// NewSocketServer returns a Factory attached to the given socket listener.
+// tout specifies how long the factory should wait for an incoming relay connection.
+func NewSocketServer(ls net.Listener, tout time.Duration) worker.Factory {
+ f := &Factory{
+ ls: ls,
+ tout: tout,
+ relays: sync.Map{},
+ ErrCh: make(chan error, 10),
+ }
+
+ // Be careful
+ // https://github.com/go101/go101/wiki/About-memory-ordering-guarantees-made-by-atomic-operations-in-Go
+ // https://github.com/golang/go/issues/5045
+ go func() {
+ f.ErrCh <- f.listen()
+ }()
+
+ return f
+}
+
+// blocking operation, returns an error
+func (f *Factory) listen() error {
+ errGr := &errgroup.Group{}
+ errGr.Go(func() error {
+ for {
+ conn, err := f.ls.Accept()
+ if err != nil {
+ return err
+ }
+
+ rl := socket.NewSocketRelay(conn)
+ pid, err := internal.FetchPID(rl)
+ if err != nil {
+ return err
+ }
+
+ f.attachRelayToPid(pid, rl)
+ }
+ })
+
+ return errGr.Wait()
+}
+
+type socketSpawn struct {
+ w worker.BaseProcess
+ err error
+}
+
+// SpawnWorkerWithTimeout creates a Process and connects it to the appropriate relay, or returns an error.
+func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (worker.BaseProcess, error) {
+ const op = errors.Op("spawn_worker_with_context")
+ c := make(chan socketSpawn)
+ go func() {
+ ctx, cancel := context.WithTimeout(ctx, f.tout)
+ defer cancel()
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(listeners...))
+ if err != nil {
+ c <- socketSpawn{
+ w: nil,
+ err: err,
+ }
+ return
+ }
+
+ err = w.Start()
+ if err != nil {
+ c <- socketSpawn{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ rl, err := f.findRelayWithContext(ctx, w)
+ if err != nil {
+ err = multierr.Combine(
+ err,
+ w.Kill(),
+ w.Wait(),
+ )
+
+ c <- socketSpawn{
+ w: nil,
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ w.AttachRelay(rl)
+ w.State().Set(internal.StateReady)
+
+ c <- socketSpawn{
+ w: w,
+ err: nil,
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case res := <-c:
+ if res.err != nil {
+ return nil, res.err
+ }
+
+ return res.w, nil
+ }
+}
+
+func (f *Factory) SpawnWorker(cmd *exec.Cmd, listeners ...events.Listener) (worker.BaseProcess, error) {
+ const op = errors.Op("spawn_worker")
+ w, err := workerImpl.InitBaseWorker(cmd, workerImpl.AddListeners(listeners...))
+ if err != nil {
+ return nil, err
+ }
+
+ err = w.Start()
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ rl, err := f.findRelay(w)
+ if err != nil {
+ err = multierr.Combine(
+ err,
+ w.Kill(),
+ w.Wait(),
+ )
+ return nil, err
+ }
+
+ w.AttachRelay(rl)
+ w.State().Set(internal.StateReady)
+
+ return w, nil
+}
+
+// Close socket factory and underlying socket connection.
+func (f *Factory) Close() error {
+ return f.ls.Close()
+}
+
+// waits for the Process to connect over the socket and returns the associated relay, or a timeout error
+func (f *Factory) findRelayWithContext(ctx context.Context, w worker.BaseProcess) (*socket.Relay, error) {
+ ticker := time.NewTicker(time.Millisecond * 100)
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-ticker.C:
+ _, err := process.NewProcess(int32(w.Pid()))
+ if err != nil {
+ return nil, err
+ }
+ default:
+ tmp, ok := f.relays.Load(w.Pid())
+ if !ok {
+ continue
+ }
+ return tmp.(*socket.Relay), nil
+ }
+ }
+}
+
+func (f *Factory) findRelay(w worker.BaseProcess) (*socket.Relay, error) {
+ const op = errors.Op("find_relay")
+ // wait for the relay until the factory timeout expires
+ pollDone := time.NewTimer(f.tout)
+ for {
+ select {
+ case <-pollDone.C:
+ return nil, errors.E(op, errors.Str("relay timeout"))
+ default:
+ tmp, ok := f.relays.Load(w.Pid())
+ if !ok {
+ continue
+ }
+ return tmp.(*socket.Relay), nil
+ }
+ }
+}
+
+// stores the relay associated with the given pid
+func (f *Factory) attachRelayToPid(pid int64, relay relay.Relay) {
+ f.relays.Store(pid, relay)
+}
+
+// deletes relay chan associated with specific pid
+func (f *Factory) removeRelayFromPid(pid int64) {
+ f.relays.Delete(pid)
+}
diff --git a/socket_factory_test.go b/pkg/socket/socket_factory_test.go
index 330b60a5..983f3e8e 100644..100755
--- a/socket_factory_test.go
+++ b/pkg/socket/socket_factory_test.go
@@ -1,15 +1,20 @@
-package roadrunner
+package socket
import (
+ "context"
"net"
"os/exec"
+ "sync"
"testing"
"time"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/pkg/worker"
"github.com/stretchr/testify/assert"
)
func Test_Tcp_Start(t *testing.T) {
+ ctx := context.Background()
time.Sleep(time.Millisecond * 10) // to ensure free socket
ls, err := net.Listen("tcp", "localhost:9007")
@@ -24,9 +29,9 @@ func Test_Tcp_Start(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
assert.NotNil(t, w)
@@ -36,22 +41,22 @@ func Test_Tcp_Start(t *testing.T) {
err = w.Stop()
if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
+ t.Errorf("error stopping the Process: error %v", err)
}
}
func Test_Tcp_StartCloseFactory(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
} else {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
- f := NewSocketFactory(ls, time.Minute)
+ f := NewSocketServer(ls, time.Minute)
defer func() {
err := ls.Close()
if err != nil {
@@ -59,23 +64,19 @@ func Test_Tcp_StartCloseFactory(t *testing.T) {
}
}()
- w, err := f.SpawnWorker(cmd)
+ w, err := f.SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
assert.NotNil(t, w)
- go func() {
- assert.NoError(t, w.Wait())
- }()
-
err = w.Stop()
if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
+ t.Errorf("error stopping the Process: error %v", err)
}
}
func Test_Tcp_StartError(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
defer func() {
@@ -88,19 +89,20 @@ func Test_Tcp_StartError(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
err = cmd.Start()
if err != nil {
t.Errorf("error executing the command: error %v", err)
}
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
assert.Nil(t, w)
}
func Test_Tcp_Failboot(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
@@ -114,9 +116,9 @@ func Test_Tcp_Failboot(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
- w, err2 := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err2 := NewSocketServer(ls, time.Second*5).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
assert.Error(t, err2)
assert.Contains(t, err2.Error(), "failboot")
@@ -124,7 +126,7 @@ func Test_Tcp_Failboot(t *testing.T) {
func Test_Tcp_Timeout(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
defer func() {
@@ -137,17 +139,17 @@ func Test_Tcp_Timeout(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/slow-client.php", "echo", "tcp", "200", "0")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "tcp", "200", "0")
- w, err := NewSocketFactory(ls, time.Millisecond*100).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Millisecond*1).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
assert.Error(t, err)
- assert.Contains(t, err.Error(), "relay timeout")
+ assert.Contains(t, err.Error(), "context deadline exceeded")
}
func Test_Tcp_Invalid(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
defer func() {
@@ -160,16 +162,16 @@ func Test_Tcp_Invalid(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Second*1).SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
assert.Nil(t, w)
}
func Test_Tcp_Broken(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
defer func() {
@@ -182,12 +184,17 @@ func Test_Tcp_Broken(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "broken", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "tcp")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wg := sync.WaitGroup{}
+ wg.Add(1)
go func() {
+ defer wg.Done()
err := w.Wait()
-
assert.Error(t, err)
assert.Contains(t, err.Error(), "undefined_function()")
}()
@@ -195,18 +202,25 @@ func Test_Tcp_Broken(t *testing.T) {
defer func() {
time.Sleep(time.Second)
err2 := w.Stop()
- assert.NoError(t, err2)
+ // write tcp 127.0.0.1:9007->127.0.0.1:34204: use of closed network connection
+ assert.Error(t, err2)
}()
- res, err := w.Exec(&Payload{Body: []byte("hello")})
+ sw, err := worker.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
assert.Error(t, err)
- assert.Nil(t, res)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+ wg.Wait()
}
func Test_Tcp_Echo(t *testing.T) {
time.Sleep(time.Millisecond * 10) // to ensure free socket
-
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if assert.NoError(t, err) {
defer func() {
@@ -219,30 +233,36 @@ func Test_Tcp_Echo(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, _ := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
go func() {
assert.NoError(t, w.Wait())
}()
defer func() {
err = w.Stop()
if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
+ t.Errorf("error stopping the Process: error %v", err)
}
}()
- res, err := w.Exec(&Payload{Body: []byte("hello")})
+ sw, err := worker.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
assert.NoError(t, err)
assert.NotNil(t, res)
assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
+ assert.Empty(t, res.Context)
assert.Equal(t, "hello", res.String())
}
func Test_Unix_Start(t *testing.T) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -255,9 +275,9 @@ func Test_Unix_Start(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
assert.NotNil(t, w)
@@ -267,12 +287,13 @@ func Test_Unix_Start(t *testing.T) {
err = w.Stop()
if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
+ t.Errorf("error stopping the Process: error %v", err)
}
}
func Test_Unix_Failboot(t *testing.T) {
ls, err := net.Listen("unix", "sock.unix")
+ ctx := context.Background()
if err == nil {
defer func() {
err := ls.Close()
@@ -284,9 +305,9 @@ func Test_Unix_Failboot(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Second*5).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failboot")
@@ -294,6 +315,7 @@ func Test_Unix_Failboot(t *testing.T) {
func Test_Unix_Timeout(t *testing.T) {
ls, err := net.Listen("unix", "sock.unix")
+ ctx := context.Background()
if err == nil {
defer func() {
err := ls.Close()
@@ -305,15 +327,16 @@ func Test_Unix_Timeout(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/slow-client.php", "echo", "unix", "200", "0")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "unix", "200", "0")
- w, err := NewSocketFactory(ls, time.Millisecond*100).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Millisecond*100).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
assert.Error(t, err)
- assert.Contains(t, err.Error(), "relay timeout")
+ assert.Contains(t, err.Error(), "context deadline exceeded")
}
func Test_Unix_Invalid(t *testing.T) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -326,14 +349,15 @@ func Test_Unix_Invalid(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
- w, err := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Second*10).SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
assert.Nil(t, w)
}
func Test_Unix_Broken(t *testing.T) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -346,10 +370,16 @@ func Test_Unix_Broken(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "broken", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "unix")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
go func() {
+ defer wg.Done()
err := w.Wait()
assert.Error(t, err)
assert.Contains(t, err.Error(), "undefined_function()")
@@ -358,16 +388,24 @@ func Test_Unix_Broken(t *testing.T) {
defer func() {
time.Sleep(time.Second)
err = w.Stop()
- assert.NoError(t, err)
+ assert.Error(t, err)
}()
- res, err := w.Exec(&Payload{Body: []byte("hello")})
+ sw, err := worker.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
assert.Error(t, err)
- assert.Nil(t, res)
+ assert.Nil(t, res.Context)
+ assert.Nil(t, res.Body)
+ wg.Wait()
}
func Test_Unix_Echo(t *testing.T) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -380,34 +418,43 @@ func Test_Unix_Echo(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ t.Fatal(err)
+ }
go func() {
assert.NoError(t, w.Wait())
}()
defer func() {
err = w.Stop()
if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
+ t.Errorf("error stopping the Process: error %v", err)
}
}()
- res, err := w.Exec(&Payload{Body: []byte("hello")})
+ sw, err := worker.From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := sw.Exec(payload.Payload{Body: []byte("hello")})
assert.NoError(t, err)
assert.NotNil(t, res)
assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
+ assert.Empty(t, res.Context)
assert.Equal(t, "hello", res.String())
}
func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) {
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if err == nil {
defer func() {
- err := ls.Close()
+ err = ls.Close()
if err != nil {
b.Errorf("error closing the listener: error %v", err)
}
@@ -416,29 +463,31 @@ func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) {
b.Skip("socket is busy")
}
- f := NewSocketFactory(ls, time.Minute)
+ f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
- w, _ := f.SpawnWorker(cmd)
+ w, err := f.SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
go func() {
- if w.Wait() != nil {
- b.Fail()
- }
+ assert.NoError(b, w.Wait())
}()
err = w.Stop()
if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
+ b.Errorf("error stopping the Process: error %v", err)
}
}
}
func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) {
+ ctx := context.Background()
ls, err := net.Listen("tcp", "localhost:9007")
if err == nil {
defer func() {
- err := ls.Close()
+ err = ls.Close()
if err != nil {
b.Errorf("error closing the listener: error %v", err)
}
@@ -447,30 +496,33 @@ func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) {
b.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
- go func() {
- err := w.Wait()
- if err != nil {
- b.Errorf("error waiting: %v", err)
- }
- }()
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
defer func() {
err = w.Stop()
if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
+ b.Errorf("error stopping the Process: error %v", err)
}
}()
+ sw, err := worker.From(w)
+ if err != nil {
+ b.Fatal(err)
+ }
+
for n := 0; n < b.N; n++ {
- if _, err := w.Exec(&Payload{Body: []byte("hello")}); err != nil {
+ if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
b.Fail()
}
}
}
func Benchmark_Unix_SpawnWorker_Stop(b *testing.B) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -483,25 +535,23 @@ func Benchmark_Unix_SpawnWorker_Stop(b *testing.B) {
b.Skip("socket is busy")
}
- f := NewSocketFactory(ls, time.Minute)
+ f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "tests/client.php", "echo", "unix")
-
- w, _ := f.SpawnWorker(cmd)
- go func() {
- if w.Wait() != nil {
- b.Fail()
- }
- }()
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
+ w, err := f.SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
err = w.Stop()
if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
+ b.Errorf("error stopping the Process: error %v", err)
}
}
}
func Benchmark_Unix_Worker_ExecEcho(b *testing.B) {
+ ctx := context.Background()
ls, err := net.Listen("unix", "sock.unix")
if err == nil {
defer func() {
@@ -514,24 +564,26 @@ func Benchmark_Unix_Worker_ExecEcho(b *testing.B) {
b.Skip("socket is busy")
}
- cmd := exec.Command("php", "tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
- w, _ := NewSocketFactory(ls, time.Minute).SpawnWorker(cmd)
- go func() {
- err := w.Wait()
- if err != nil {
- b.Errorf("error waiting: %v", err)
- }
- }()
+ w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
+ if err != nil {
+ b.Fatal(err)
+ }
defer func() {
err = w.Stop()
if err != nil {
- b.Errorf("error stopping the worker: error %v", err)
+ b.Errorf("error stopping the Process: error %v", err)
}
}()
+ sw, err := worker.From(w)
+ if err != nil {
+ b.Fatal(err)
+ }
+
for n := 0; n < b.N; n++ {
- if _, err := w.Exec(&Payload{Body: []byte("hello")}); err != nil {
+ if _, err := sw.Exec(payload.Payload{Body: []byte("hello")}); err != nil {
b.Fail()
}
}
diff --git a/pkg/worker/sync_worker.go b/pkg/worker/sync_worker.go
new file mode 100755
index 00000000..6a945cf4
--- /dev/null
+++ b/pkg/worker/sync_worker.go
@@ -0,0 +1,224 @@
+package worker
+
+import (
+ "bytes"
+ "context"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/goridge/v3/interfaces/relay"
+ "github.com/spiral/goridge/v3/pkg/frame"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "go.uber.org/multierr"
+)
+
+type syncWorker struct {
+ w worker.BaseProcess
+}
+
+// From creates SyncWorker from BaseProcess
+func From(w worker.BaseProcess) (worker.SyncWorker, error) {
+ return &syncWorker{
+ w: w,
+ }, nil
+}
+
+// Exec payload without TTL timeout.
+func (tw *syncWorker) Exec(p payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("sync worker Exec")
+ if len(p.Body) == 0 && len(p.Context) == 0 {
+ return payload.Payload{}, errors.E(op, errors.Str("payload can not be empty"))
+ }
+
+ if tw.w.State().Value() != internal.StateReady {
+ return payload.Payload{}, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.w.State().String()))
+ }
+
+ // set last used time
+ tw.w.State().SetLastUsed(uint64(time.Now().UnixNano()))
+ tw.w.State().Set(internal.StateWorking)
+
+ rsp, err := tw.execPayload(p)
+ if err != nil {
+ // a SoftJob error does not invalidate the worker
+ if !errors.Is(errors.SoftJob, err) {
+ tw.w.State().Set(internal.StateErrored)
+ tw.w.State().RegisterExec()
+ }
+ return payload.Payload{}, err
+ }
+
+ tw.w.State().Set(internal.StateReady)
+ tw.w.State().RegisterExec()
+
+ return rsp, nil
+}
+
+type wexec struct {
+ payload payload.Payload
+ err error
+}
+
+// ExecWithTimeout executes the payload and aborts once the provided context is done.
+func (tw *syncWorker) ExecWithTimeout(ctx context.Context, p payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("ExecWithTimeout")
+ c := make(chan wexec, 1)
+
+ go func() {
+ if len(p.Body) == 0 && len(p.Context) == 0 {
+ c <- wexec{
+ payload: payload.Payload{},
+ err: errors.E(op, errors.Str("payload can not be empty")),
+ }
+ return
+ }
+
+ if tw.w.State().Value() != internal.StateReady {
+ c <- wexec{
+ payload: payload.Payload{},
+ err: errors.E(op, errors.Errorf("Process is not ready (%s)", tw.w.State().String())),
+ }
+ return
+ }
+
+ // set last used time
+ tw.w.State().SetLastUsed(uint64(time.Now().UnixNano()))
+ tw.w.State().Set(internal.StateWorking)
+
+ rsp, err := tw.execPayload(p)
+ if err != nil {
+ // a SoftJob error does not invalidate the worker
+ if !errors.Is(errors.SoftJob, err) {
+ tw.w.State().Set(internal.StateErrored)
+ tw.w.State().RegisterExec()
+ }
+ c <- wexec{
+ payload: payload.Payload{},
+ err: errors.E(op, err),
+ }
+ return
+ }
+
+ tw.w.State().Set(internal.StateReady)
+ tw.w.State().RegisterExec()
+
+ c <- wexec{
+ payload: rsp,
+ err: nil,
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ err := multierr.Combine(tw.Kill())
+ if err != nil {
+ return payload.Payload{}, multierr.Append(err, ctx.Err())
+ }
+ return payload.Payload{}, ctx.Err()
+ case res := <-c:
+ if res.err != nil {
+ return payload.Payload{}, res.err
+ }
+ return res.payload, nil
+ }
+}
+
+func (tw *syncWorker) execPayload(p payload.Payload) (payload.Payload, error) {
+ const op = errors.Op("exec pl")
+
+ fr := frame.NewFrame()
+ fr.WriteVersion(frame.VERSION_1)
+ // the payload context length may be 0 here
+
+ buf := new(bytes.Buffer)
+ buf.Write(p.Context)
+ buf.Write(p.Body)
+
+ // Context offset
+ fr.WriteOptions(uint32(len(p.Context)))
+ fr.WritePayloadLen(uint32(buf.Len()))
+ fr.WritePayload(buf.Bytes())
+
+ fr.WriteCRC()
+
+ // empty and free the buffer
+ buf.Truncate(0)
+
+ err := tw.Relay().Send(fr)
+ if err != nil {
+ return payload.Payload{}, err
+ }
+
+ frameR := frame.NewFrame()
+
+ err = tw.w.Relay().Receive(frameR)
+ if err != nil {
+ return payload.Payload{}, errors.E(op, err)
+ }
+ if frameR == nil {
+ return payload.Payload{}, errors.E(op, errors.Str("nil fr received"))
+ }
+
+ if !frameR.VerifyCRC() {
+ return payload.Payload{}, errors.E(op, errors.Str("failed to verify CRC"))
+ }
+
+ flags := frameR.ReadFlags()
+
+ if flags&byte(frame.ERROR) != byte(0) {
+ return payload.Payload{}, errors.E(op, errors.SoftJob, errors.Str(string(frameR.Payload())))
+ }
+
+ options := frameR.ReadOptions()
+ if len(options) != 1 {
+ return payload.Payload{}, errors.E(op, errors.Str("options length should be equal 1 (body offset)"))
+ }
+
+ pl := payload.Payload{}
+ pl.Context = frameR.Payload()[:options[0]]
+ pl.Body = frameR.Payload()[options[0]:]
+
+ return pl, nil
+}
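+
+// Frame layout used above (informal example): for Context = "world" and
+// Body = "hello" the request frame carries options[0] = 5 (the context length)
+// and the payload "worldhello"; the response frame is split back the same way:
+// Context = Payload()[:options[0]], Body = Payload()[options[0]:].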
+
+func (tw *syncWorker) String() string {
+ return tw.w.String()
+}
+
+func (tw *syncWorker) Pid() int64 {
+ return tw.w.Pid()
+}
+
+func (tw *syncWorker) Created() time.Time {
+ return tw.w.Created()
+}
+
+func (tw *syncWorker) State() internal.State {
+ return tw.w.State()
+}
+
+func (tw *syncWorker) Start() error {
+ return tw.w.Start()
+}
+
+func (tw *syncWorker) Wait() error {
+ return tw.w.Wait()
+}
+
+func (tw *syncWorker) Stop() error {
+ return tw.w.Stop()
+}
+
+func (tw *syncWorker) Kill() error {
+ return tw.w.Kill()
+}
+
+func (tw *syncWorker) Relay() relay.Relay {
+ return tw.w.Relay()
+}
+
+func (tw *syncWorker) AttachRelay(rl relay.Relay) {
+ tw.w.AttachRelay(rl)
+}
diff --git a/pkg/worker/sync_worker_test.go b/pkg/worker/sync_worker_test.go
new file mode 100755
index 00000000..40988b06
--- /dev/null
+++ b/pkg/worker/sync_worker_test.go
@@ -0,0 +1,37 @@
+package worker
+
+import (
+ "os/exec"
+ "testing"
+
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_NotStarted_String(t *testing.T) {
+ cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
+
+ w, _ := InitBaseWorker(cmd)
+ assert.Contains(t, w.String(), "php tests/client.php echo pipes")
+ assert.Contains(t, w.String(), "inactive")
+ assert.Contains(t, w.String(), "numExecs: 0")
+}
+
+func Test_NotStarted_Exec(t *testing.T) {
+ cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
+
+ w, _ := InitBaseWorker(cmd)
+
+ syncWorker, err := From(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ res, err := syncWorker.Exec(payload.Payload{Body: []byte("hello")})
+
+ assert.Error(t, err)
+ assert.Nil(t, res.Body)
+ assert.Nil(t, res.Context)
+
+ assert.Contains(t, err.Error(), "Process is not ready (inactive)")
+}
diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go
new file mode 100755
index 00000000..493882a8
--- /dev/null
+++ b/pkg/worker/worker.go
@@ -0,0 +1,315 @@
+package worker
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/goridge/v3/interfaces/relay"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+ eventsPkg "github.com/spiral/roadrunner/v2/pkg/events"
+ "go.uber.org/multierr"
+)
+
+const (
+ // WaitDuration defines how long the error buffer keeps aggregating error messages
+ // after the last error update before the output is merged (keeps related errors together).
+ WaitDuration = 25 * time.Millisecond
+
+ // ReadBufSize is the size of the buffer used for a single read from stderr
+ ReadBufSize = 10240 // 10KB
+)
+
+type Options func(p *Process)
+
+// Process - supervised process with api over goridge.Relay.
+type Process struct {
+ // created indicates at what time Process has been created.
+ created time.Time
+
+ // updates parent supervisor or pool about Process events
+ events events.Handler
+
+ // state holds information about the current Process state,
+ // the number of Process executions, and the last status change time.
+ // Publicly this object is receive-only and protected by a mutex
+ // and atomic counters.
+ state *internal.WorkerState
+
+ // underlying command with associated process. The command must be
+ // provided from outside in non-started form; its stderr stream is
+ // handled by Process to aggregate error messages.
+ cmd *exec.Cmd
+
+ // pid of the underlying process; it is zero until the process is started.
+ pid int
+
+ // stderr aggregates stderr output from the underlying process. The value can be
+ // received only once the command has completed and all pipes are closed.
+ stderr *bytes.Buffer
+
+ // contains information about the resulting process state.
+ endState *os.ProcessState
+
+ // ensures that only one execution can run at a time.
+ mu sync.RWMutex
+
+ // communication bus with underlying process.
+ relay relay.Relay
+ // rd is the read end of the pipe used to read from stderr
+ rd io.Reader
+ // stop signals the stderr reader to stop reading from the pipe
+ stop chan struct{}
+
+ syncPool sync.Pool
+}
+
+// InitBaseWorker creates a new Process over the given exec.Cmd.
+func InitBaseWorker(cmd *exec.Cmd, options ...Options) (worker.BaseProcess, error) {
+ if cmd.Process != nil {
+ return nil, fmt.Errorf("can't attach to running process")
+ }
+ w := &Process{
+ created: time.Now(),
+ events: eventsPkg.NewEventsHandler(),
+ cmd: cmd,
+ state: internal.NewWorkerState(internal.StateInactive),
+ stderr: new(bytes.Buffer),
+ stop: make(chan struct{}, 1),
+ // sync pool for STDERR
+ // All receivers are pointers
+ syncPool: sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, ReadBufSize)
+ return &buf
+ },
+ },
+ }
+
+ w.rd, w.cmd.Stderr = io.Pipe()
+
+ // buffer pre-allocation:
+ // stderr may contain large messages, so grow the buffer up front
+ w.stderr.Grow(ReadBufSize)
+
+ // add options
+ for i := 0; i < len(options); i++ {
+ options[i](w)
+ }
+
+ go func() {
+ w.watch()
+ }()
+
+ return w, nil
+}
+
+func AddListeners(listeners ...events.Listener) Options {
+ return func(p *Process) {
+ for i := 0; i < len(listeners); i++ {
+ p.addListener(listeners[i])
+ }
+ }
+}
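+
+// Usage sketch (the listener body is illustrative): forward worker stderr output,
+// published as EventWorkerLog events, to the standard logger.
+//
+//	w, err := InitBaseWorker(cmd, AddListeners(func(e interface{}) {
+//		if wev, ok := e.(events.WorkerEvent); ok && wev.Event == events.EventWorkerLog {
+//			log.Println(string(wev.Payload.([]byte)))
+//		}
+//	}))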
+
+// Pid returns worker pid.
+func (w *Process) Pid() int64 {
+ return int64(w.pid)
+}
+
+// Created returns time worker was created at.
+func (w *Process) Created() time.Time {
+ return w.created
+}
+
+// AddListener registers new worker event listener.
+func (w *Process) addListener(listener events.Listener) {
+ w.events.AddListener(listener)
+}
+
+// State returns a receive-only Process state object. The state can be used to safely access
+// the Process status, the time the status changed, and the number of Process executions.
+func (w *Process) State() internal.State {
+ return w.state
+}
+
+// AttachRelay attaches a communication relay to the Process.
+func (w *Process) AttachRelay(rl relay.Relay) {
+ w.relay = rl
+}
+
+// Relay returns the communication relay attached to the Process.
+func (w *Process) Relay() relay.Relay {
+ return w.relay
+}
+
+// String returns the Process description (implements fmt.Stringer).
+func (w *Process) String() string {
+ st := w.state.String()
+ // we can safely compare pid to 0
+ if w.pid != 0 {
+ st = st + ", pid:" + strconv.Itoa(w.pid)
+ }
+
+ return fmt.Sprintf(
+ "(`%s` [%s], numExecs: %v)",
+ strings.Join(w.cmd.Args, " "),
+ st,
+ w.state.NumExecs(),
+ )
+}
+
+func (w *Process) Start() error {
+ err := w.cmd.Start()
+ if err != nil {
+ return err
+ }
+ w.pid = w.cmd.Process.Pid
+ return nil
+}
+
+// Wait must be called once for each Process. The call returns once the Process is
+// complete and reports the process error (if any); if stderr output is present, its
+// value is wrapped into the returned error. The method also returns an error if the
+// PHP process fails to find or start the script.
+func (w *Process) Wait() error {
+ const op = errors.Op("worker process wait")
+ err := multierr.Combine(w.cmd.Wait())
+
+ if w.State().Value() == internal.StateDestroyed {
+ return errors.E(op, err)
+ }
+
+ // according to the cmd.Wait documentation, if the worker finishes with an error,
+ // the message is written to stderr first and only then w.cmd.Wait returns an error
+ w.endState = w.cmd.ProcessState
+ if err != nil {
+ w.state.Set(internal.StateErrored)
+
+ w.mu.RLock()
+ // if the process exit code is > 0, stderr contains the error message (if present)
+ if w.stderr.Len() > 0 {
+ err = multierr.Append(err, errors.E(op, errors.Str(w.stderr.String())))
+ // stop the stderr watcher
+ w.stop <- struct{}{}
+ }
+ w.mu.RUnlock()
+
+ return multierr.Append(err, w.closeRelay())
+ }
+
+ err = multierr.Append(err, w.closeRelay())
+ if err != nil {
+ w.state.Set(internal.StateErrored)
+ return err
+ }
+
+ if w.endState.Success() {
+ w.state.Set(internal.StateStopped)
+ }
+
+ return nil
+}
+
+func (w *Process) closeRelay() error {
+ if w.relay != nil {
+ err := w.relay.Close()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Stop sends soft termination command to the Process and waits for process completion.
+func (w *Process) Stop() error {
+ var err error
+ w.state.Set(internal.StateStopping)
+ err = multierr.Append(err, internal.SendControl(w.relay, &internal.StopCommand{Stop: true}))
+ if err != nil {
+ w.state.Set(internal.StateKilling)
+ return multierr.Append(err, w.cmd.Process.Kill())
+ }
+ w.state.Set(internal.StateStopped)
+ return nil
+}
+
+// Kill kills the underlying process; make sure to call Wait() to gather
+// the error log from stderr. Kill does not wait for process completion!
+func (w *Process) Kill() error {
+ if w.State().Value() == internal.StateDestroyed {
+ err := w.cmd.Process.Signal(os.Kill)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ w.state.Set(internal.StateKilling)
+ err := w.cmd.Process.Signal(os.Kill)
+ if err != nil {
+ return err
+ }
+ w.state.Set(internal.StateStopped)
+ return nil
+}
+
+// put returns the buffer pointer to the pool so that a new slice is not
+// allocated for every stderr read
+func (w *Process) put(data *[]byte) {
+ w.syncPool.Put(data)
+}
+
+// get pointer to the byte slice
+func (w *Process) get() *[]byte {
+ return w.syncPool.Get().(*[]byte)
+}
+
+// watch reads stderr from the underlying process in the background, publishes each
+// chunk as an EventWorkerLog event and appends it to the stderr buffer until stopped.
+func (w *Process) watch() {
+ go func() {
+ for {
+ select {
+ case <-w.stop:
+ buf := w.get()
+ // read the last data
+ n, _ := w.rd.Read(*buf)
+ w.events.Push(events.WorkerEvent{Event: events.EventWorkerLog, Worker: w, Payload: (*buf)[:n]})
+ w.mu.Lock()
+ // write the new message;
+ // only the n bytes just read are written, not the whole reusable buffer from the syncPool
+ w.stderr.Write((*buf)[:n])
+ w.mu.Unlock()
+ w.put(buf)
+ return
+ default:
+				// read at most 10KB of stderr per read
+ buf := w.get()
+ n, _ := w.rd.Read(*buf)
+ w.events.Push(events.WorkerEvent{Event: events.EventWorkerLog, Worker: w, Payload: (*buf)[:n]})
+ w.mu.Lock()
+ // write new message
+ w.stderr.Write((*buf)[:n])
+ w.mu.Unlock()
+ w.put(buf)
+ }
+ }
+ }()
+}
diff --git a/pkg/worker/worker_test.go b/pkg/worker/worker_test.go
new file mode 100755
index 00000000..805f66b5
--- /dev/null
+++ b/pkg/worker/worker_test.go
@@ -0,0 +1,19 @@
+package worker
+
+import (
+ "os/exec"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_OnStarted(t *testing.T) {
+ cmd := exec.Command("php", "tests/client.php", "broken", "pipes")
+ assert.Nil(t, cmd.Start())
+
+ w, err := InitBaseWorker(cmd)
+ assert.Nil(t, w)
+ assert.NotNil(t, err)
+
+ assert.Equal(t, "can't attach to running process", err.Error())
+}
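For orientation, here is a minimal lifecycle sketch of the base worker API added above. It assumes the module path used throughout this changeset and reuses the php fixture arguments from the test; in real use the worker is normally created through the pipe or socket factories rather than directly.

```go
package main

import (
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/pkg/worker"
)

func main() {
	// the command must NOT be started beforehand: as the test above shows,
	// InitBaseWorker refuses to attach to an already running process
	cmd := exec.Command("php", "tests/client.php", "echo", "pipes")

	w, err := worker.InitBaseWorker(cmd)
	if err != nil {
		log.Fatal(err)
	}

	// Start launches the underlying process and records its pid
	if err := w.Start(); err != nil {
		log.Fatal(err)
	}

	// Wait blocks until the process exits; stderr output (if any) is wrapped into the error
	if err := w.Wait(); err != nil {
		log.Println("worker finished with error:", err)
	}
}
```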
diff --git a/pkg/worker_watcher/worker_watcher.go b/pkg/worker_watcher/worker_watcher.go
new file mode 100755
index 00000000..127dc801
--- /dev/null
+++ b/pkg/worker_watcher/worker_watcher.go
@@ -0,0 +1,299 @@
+package worker_watcher //nolint:golint,stylecheck
+
+import (
+ "context"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/internal"
+)
+
+type Stack struct {
+ workers []worker.BaseProcess
+ mutex sync.RWMutex
+ destroy bool
+ actualNumOfWorkers int64
+}
+
+func NewWorkersStack() *Stack {
+ w := runtime.NumCPU()
+ return &Stack{
+ workers: make([]worker.BaseProcess, 0, w),
+ actualNumOfWorkers: 0,
+ }
+}
+
+func (stack *Stack) Reset() {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+ stack.actualNumOfWorkers = 0
+ stack.workers = nil
+}
+
+// Push returns a worker to the stack.
+// If the stack is in the destroy state, Destroy polls every 100ms until all workers have been pushed back.
+func (stack *Stack) Push(w worker.BaseProcess) {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+ stack.actualNumOfWorkers++
+ stack.workers = append(stack.workers, w)
+}
+
+func (stack *Stack) IsEmpty() bool {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+ return len(stack.workers) == 0
+}
+
+func (stack *Stack) Pop() (worker.BaseProcess, bool) {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+
+	// do not hand out workers while the stack is being destroyed
+ if stack.destroy {
+ return nil, true
+ }
+
+ if len(stack.workers) == 0 {
+ return nil, false
+ }
+
+ // move worker
+ w := stack.workers[len(stack.workers)-1]
+ stack.workers = stack.workers[:len(stack.workers)-1]
+ stack.actualNumOfWorkers--
+ return w, false
+}
+
+func (stack *Stack) FindAndRemoveByPid(pid int64) bool {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+ for i := 0; i < len(stack.workers); i++ {
+		// worker found in the stack, remove it
+ if stack.workers[i].Pid() == pid {
+ stack.workers = append(stack.workers[:i], stack.workers[i+1:]...)
+ stack.actualNumOfWorkers--
+ // worker found and removed
+ return true
+ }
+ }
+ // no worker with such ID
+ return false
+}
+
+// Workers returns a copy of the workers in the stack
+func (stack *Stack) Workers() []worker.BaseProcess {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+	// copy the slice so callers cannot mutate the stack's internal state
+	workersCopy := make([]worker.BaseProcess, 0, len(stack.workers))
+	workersCopy = append(workersCopy, stack.workers...)
+
+ return workersCopy
+}
+
+func (stack *Stack) isDestroying() bool {
+ stack.mutex.Lock()
+ defer stack.mutex.Unlock()
+ return stack.destroy
+}
+
+// Destroy gives the pool a chance to Push (return) in-flight workers before killing them
+func (stack *Stack) Destroy(ctx context.Context) {
+ stack.mutex.Lock()
+ stack.destroy = true
+ stack.mutex.Unlock()
+
+ tt := time.NewTicker(time.Millisecond * 100)
+ for {
+ select {
+ case <-tt.C:
+ stack.mutex.Lock()
+			// some workers might still be in use
+ if len(stack.workers) != int(stack.actualNumOfWorkers) {
+ stack.mutex.Unlock()
+ continue
+ }
+ stack.mutex.Unlock()
+			// strictly speaking the extra lock is unnecessary, but just to be safe:
+			// all workers are back in the stack at this point, Pop is short-circuited by
+			// the destroy flag, and no Push can happen since nothing can be popped
+ stack.mutex.Lock()
+ for i := 0; i < len(stack.workers); i++ {
+				// set the destroyed state on each worker (unused at the moment)
+ stack.workers[i].State().Set(internal.StateDestroyed)
+ // kill the worker
+ _ = stack.workers[i].Kill()
+ }
+ stack.mutex.Unlock()
+ tt.Stop()
+ // clear
+ stack.Reset()
+ return
+ }
+ }
+}
+
+// NewWorkerWatcher creates a worker watcher. The allocator can be nil, but in that case dead workers will not be replaced.
+func NewWorkerWatcher(allocator worker.Allocator, numWorkers int64, events events.Handler) worker.Watcher {
+ ww := &workerWatcher{
+ stack: NewWorkersStack(),
+ allocator: allocator,
+ initialNumWorkers: numWorkers,
+ actualNumWorkers: numWorkers,
+ events: events,
+ }
+
+ return ww
+}
+
+type workerWatcher struct {
+ mutex sync.RWMutex
+ stack *Stack
+ allocator worker.Allocator
+ initialNumWorkers int64
+ actualNumWorkers int64
+ events events.Handler
+}
+
+func (ww *workerWatcher) AddToWatch(workers []worker.BaseProcess) error {
+ for i := 0; i < len(workers); i++ {
+ ww.stack.Push(workers[i])
+
+ go func(swc worker.BaseProcess) {
+ ww.wait(swc)
+ }(workers[i])
+ }
+ return nil
+}
+
+func (ww *workerWatcher) GetFreeWorker(ctx context.Context) (worker.BaseProcess, error) {
+ const op = errors.Op("GetFreeWorker")
+ // thread safe operation
+ w, stop := ww.stack.Pop()
+ if stop {
+ return nil, errors.E(op, errors.WatcherStopped)
+ }
+
+ // handle worker remove state
+ // in this state worker is destroyed by supervisor
+ if w != nil && w.State().Value() == internal.StateRemove {
+ err := ww.RemoveWorker(w)
+ if err != nil {
+ return nil, err
+ }
+ // try to get next
+ return ww.GetFreeWorker(ctx)
+ }
+	// no free workers in the stack
+ if w == nil {
+ for {
+ select {
+ default:
+ w, stop = ww.stack.Pop()
+ if stop {
+ return nil, errors.E(op, errors.WatcherStopped)
+ }
+ if w == nil {
+ continue
+ }
+ return w, nil
+ case <-ctx.Done():
+				return nil, errors.E(op, errors.NoFreeWorkers, errors.Str("no free workers in the stack, timeout exceeded"))
+ }
+ }
+ }
+
+ return w, nil
+}
+
+func (ww *workerWatcher) AllocateNew() error {
+ ww.stack.mutex.Lock()
+ const op = errors.Op("allocate new worker")
+ sw, err := ww.allocator()
+ if err != nil {
+ return errors.E(op, errors.WorkerAllocate, err)
+ }
+
+ ww.addToWatch(sw)
+ ww.stack.mutex.Unlock()
+ ww.PushWorker(sw)
+
+ return nil
+}
+
+func (ww *workerWatcher) RemoveWorker(wb worker.BaseProcess) error {
+ ww.mutex.Lock()
+ defer ww.mutex.Unlock()
+
+ const op = errors.Op("remove worker")
+ pid := wb.Pid()
+
+ if ww.stack.FindAndRemoveByPid(pid) {
+ wb.State().Set(internal.StateRemove)
+ err := wb.Kill()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+// O(1) operation
+func (ww *workerWatcher) PushWorker(w worker.BaseProcess) {
+ ww.mutex.Lock()
+ defer ww.mutex.Unlock()
+ ww.stack.Push(w)
+}
+
+// Destroy all underlying workers (but let them complete the current task)
+func (ww *workerWatcher) Destroy(ctx context.Context) {
+ // destroy stack, we don't use ww mutex here, since we should be able to push worker
+ ww.stack.Destroy(ctx)
+}
+
+// Warning: this is an O(n) operation, and it returns a copy of the actual workers
+func (ww *workerWatcher) WorkersList() []worker.BaseProcess {
+ return ww.stack.Workers()
+}
+
+func (ww *workerWatcher) wait(w worker.BaseProcess) {
+ const op = errors.Op("process wait")
+ err := w.Wait()
+ if err != nil {
+ ww.events.Push(events.WorkerEvent{
+ Event: events.EventWorkerError,
+ Worker: w,
+ Payload: errors.E(op, err),
+ })
+ }
+
+ if w.State().Value() == internal.StateDestroyed {
+ // worker was manually destroyed, no need to replace
+ ww.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w})
+ return
+ }
+
+ _ = ww.stack.FindAndRemoveByPid(w.Pid())
+ err = ww.AllocateNew()
+ if err != nil {
+ ww.events.Push(events.PoolEvent{
+ Event: events.EventPoolError,
+ Payload: errors.E(op, err),
+ })
+ }
+}
+
+func (ww *workerWatcher) addToWatch(wb worker.BaseProcess) {
+ go func() {
+ ww.wait(wb)
+ }()
+}
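A rough sketch of how a pool drives this watcher. The exact worker.Allocator signature (a func returning a worker.BaseProcess and an error) and the methods exposed by the worker.Watcher interface are assumptions inferred from their use above; the allocator, seed workers and events handler are supplied by the caller.

```go
package example

import (
	"context"
	"time"

	"github.com/spiral/roadrunner/v2/interfaces/events"
	"github.com/spiral/roadrunner/v2/interfaces/worker"
	workerWatcher "github.com/spiral/roadrunner/v2/pkg/worker_watcher"
)

// borrowWorker sketches the borrow/return cycle a pool performs against the watcher.
func borrowWorker(alloc worker.Allocator, seed []worker.BaseProcess, eh events.Handler) error {
	ww := workerWatcher.NewWorkerWatcher(alloc, int64(len(seed)), eh)

	// register the initial workers; dead ones are replaced via the allocator
	if err := ww.AddToWatch(seed); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	w, err := ww.GetFreeWorker(ctx)
	if err != nil {
		return err
	}

	// always return the worker, otherwise the stack slowly drains
	ww.PushWorker(w)
	return nil
}
```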
diff --git a/plugins/checker/config.go b/plugins/checker/config.go
new file mode 100644
index 00000000..5f952592
--- /dev/null
+++ b/plugins/checker/config.go
@@ -0,0 +1,5 @@
+package checker
+
+type Config struct {
+ Address string
+}
diff --git a/plugins/checker/interface.go b/plugins/checker/interface.go
new file mode 100644
index 00000000..dd9dcada
--- /dev/null
+++ b/plugins/checker/interface.go
@@ -0,0 +1,11 @@
+package checker
+
+// Status holds the status code reported by the service
+type Status struct {
+ Code int
+}
+
+// Checker is the interface used to get the latest status from a plugin
+type Checker interface {
+ Status() Status
+}
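As a sketch, any plugin can participate in health checks by implementing this interface; the hypothetical plugin below uses the 500 threshold that the healthHandler in plugin.go (later in this diff) treats as a failure.

```go
package example

import (
	"net/http"

	"github.com/spiral/roadrunner/v2/plugins/checker"
)

// readinessPlugin is a hypothetical plugin used only to illustrate the interface.
type readinessPlugin struct {
	ready bool
}

// Status reports 200 when the plugin is ready and 500 otherwise; the status
// plugin treats any code >= 500 as a failure.
func (p *readinessPlugin) Status() checker.Status {
	if p.ready {
		return checker.Status{Code: http.StatusOK}
	}
	return checker.Status{Code: http.StatusInternalServerError}
}
```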
diff --git a/plugins/checker/plugin.go b/plugins/checker/plugin.go
new file mode 100644
index 00000000..95f4f68c
--- /dev/null
+++ b/plugins/checker/plugin.go
@@ -0,0 +1,151 @@
+package checker
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/gofiber/fiber/v2"
+ fiberLogger "github.com/gofiber/fiber/v2/middleware/logger"
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const (
+ // PluginName declares public plugin name.
+ PluginName = "status"
+)
+
+type Plugin struct {
+ registry map[string]Checker
+ server *fiber.App
+ log logger.Logger
+ cfg *Config
+}
+
+func (c *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ const op = errors.Op("status plugin init")
+ err := cfg.UnmarshalKey(PluginName, &c.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ if c.cfg == nil {
+ return errors.E(errors.Disabled)
+ }
+
+ c.registry = make(map[string]Checker)
+ c.log = log
+ return nil
+}
+
+func (c *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ c.server = fiber.New(fiber.Config{
+ ReadTimeout: time.Second * 5,
+ WriteTimeout: time.Second * 5,
+ IdleTimeout: time.Second * 5,
+ })
+ c.server.Group("/v1", c.healthHandler)
+ c.server.Use(fiberLogger.New())
+ c.server.Use("/health", c.healthHandler)
+
+ go func() {
+ err := c.server.Listen(c.cfg.Address)
+ if err != nil {
+ errCh <- err
+ }
+ }()
+
+ return errCh
+}
+
+func (c *Plugin) Stop() error {
+ const op = errors.Op("checker stop")
+ err := c.server.Shutdown()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+// Status returns the status of the named service.
+func (c *Plugin) Status(name string) (Status, error) {
+ const op = errors.Op("get status")
+ svc, ok := c.registry[name]
+ if !ok {
+ return Status{}, errors.E(op, errors.Errorf("no such service: %s", name))
+ }
+
+ return svc.Status(), nil
+}
+
+// CollectTarget collects services which can provide a Status.
+func (c *Plugin) CollectTarget(name endure.Named, r Checker) error {
+ c.registry[name.Name()] = r
+ return nil
+}
+
+// Collects declares services to be collected.
+func (c *Plugin) Collects() []interface{} {
+ return []interface{}{
+ c.CollectTarget,
+ }
+}
+
+// Name of the service.
+func (c *Plugin) Name() string {
+ return PluginName
+}
+
+// RPC returns the associated rpc service.
+func (c *Plugin) RPC() interface{} {
+ return &rpc{srv: c, log: c.log}
+}
+
+type Plugins struct {
+ Plugins []string `query:"plugin"`
+}
+
+const template string = "Service: %s: Status: %d\n"
+
+func (c *Plugin) healthHandler(ctx *fiber.Ctx) error {
+ const op = errors.Op("health_handler")
+ plugins := &Plugins{}
+ err := ctx.QueryParser(plugins)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ if len(plugins.Plugins) == 0 {
+ ctx.Status(http.StatusOK)
+ _, _ = ctx.WriteString("No plugins provided in query. Query should be in form of: /v1/health?plugin=plugin1&plugin=plugin2 \n")
+ return nil
+ }
+
+ failed := false
+ // iterate over all provided plugins
+ for i := 0; i < len(plugins.Plugins); i++ {
+ // check if the plugin exists
+ if plugin, ok := c.registry[plugins.Plugins[i]]; ok {
+ st := plugin.Status()
+ if st.Code >= 500 {
+ failed = true
+ continue
+ } else if st.Code >= 100 && st.Code <= 400 {
+ _, _ = ctx.WriteString(fmt.Sprintf(template, plugins.Plugins[i], st.Code))
+ }
+ } else {
+ _, _ = ctx.WriteString(fmt.Sprintf("Service: %s not found", plugins.Plugins[i]))
+ }
+ }
+ if failed {
+ ctx.Status(http.StatusInternalServerError)
+ return nil
+ }
+
+ ctx.Status(http.StatusOK)
+ return nil
+}
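The handler above can be exercised with a plain HTTP client. A sketch, assuming the status plugin listens on 127.0.0.1:2114 (the actual address comes from the `status` section of the config):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// query two plugins at once; the response is 200 only if every listed
	// plugin reports a status code below 500
	resp, err := http.Get("http://127.0.0.1:2114/v1/health?plugin=http&plugin=rpc")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}
```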
diff --git a/plugins/checker/rpc.go b/plugins/checker/rpc.go
new file mode 100644
index 00000000..0daa62fe
--- /dev/null
+++ b/plugins/checker/rpc.go
@@ -0,0 +1,27 @@
+package checker
+
+import (
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+type rpc struct {
+ srv *Plugin
+ log logger.Logger
+}
+
+// Status returns the current status of the provided plugin
+func (rpc *rpc) Status(service string, status *Status) error {
+ const op = errors.Op("status")
+ rpc.log.Debug("started Status method", "service", service)
+ st, err := rpc.srv.Status(service)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ *status = st
+
+ rpc.log.Debug("status code", "code", st.Code)
+ rpc.log.Debug("successfully finished Status method")
+ return nil
+}
diff --git a/plugins/config/interface.go b/plugins/config/interface.go
new file mode 100644
index 00000000..23279f53
--- /dev/null
+++ b/plugins/config/interface.go
@@ -0,0 +1,26 @@
+package config
+
+type Configurer interface {
+	// UnmarshalKey takes a single key and unmarshals it into a struct, e.g.:
+	//
+	// func (h *HttpService) Init(cp config.Configurer) error {
+	// 	h.config = &HttpConfig{}
+	// 	if err := cp.UnmarshalKey("http", h.config); err != nil {
+	// 		return err
+	// 	}
+	// 	return nil
+	// }
+ UnmarshalKey(name string, out interface{}) error
+
+ // Unmarshal unmarshals the config into a Struct. Make sure that the tags
+ // on the fields of the structure are properly set.
+ Unmarshal(out interface{}) error
+
+	// Get returns a config section by name
+ Get(name string) interface{}
+
+ // Overwrite used to overwrite particular values in the unmarshalled config
+ Overwrite(values map[string]interface{}) error
+
+ // Has checks if config section exists.
+ Has(name string) bool
+}
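A minimal consumer of this interface, following the pattern from the UnmarshalKey comment; the `http` key and the config struct here are illustrative only.

```go
package example

import "github.com/spiral/roadrunner/v2/plugins/config"

// sectionConfig is a hypothetical config section used only for illustration.
type sectionConfig struct {
	Address string
}

type service struct {
	cfg *sectionConfig
}

// Init checks that the section exists and unmarshals it into the plugin's own struct.
func (s *service) Init(cfg config.Configurer) error {
	if !cfg.Has("http") {
		// section absent: the plugin can treat itself as disabled
		return nil
	}
	return cfg.UnmarshalKey("http", &s.cfg)
}
```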
diff --git a/plugins/config/plugin.go b/plugins/config/plugin.go
new file mode 100755
index 00000000..9cecf9f9
--- /dev/null
+++ b/plugins/config/plugin.go
@@ -0,0 +1,84 @@
+package config
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/spf13/viper"
+ "github.com/spiral/errors"
+)
+
+type Viper struct {
+ viper *viper.Viper
+ Path string
+ Prefix string
+ Type string
+ ReadInCfg []byte
+}
+
+// Init initializes the config provider.
+func (v *Viper) Init() error {
+ const op = errors.Op("viper plugin init")
+ v.viper = viper.New()
+ // If user provided []byte data with config, read it and ignore Path and Prefix
+ if v.ReadInCfg != nil && v.Type != "" {
+ v.viper.SetConfigType("yaml")
+ return v.viper.ReadConfig(bytes.NewBuffer(v.ReadInCfg))
+ }
+
+ // read in environment variables that match
+ v.viper.AutomaticEnv()
+ if v.Prefix == "" {
+ return errors.E(op, errors.Str("prefix should be set"))
+ }
+
+ v.viper.SetEnvPrefix(v.Prefix)
+ if v.Path == "" {
+ return errors.E(op, errors.Str("path should be set"))
+ }
+
+ v.viper.SetConfigFile(v.Path)
+ v.viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+
+ return v.viper.ReadInConfig()
+}
+
+// Overwrite overwrites existing config with provided values
+func (v *Viper) Overwrite(values map[string]interface{}) error {
+ if len(values) != 0 {
+ for key, value := range values {
+ v.viper.Set(key, value)
+ }
+ }
+
+ return nil
+}
+
+// UnmarshalKey reads configuration section into configuration object.
+func (v *Viper) UnmarshalKey(name string, out interface{}) error {
+ const op = errors.Op("unmarshal key")
+ err := v.viper.UnmarshalKey(name, &out)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+func (v *Viper) Unmarshal(out interface{}) error {
+ const op = errors.Op("config unmarshal")
+ err := v.viper.Unmarshal(&out)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+// Get raw config in a form of config section.
+func (v *Viper) Get(name string) interface{} {
+ return v.viper.Get(name)
+}
+
+// Has checks if config section exists.
+func (v *Viper) Has(name string) bool {
+ return v.viper.IsSet(name)
+}
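Standalone initialization of the provider, as a sketch. The `.rr.yaml` path and the `rr` prefix are assumptions; Init requires both Path and Prefix unless ReadInCfg and Type are supplied instead.

```go
package main

import (
	"log"

	"github.com/spiral/roadrunner/v2/plugins/config"
)

func main() {
	v := &config.Viper{
		Path:   ".rr.yaml",
		Prefix: "rr",
	}
	if err := v.Init(); err != nil {
		log.Fatal(err)
	}

	// any section can now be read; UnmarshalKey/Unmarshal work against the same state
	log.Println(v.Get("http"))
}
```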
diff --git a/plugins/doc/graphviz.svg b/plugins/doc/graphviz.svg
new file mode 100644
index 00000000..86f6ab5c
--- /dev/null
+++ b/plugins/doc/graphviz.svg
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"><!-- Generated by graphviz version 2.40.1 (20161225.0304)
+ --><!-- Title: endure Pages: 1 --><svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="625pt" height="479pt" viewBox="0.00 0.00 624.94 478.79">
+<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 474.786)">
+<title>endure</title>
+<polygon fill="#ffffff" stroke="transparent" points="-4,4 -4,-474.786 620.9357,-474.786 620.9357,4 -4,4"/>
+<!-- checker -->
+<g id="node1" class="node">
+<title>checker</title>
+<ellipse fill="none" stroke="#000000" cx="412.2429" cy="-377.2862" rx="41.1103" ry="18"/>
+<text text-anchor="middle" x="412.2429" y="-373.0862" font-family="Times,serif" font-size="14.00" fill="#000000">checker</text>
+</g>
+<!-- config -->
+<g id="node2" class="node">
+<title>config</title>
+<ellipse fill="none" stroke="#000000" cx="463.8878" cy="-235.393" rx="35.9154" ry="18"/>
+<text text-anchor="middle" x="463.8878" y="-231.193" font-family="Times,serif" font-size="14.00" fill="#000000">config</text>
+</g>
+<!-- checker&#45;&gt;config -->
+<g id="edge1" class="edge">
+<title>checker-&gt;config</title>
+<path fill="none" stroke="#000000" d="M418.7837,-359.3154C427.6313,-335.0068 443.4953,-291.4209 453.8554,-262.9568"/>
+<polygon fill="#000000" stroke="#000000" points="457.2687,-263.812 457.4,-253.218 450.6908,-261.4178 457.2687,-263.812"/>
+</g>
+<!-- logger -->
+<g id="node3" class="node">
+<title>logger</title>
+<ellipse fill="none" stroke="#000000" cx="35.7071" cy="-310.8928" rx="35.9154" ry="18"/>
+<text text-anchor="middle" x="35.7071" y="-306.6928" font-family="Times,serif" font-size="14.00" fill="#000000">logger</text>
+</g>
+<!-- checker&#45;&gt;logger -->
+<g id="edge2" class="edge">
+<title>checker-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M374.0665,-370.5547C303.7112,-358.1492 154.0014,-331.7513 79.586,-318.6299"/>
+<polygon fill="#000000" stroke="#000000" points="80.0574,-315.1591 69.6015,-316.8693 78.8418,-322.0527 80.0574,-315.1591"/>
+</g>
+<!-- logger&#45;&gt;config -->
+<g id="edge4" class="edge">
+<title>logger-&gt;config</title>
+<path fill="none" stroke="#000000" d="M69.6636,-304.9054C146.6435,-291.3317 334.3698,-258.2305 420.0048,-243.1308"/>
+<polygon fill="#000000" stroke="#000000" points="420.6875,-246.5645 429.9277,-241.3811 419.4719,-239.6708 420.6875,-246.5645"/>
+</g>
+<!-- gzip -->
+<g id="node4" class="node">
+<title>gzip</title>
+<ellipse fill="none" stroke="#000000" cx="531.6651" cy="-102.393" rx="27.8286" ry="18"/>
+<text text-anchor="middle" x="531.6651" y="-98.193" font-family="Times,serif" font-size="14.00" fill="#000000">gzip</text>
+</g>
+<!-- headers -->
+<g id="node5" class="node">
+<title>headers</title>
+<ellipse fill="none" stroke="#000000" cx="576.4118" cy="-235.393" rx="40.548" ry="18"/>
+<text text-anchor="middle" x="576.4118" y="-231.193" font-family="Times,serif" font-size="14.00" fill="#000000">headers</text>
+</g>
+<!-- headers&#45;&gt;config -->
+<g id="edge3" class="edge">
+<title>headers-&gt;config</title>
+<path fill="none" stroke="#000000" d="M535.788,-235.393C527.3742,-235.393 518.4534,-235.393 509.8639,-235.393"/>
+<polygon fill="#000000" stroke="#000000" points="509.607,-231.8931 499.607,-235.393 509.607,-238.8931 509.607,-231.8931"/>
+</g>
+<!-- metrics -->
+<g id="node6" class="node">
+<title>metrics</title>
+<ellipse fill="none" stroke="#000000" cx="412.2429" cy="-93.4998" rx="39.4196" ry="18"/>
+<text text-anchor="middle" x="412.2429" y="-89.2998" font-family="Times,serif" font-size="14.00" fill="#000000">metrics</text>
+</g>
+<!-- metrics&#45;&gt;config -->
+<g id="edge6" class="edge">
+<title>metrics-&gt;config</title>
+<path fill="none" stroke="#000000" d="M418.7837,-111.4707C427.6313,-135.7792 443.4953,-179.3651 453.8554,-207.8292"/>
+<polygon fill="#000000" stroke="#000000" points="450.6908,-209.3682 457.4,-217.5681 457.2687,-206.974 450.6908,-209.3682"/>
+</g>
+<!-- metrics&#45;&gt;logger -->
+<g id="edge5" class="edge">
+<title>metrics-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M387.5373,-107.7636C321.7958,-145.7194 142.5487,-249.2078 68.4432,-291.9926"/>
+<polygon fill="#000000" stroke="#000000" points="66.4391,-289.1082 59.5289,-297.1393 69.9391,-295.1704 66.4391,-289.1082"/>
+</g>
+<!-- redis -->
+<g id="node7" class="node">
+<title>redis</title>
+<ellipse fill="none" stroke="#000000" cx="281.4734" cy="-18" rx="29.6127" ry="18"/>
+<text text-anchor="middle" x="281.4734" y="-13.8" font-family="Times,serif" font-size="14.00" fill="#000000">redis</text>
+</g>
+<!-- redis&#45;&gt;config -->
+<g id="edge8" class="edge">
+<title>redis-&gt;config</title>
+<path fill="none" stroke="#000000" d="M295.1841,-34.3398C326.9308,-72.174 405.6399,-165.9759 443.2445,-210.7914"/>
+<polygon fill="#000000" stroke="#000000" points="440.6581,-213.1541 449.7672,-218.5648 446.0204,-208.6545 440.6581,-213.1541"/>
+</g>
+<!-- redis&#45;&gt;logger -->
+<g id="edge7" class="edge">
+<title>redis-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M267.9098,-34.1644C227.1471,-82.7435 105.5381,-227.6715 56.5241,-286.0841"/>
+<polygon fill="#000000" stroke="#000000" points="53.5843,-284.1426 49.8376,-294.0528 58.9466,-288.6421 53.5843,-284.1426"/>
+</g>
+<!-- reload -->
+<g id="node8" class="node">
+<title>reload</title>
+<ellipse fill="none" stroke="#000000" cx="281.4734" cy="-452.786" rx="35.3315" ry="18"/>
+<text text-anchor="middle" x="281.4734" y="-448.586" font-family="Times,serif" font-size="14.00" fill="#000000">reload</text>
+</g>
+<!-- reload&#45;&gt;config -->
+<g id="edge10" class="edge">
+<title>reload-&gt;config</title>
+<path fill="none" stroke="#000000" d="M295.4842,-436.0885C327.4495,-397.9939 405.8819,-304.5217 443.3335,-259.8887"/>
+<polygon fill="#000000" stroke="#000000" points="446.0824,-262.0576 449.8292,-252.1474 440.7201,-257.5581 446.0824,-262.0576"/>
+</g>
+<!-- reload&#45;&gt;logger -->
+<g id="edge9" class="edge">
+<title>reload-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M257.9083,-439.1807C213.6848,-413.6483 118.2025,-358.5216 68.0211,-329.5493"/>
+<polygon fill="#000000" stroke="#000000" points="69.6111,-326.4259 59.2009,-324.457 66.1111,-332.4881 69.6111,-326.4259"/>
+</g>
+<!-- resetter -->
+<g id="node9" class="node">
+<title>resetter</title>
+<ellipse fill="none" stroke="#000000" cx="132.7678" cy="-426.5652" rx="39.3984" ry="18"/>
+<text text-anchor="middle" x="132.7678" y="-422.3652" font-family="Times,serif" font-size="14.00" fill="#000000">resetter</text>
+</g>
+<!-- reload&#45;&gt;resetter -->
+<g id="edge11" class="edge">
+<title>reload-&gt;resetter</title>
+<path fill="none" stroke="#000000" d="M248.1009,-446.9016C227.9026,-443.3401 201.8366,-438.7439 179.5962,-434.8224"/>
+<polygon fill="#000000" stroke="#000000" points="180.1376,-431.3639 169.6817,-433.0742 178.922,-438.2575 180.1376,-431.3639"/>
+</g>
+<!-- resetter&#45;&gt;logger -->
+<g id="edge12" class="edge">
+<title>resetter-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M118.4461,-409.4974C102.0084,-389.9077 74.9173,-357.6218 56.2379,-335.3605"/>
+<polygon fill="#000000" stroke="#000000" points="58.881,-333.0653 49.7719,-327.6546 53.5187,-337.5649 58.881,-333.0653"/>
+</g>
+<!-- rpc -->
+<g id="node10" class="node">
+<title>rpc</title>
+<ellipse fill="none" stroke="#000000" cx="132.7678" cy="-44.2208" rx="27" ry="18"/>
+<text text-anchor="middle" x="132.7678" y="-40.0208" font-family="Times,serif" font-size="14.00" fill="#000000">rpc</text>
+</g>
+<!-- rpc&#45;&gt;config -->
+<g id="edge13" class="edge">
+<title>rpc-&gt;config</title>
+<path fill="none" stroke="#000000" d="M153.4808,-56.1795C209.3277,-88.4227 363.359,-177.3527 431.1448,-216.4889"/>
+<polygon fill="#000000" stroke="#000000" points="429.7078,-219.7006 440.1181,-221.6696 433.2078,-213.6384 429.7078,-219.7006"/>
+</g>
+<!-- rpc&#45;&gt;logger -->
+<g id="edge14" class="edge">
+<title>rpc-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M126.3994,-61.7179C109.8827,-107.097 65.5725,-228.8383 45.6502,-283.5745"/>
+<polygon fill="#000000" stroke="#000000" points="42.3576,-282.3876 42.2262,-292.9816 48.9354,-284.7818 42.3576,-282.3876"/>
+</g>
+<!-- static -->
+<g id="node11" class="node">
+<title>static</title>
+<ellipse fill="none" stroke="#000000" cx="35.7071" cy="-159.8932" rx="31.3333" ry="18"/>
+<text text-anchor="middle" x="35.7071" y="-155.6932" font-family="Times,serif" font-size="14.00" fill="#000000">static</text>
+</g>
+<!-- static&#45;&gt;config -->
+<g id="edge15" class="edge">
+<title>static-&gt;config</title>
+<path fill="none" stroke="#000000" d="M65.8159,-165.2022C140.1736,-178.3135 332.7753,-212.2743 419.9157,-227.6396"/>
+<polygon fill="#000000" stroke="#000000" points="419.5489,-231.1288 430.0048,-229.4185 420.7645,-224.2351 419.5489,-231.1288"/>
+</g>
+<!-- static&#45;&gt;logger -->
+<g id="edge16" class="edge">
+<title>static-&gt;logger</title>
+<path fill="none" stroke="#000000" d="M35.7071,-178.1073C35.7071,-204.0691 35.7071,-251.9543 35.7071,-282.5696"/>
+<polygon fill="#000000" stroke="#000000" points="32.2072,-282.6141 35.7071,-292.6141 39.2072,-282.6142 32.2072,-282.6141"/>
+</g>
+</g>
+</svg> \ No newline at end of file
diff --git a/plugins/gzip/plugin.go b/plugins/gzip/plugin.go
new file mode 100644
index 00000000..e5b9e4f5
--- /dev/null
+++ b/plugins/gzip/plugin.go
@@ -0,0 +1,25 @@
+package gzip
+
+import (
+ "net/http"
+
+ "github.com/NYTimes/gziphandler"
+)
+
+const PluginName = "gzip"
+
+type Gzip struct{}
+
+func (g *Gzip) Init() error {
+ return nil
+}
+
+func (g *Gzip) Middleware(next http.Handler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ gziphandler.GzipHandler(next).ServeHTTP(w, r)
+ }
+}
+
+func (g *Gzip) Name() string {
+ return PluginName
+}
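Outside of the plugin container the middleware can be used directly, since it only wraps an http.Handler; a small sketch:

```go
package main

import (
	"log"
	"net/http"

	"github.com/spiral/roadrunner/v2/plugins/gzip"
)

func main() {
	g := &gzip.Gzip{}

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// responses are gzip-compressed whenever the client sends Accept-Encoding: gzip
	log.Fatal(http.ListenAndServe(":8080", g.Middleware(hello)))
}
```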
diff --git a/service/headers/config.go b/plugins/headers/config.go
index f9af1df2..8d4e29c2 100644
--- a/service/headers/config.go
+++ b/plugins/headers/config.go
@@ -1,17 +1,17 @@
package headers
-import "github.com/spiral/roadrunner/service"
-
// Config declares headers service configuration.
type Config struct {
- // CORS settings.
- CORS *CORSConfig
+ Headers struct {
+ // CORS settings.
+ CORS *CORSConfig
- // Request headers to add to every payload send to PHP.
- Request map[string]string
+		// Request headers to add to every payload sent to PHP.
+ Request map[string]string
- // Response headers to add to every payload generated by PHP.
- Response map[string]string
+ // Response headers to add to every payload generated by PHP.
+ Response map[string]string
+ }
}
// CORSConfig headers configuration.
@@ -34,8 +34,3 @@ type CORSConfig struct {
// MaxAge of CORS headers in seconds/
MaxAge int
}
-
-// Hydrate service config.
-func (c *Config) Hydrate(cfg service.Config) error {
- return cfg.Unmarshal(c)
-}
diff --git a/plugins/headers/plugin.go b/plugins/headers/plugin.go
new file mode 100644
index 00000000..f1c6e6f3
--- /dev/null
+++ b/plugins/headers/plugin.go
@@ -0,0 +1,117 @@
+package headers
+
+import (
+ "net/http"
+ "strconv"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+// PluginName contains the default plugin name.
+const PluginName = "headers"
+const RootPluginName = "http"
+
+// Plugin manages request/response headers and CORS for the http plugin; implemented as middleware.
+type Plugin struct {
+	// headers and CORS configuration
+ cfg *Config
+}
+
+// Init configures the plugin and must return an error in case of misconfiguration.
+// The plugin must not be used without proper configuration pushed first.
+func (s *Plugin) Init(cfg config.Configurer) error {
+ const op = errors.Op("headers plugin init")
+ err := cfg.UnmarshalKey(RootPluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ return nil
+}
+
+// Middleware applies the configured request/response headers and CORS rules before passing control to the next handler.
+func (s *Plugin) Middleware(next http.Handler) http.HandlerFunc {
+ // Define the http.HandlerFunc
+ return func(w http.ResponseWriter, r *http.Request) {
+ if s.cfg.Headers.Request != nil {
+ for k, v := range s.cfg.Headers.Request {
+ r.Header.Add(k, v)
+ }
+ }
+
+ if s.cfg.Headers.Response != nil {
+ for k, v := range s.cfg.Headers.Response {
+ w.Header().Set(k, v)
+ }
+ }
+
+ if s.cfg.Headers.CORS != nil {
+ if r.Method == http.MethodOptions {
+ s.preflightRequest(w)
+ return
+ }
+ s.corsHeaders(w)
+ }
+
+ next.ServeHTTP(w, r)
+ }
+}
+
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// configure OPTIONS response
+func (s *Plugin) preflightRequest(w http.ResponseWriter) {
+ headers := w.Header()
+
+ headers.Add("Vary", "Origin")
+ headers.Add("Vary", "Access-Control-Request-Method")
+ headers.Add("Vary", "Access-Control-Request-Headers")
+
+ if s.cfg.Headers.CORS.AllowedOrigin != "" {
+ headers.Set("Access-Control-Allow-Origin", s.cfg.Headers.CORS.AllowedOrigin)
+ }
+
+ if s.cfg.Headers.CORS.AllowedHeaders != "" {
+ headers.Set("Access-Control-Allow-Headers", s.cfg.Headers.CORS.AllowedHeaders)
+ }
+
+ if s.cfg.Headers.CORS.AllowedMethods != "" {
+ headers.Set("Access-Control-Allow-Methods", s.cfg.Headers.CORS.AllowedMethods)
+ }
+
+ if s.cfg.Headers.CORS.AllowCredentials != nil {
+ headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.Headers.CORS.AllowCredentials))
+ }
+
+ if s.cfg.Headers.CORS.MaxAge > 0 {
+ headers.Set("Access-Control-Max-Age", strconv.Itoa(s.cfg.Headers.CORS.MaxAge))
+ }
+
+ w.WriteHeader(http.StatusOK)
+}
+
+// configure CORS headers
+func (s *Plugin) corsHeaders(w http.ResponseWriter) {
+ headers := w.Header()
+
+ headers.Add("Vary", "Origin")
+
+ if s.cfg.Headers.CORS.AllowedOrigin != "" {
+ headers.Set("Access-Control-Allow-Origin", s.cfg.Headers.CORS.AllowedOrigin)
+ }
+
+ if s.cfg.Headers.CORS.AllowedHeaders != "" {
+ headers.Set("Access-Control-Allow-Headers", s.cfg.Headers.CORS.AllowedHeaders)
+ }
+
+ if s.cfg.Headers.CORS.ExposedHeaders != "" {
+ headers.Set("Access-Control-Expose-Headers", s.cfg.Headers.CORS.ExposedHeaders)
+ }
+
+ if s.cfg.Headers.CORS.AllowCredentials != nil {
+ headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.Headers.CORS.AllowCredentials))
+ }
+}
diff --git a/service/http/attributes/attributes.go b/plugins/http/attributes/attributes.go
index 77d6ea69..4c453766 100644
--- a/service/http/attributes/attributes.go
+++ b/plugins/http/attributes/attributes.go
@@ -6,9 +6,18 @@ import (
"net/http"
)
-type attrKey int
+// contextKey is a value for use with context.WithValue. It's used as
+// a pointer so it fits in an interface{} without allocation.
+type contextKey struct {
+ name string
+}
+
+func (k *contextKey) String() string { return k.name }
-const contextKey attrKey = iota
+var (
+ // PsrContextKey is a context key. It can be used in the http attributes
+ PsrContextKey = &contextKey{"psr_attributes"}
+)
type attrs map[string]interface{}
@@ -30,12 +39,12 @@ func (v attrs) del(key string) {
// Init returns request with new context and attribute bag.
func Init(r *http.Request) *http.Request {
- return r.WithContext(context.WithValue(r.Context(), contextKey, attrs{}))
+ return r.WithContext(context.WithValue(r.Context(), PsrContextKey, attrs{}))
}
// All returns all context attributes.
func All(r *http.Request) map[string]interface{} {
- v := r.Context().Value(contextKey)
+ v := r.Context().Value(PsrContextKey)
if v == nil {
return attrs{}
}
@@ -46,7 +55,7 @@ func All(r *http.Request) map[string]interface{} {
// Get gets the value from request context. It replaces any existing
// values.
func Get(r *http.Request, key string) interface{} {
- v := r.Context().Value(contextKey)
+ v := r.Context().Value(PsrContextKey)
if v == nil {
return nil
}
@@ -57,7 +66,7 @@ func Get(r *http.Request, key string) interface{} {
// Set sets the key to value. It replaces any existing
// values. Context specific.
func Set(r *http.Request, key string, value interface{}) error {
- v := r.Context().Value(contextKey)
+ v := r.Context().Value(PsrContextKey)
if v == nil {
return errors.New("unable to find `psr:attributes` context key")
}
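Usage of the attribute bag stays the same after the key change; a sketch (the `user_id` attribute is purely illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/spiral/roadrunner/v2/plugins/http/attributes"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Init must run first so the request context carries the attribute bag
	r = attributes.Init(r)

	_ = attributes.Set(r, "user_id", 42)

	fmt.Fprintln(w, attributes.Get(r, "user_id"))
	fmt.Fprintln(w, attributes.All(r))
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```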
diff --git a/service/http/config.go b/plugins/http/config.go
index 34733e44..abde8917 100644
--- a/service/http/config.go
+++ b/plugins/http/config.go
@@ -1,28 +1,46 @@
package http
import (
- "errors"
- "fmt"
"net"
- "net/http"
"os"
+ "runtime"
"strings"
+ "time"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
+ "github.com/spiral/errors"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
)
+// Cidrs is a slice of IPNet addresses
+type Cidrs []*net.IPNet
+
+// IsTrusted checks if the ip address belongs to one of the subnets provided in the config
+func (c *Cidrs) IsTrusted(ip string) bool {
+ if len(*c) == 0 {
+ return false
+ }
+
+ i := net.ParseIP(ip)
+ if i == nil {
+ return false
+ }
+
+	for _, cidr := range *c {
+		if cidr.Contains(i) {
+ return true
+ }
+ }
+
+ return false
+}
+
// Config configures RoadRunner HTTP server.
type Config struct {
- // AppErrorCode is error code for the application errors (default 500)
- AppErrorCode uint64
- // Error code for the RR pool or worker errors
- InternalErrorCode uint64
// Port and port to handle as http server.
Address string
// SSL defines https server options.
- SSL SSLConfig
+ SSL *SSLConfig
// FCGI configuration. You can use FastCGI without HTTP server.
FCGI *FCGIConfig
@@ -31,17 +49,25 @@ type Config struct {
HTTP2 *HTTP2Config
// MaxRequestSize specified max size for payload body in megabytes, set 0 to unlimited.
- MaxRequestSize int64
+ MaxRequestSize uint64 `yaml:"max_request_size"`
// TrustedSubnets declare IP subnets which are allowed to set ip using X-Real-Ip and X-Forwarded-For
- TrustedSubnets []string
- cidrs []*net.IPNet
+ TrustedSubnets []string `yaml:"trusted_subnets"`
// Uploads configures uploads configuration.
Uploads *UploadsConfig
- // Workers configures rr server and worker pool.
- Workers *roadrunner.ServerConfig
+ // Pool configures worker pool.
+ Pool *poolImpl.Config
+
+ // Env is environment variables passed to the http pool
+ Env map[string]string
+
+ // List of the middleware names (order will be preserved)
+ Middleware []string
+
+ // slice of net.IPNet
+ cidrs Cidrs
}
// FCGIConfig for FastCGI server.
@@ -59,13 +85,14 @@ type HTTP2Config struct {
H2C bool
// MaxConcurrentStreams defaults to 128.
- MaxConcurrentStreams uint32
+ MaxConcurrentStreams uint32 `yaml:"max_concurrent_streams"`
}
// InitDefaults sets default values for HTTP/2 configuration.
func (cfg *HTTP2Config) InitDefaults() error {
cfg.Enabled = true
cfg.MaxConcurrentStreams = 128
+
return nil
}
@@ -92,7 +119,7 @@ func (c *Config) EnableHTTP() bool {
return c.Address != ""
}
-// EnableTLS returns true if rr must listen TLS connections.
+// EnableTLS returns true if the server must listen for TLS connections.
func (c *Config) EnableTLS() bool {
return c.SSL.Key != "" || c.SSL.Cert != "" || c.SSL.RootCA != ""
}
@@ -112,18 +139,18 @@ func (c *Config) EnableFCGI() bool {
return c.FCGI.Address != ""
}
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- if c.AppErrorCode == 0 {
- // set default behaviour - 500 error code
- c.AppErrorCode = http.StatusInternalServerError
- }
- if c.InternalErrorCode == 0 {
- // set default behaviour - 500 error code
- c.InternalErrorCode = http.StatusInternalServerError
- }
- if c.Workers == nil {
- c.Workers = &roadrunner.ServerConfig{}
+// InitDefaults populates missing Config values with sane defaults. Must return an error if the Config is not valid.
+func (c *Config) InitDefaults() error {
+ if c.Pool == nil {
+ // default pool
+ c.Pool = &poolImpl.Config{
+ Debug: false,
+ NumWorkers: int64(runtime.NumCPU()),
+ MaxJobs: 1000,
+ AllocateTimeout: time.Second * 60,
+ DestroyTimeout: time.Second * 60,
+ Supervisor: nil,
+ }
}
if c.HTTP2 == nil {
@@ -138,6 +165,10 @@ func (c *Config) Hydrate(cfg service.Config) error {
c.Uploads = &UploadsConfig{}
}
+ if c.SSL == nil {
+ c.SSL = &SSLConfig{}
+ }
+
if c.SSL.Port == 0 {
c.SSL.Port = 443
}
@@ -150,16 +181,6 @@ func (c *Config) Hydrate(cfg service.Config) error {
if err != nil {
return err
}
- err = c.Workers.InitDefaults()
- if err != nil {
- return err
- }
-
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
-
- c.Workers.UpscaleDurations()
if c.TrustedSubnets == nil {
// @see https://en.wikipedia.org/wiki/Reserved_IP_addresses
@@ -174,24 +195,28 @@ func (c *Config) Hydrate(cfg service.Config) error {
}
}
- if err := c.parseCIDRs(); err != nil {
+ cidrs, err := ParseCIDRs(c.TrustedSubnets)
+ if err != nil {
return err
}
+ c.cidrs = cidrs
return c.Valid()
}
-func (c *Config) parseCIDRs() error {
- for _, cidr := range c.TrustedSubnets {
+// ParseCIDRs parses the provided subnets and returns a slice of IPNet addresses
+func ParseCIDRs(subnets []string) (Cidrs, error) {
+ c := make(Cidrs, 0, len(subnets))
+ for _, cidr := range subnets {
_, cr, err := net.ParseCIDR(cidr)
if err != nil {
- return err
+ return nil, err
}
- c.cidrs = append(c.cidrs, cr)
+ c = append(c, cr)
}
- return nil
+ return c, nil
}
// IsTrusted if api can be trusted to use X-Real-Ip, X-Forwarded-For
@@ -216,38 +241,31 @@ func (c *Config) IsTrusted(ip string) bool {
// Valid validates the configuration.
func (c *Config) Valid() error {
+ const op = errors.Op("validation")
if c.Uploads == nil {
- return errors.New("malformed uploads config")
+ return errors.E(op, errors.Str("malformed uploads config"))
}
if c.HTTP2 == nil {
- return errors.New("malformed http2 config")
+ return errors.E(op, errors.Str("malformed http2 config"))
}
- if c.Workers == nil {
- return errors.New("malformed workers config")
- }
-
- if c.Workers.Pool == nil {
- return errors.New("malformed workers config (pool config is missing)")
- }
-
- if err := c.Workers.Pool.Valid(); err != nil {
- return err
+ if c.Pool == nil {
+ return errors.E(op, "malformed pool config")
}
if !c.EnableHTTP() && !c.EnableTLS() && !c.EnableFCGI() {
- return errors.New("unable to run http service, no method has been specified (http, https, http/2 or FastCGI)")
+ return errors.E(op, errors.Str("unable to run http service, no method has been specified (http, https, http/2 or FastCGI)"))
}
if c.Address != "" && !strings.Contains(c.Address, ":") {
- return errors.New("malformed http server address")
+ return errors.E(op, errors.Str("malformed http server address"))
}
if c.EnableTLS() {
if _, err := os.Stat(c.SSL.Key); err != nil {
if os.IsNotExist(err) {
- return fmt.Errorf("key file '%s' does not exists", c.SSL.Key)
+ return errors.E(op, errors.Errorf("key file '%s' does not exists", c.SSL.Key))
}
return err
@@ -255,7 +273,7 @@ func (c *Config) Valid() error {
if _, err := os.Stat(c.SSL.Cert); err != nil {
if os.IsNotExist(err) {
- return fmt.Errorf("cert file '%s' does not exists", c.SSL.Cert)
+ return errors.E(op, errors.Errorf("cert file '%s' does not exists", c.SSL.Cert))
}
return err
@@ -265,7 +283,7 @@ func (c *Config) Valid() error {
if c.SSL.RootCA != "" {
if _, err := os.Stat(c.SSL.RootCA); err != nil {
if os.IsNotExist(err) {
- return fmt.Errorf("root ca path provided, but path '%s' does not exists", c.SSL.RootCA)
+ return errors.E(op, errors.Errorf("root ca path provided, but path '%s' does not exists", c.SSL.RootCA))
}
return err
}
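Since ParseCIDRs and Cidrs.IsTrusted are now exported, the trusted-proxy check can be exercised in isolation; a sketch:

```go
package main

import (
	"fmt"
	"log"

	rrhttp "github.com/spiral/roadrunner/v2/plugins/http"
)

func main() {
	cidrs, err := rrhttp.ParseCIDRs([]string{"10.0.0.0/8", "127.0.0.0/8"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(cidrs.IsTrusted("10.1.2.3"))  // true
	fmt.Println(cidrs.IsTrusted("8.8.8.8"))   // false
	fmt.Println(cidrs.IsTrusted("not-an-ip")) // false: unparsable addresses are rejected
}
```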
diff --git a/service/http/constants.go b/plugins/http/constants.go
index a25f52a4..c3d5c589 100644
--- a/service/http/constants.go
+++ b/plugins/http/constants.go
@@ -3,4 +3,6 @@ package http
import "net/http"
var http2pushHeaderKey = http.CanonicalHeaderKey("http2-push")
-var trailerHeaderKey = http.CanonicalHeaderKey("trailer")
+
+// TrailerHeaderKey http header key
+var TrailerHeaderKey = http.CanonicalHeaderKey("trailer")
diff --git a/service/http/errors.go b/plugins/http/errors.go
index fb8762ef..fb8762ef 100644
--- a/service/http/errors.go
+++ b/plugins/http/errors.go
diff --git a/service/http/errors_windows.go b/plugins/http/errors_windows.go
index 3d0ba04c..3d0ba04c 100644
--- a/service/http/errors_windows.go
+++ b/plugins/http/errors_windows.go
diff --git a/service/http/handler.go b/plugins/http/handler.go
index 43f894d7..9c40cdfc 100644
--- a/service/http/handler.go
+++ b/plugins/http/handler.go
@@ -1,7 +1,6 @@
package http
import (
- "fmt"
"net"
"net/http"
"strconv"
@@ -9,9 +8,11 @@ import (
"sync"
"time"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner"
+ "github.com/hashicorp/go-multierror"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
)
const (
@@ -22,6 +23,9 @@ const (
EventError
)
+// MB is 1024*1024 bytes (one megabyte)
+const MB = 1024 * 1024
+
// ErrorEvent represents singular http error event.
type ErrorEvent struct {
// Request contains client request, must not be stored.
@@ -61,17 +65,30 @@ func (e *ResponseEvent) Elapsed() time.Duration {
// Handler serves http connections to underlying PHP application using PSR-7 protocol. Context will include request headers,
// parsed files and query, payload will include parsed form dataTree (if any).
type Handler struct {
- cfg *Config
- log *logrus.Logger
- rr *roadrunner.Server
- mul sync.Mutex
- lsn func(event int, ctx interface{})
- internalErrorCode uint64
- appErrorCode uint64
+ maxRequestSize uint64
+ uploads UploadsConfig
+ trusted Cidrs
+ log logger.Logger
+ pool pool.Pool
+ mul sync.Mutex
+ lsn events.Listener
}
-// Listen attaches handler event controller.
-func (h *Handler) Listen(l func(event int, ctx interface{})) {
+// NewHandler returns a new Handler (the handle interface implementation)
+func NewHandler(maxReqSize uint64, uploads UploadsConfig, trusted Cidrs, pool pool.Pool) (*Handler, error) {
+ if pool == nil {
+ return nil, errors.E(errors.Str("pool should be initialized"))
+ }
+ return &Handler{
+ maxRequestSize: maxReqSize * MB,
+ uploads: uploads,
+ pool: pool,
+ trusted: trusted,
+ }, nil
+}
+
+// AddListener attaches an event listener to the handler.
+func (h *Handler) AddListener(l events.Listener) {
h.mul.Lock()
defer h.mul.Unlock()
@@ -80,22 +97,18 @@ func (h *Handler) Listen(l func(event int, ctx interface{})) {
// mdwr serve using PSR-7 requests passed to underlying application. Attempts to serve static files first if enabled.
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ const op = errors.Op("ServeHTTP")
start := time.Now()
// validating request size
- if h.cfg.MaxRequestSize != 0 {
- if length := r.Header.Get("content-length"); length != "" {
- if size, err := strconv.ParseInt(length, 10, 64); err != nil {
- h.handleError(w, r, err, start)
- return
- } else if size > h.cfg.MaxRequestSize*1024*1024 {
- h.handleError(w, r, errors.New("request body max size is exceeded"), start)
- return
- }
+ if h.maxRequestSize != 0 {
+ err := h.maxSize(w, r, start, op)
+ if err != nil {
+ return
}
}
- req, err := NewRequest(r, h.cfg.Uploads)
+ req, err := NewRequest(r, h.uploads)
if err != nil {
h.handleError(w, r, err, start)
return
@@ -113,7 +126,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
- rsp, err := h.rr.Exec(p)
+ rsp, err := h.pool.Exec(p)
if err != nil {
h.handleError(w, r, err, start)
return
@@ -132,60 +145,58 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
+func (h *Handler) maxSize(w http.ResponseWriter, r *http.Request, start time.Time, op errors.Op) error {
+ if length := r.Header.Get("content-length"); length != "" {
+ if size, err := strconv.ParseInt(length, 10, 64); err != nil {
+ h.handleError(w, r, err, start)
+ return err
+		} else if size > int64(h.maxRequestSize) {
+			// return a non-nil error so the caller aborts the request instead of continuing to process it
+			err = errors.E(op, errors.Str("request body max size is exceeded"))
+			h.handleError(w, r, err, start)
+			return err
+ }
+ }
+ return nil
+}
+
// handleError sends error.
-/*
-handleError distinct RR errors and App errors
-You can set return distinct error codes for the App and for the RR
-*/
func (h *Handler) handleError(w http.ResponseWriter, r *http.Request, err error, start time.Time) {
+ h.mul.Lock()
+ defer h.mul.Unlock()
// if pipe is broken, there is no sense to write the header
// in this case we just report about error
if err == errEPIPE {
- h.throw(EventError, &ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)})
+ h.throw(ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)})
return
}
- if errors.Is(err, roadrunner.ErrNoAssociatedPool) ||
- errors.Is(err, roadrunner.ErrAllocateWorker) ||
- errors.Is(err, roadrunner.ErrWorkerNotReady) ||
- errors.Is(err, roadrunner.ErrEmptyPayload) ||
- errors.Is(err, roadrunner.ErrPoolStopped) ||
- errors.Is(err, roadrunner.ErrWorkerAllocateTimeout) ||
- errors.Is(err, roadrunner.ErrAllWorkersAreDead) {
- // for the RR errors, write custom error code
- w.WriteHeader(int(h.internalErrorCode))
- } else {
- // ResponseWriter is ok, write the error code
- w.WriteHeader(int(h.appErrorCode))
- }
-
+ err = multierror.Append(err)
+ // ResponseWriter is ok, write the error code
+ w.WriteHeader(500)
_, err2 := w.Write([]byte(err.Error()))
// error during the writing to the ResponseWriter
if err2 != nil {
+ err = multierror.Append(err2, err)
// concat original error with ResponseWriter error
- h.throw(EventError, &ErrorEvent{Request: r, Error: errors.New(fmt.Sprintf("error: %v, during handle this error, ResponseWriter error occurred: %v", err, err2)), start: start, elapsed: time.Since(start)})
+ h.throw(ErrorEvent{Request: r, Error: errors.E(err), start: start, elapsed: time.Since(start)})
return
}
- h.throw(EventError, &ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)})
+ h.throw(ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)})
}
// handleResponse triggers response event.
func (h *Handler) handleResponse(req *Request, resp *Response, start time.Time) {
- h.throw(EventResponse, &ResponseEvent{Request: req, Response: resp, start: start, elapsed: time.Since(start)})
+ h.throw(ResponseEvent{Request: req, Response: resp, start: start, elapsed: time.Since(start)})
}
// throw invokes event handler if any.
-func (h *Handler) throw(event int, ctx interface{}) {
- h.mul.Lock()
- defer h.mul.Unlock()
-
+func (h *Handler) throw(event interface{}) {
if h.lsn != nil {
- h.lsn(event, ctx)
+ h.lsn(event)
}
}
// get real ip passing multiple proxy
func (h *Handler) resolveIP(r *Request) {
- if !h.cfg.IsTrusted(r.RemoteAddr) {
+	if !h.trusted.IsTrusted(r.RemoteAddr) {
return
}
@@ -205,7 +216,7 @@ func (h *Handler) resolveIP(r *Request) {
}
// The logic here is the following:
- // In general case, we only expect X-Real-Ip header. If it exist, we get the IP addres from header and set request Remote address
+	// In the general case, we only expect the X-Real-Ip header. If it exists, we take the IP address from the header and set the request Remote address
// But, if there is no X-Real-Ip header, we also trying to check CloudFlare headers
// True-Client-IP is a general CF header in which copied information from X-Real-Ip in CF.
// CF-Connecting-IP is an Enterprise feature and we check it last in order.
diff --git a/service/http/parse.go b/plugins/http/parse.go
index 9b58d328..d4a1604b 100644
--- a/service/http/parse.go
+++ b/plugins/http/parse.go
@@ -30,7 +30,7 @@ func parseData(r *http.Request) dataTree {
// pushes value into data tree.
func (d dataTree) push(k string, v []string) {
- keys := fetchIndexes(k)
+ keys := FetchIndexes(k)
if len(keys) <= MaxLevel {
d.mount(keys, v)
}
@@ -60,7 +60,7 @@ func (d dataTree) mount(i []string, v []string) {
}
// parse incoming dataTree request into JSON (including contentMultipart form dataTree)
-func parseUploads(r *http.Request, cfg *UploadsConfig) *Uploads {
+func parseUploads(r *http.Request, cfg UploadsConfig) *Uploads {
u := &Uploads{
cfg: cfg,
tree: make(fileTree),
@@ -82,7 +82,7 @@ func parseUploads(r *http.Request, cfg *UploadsConfig) *Uploads {
// pushes new file upload into it's proper place.
func (d fileTree) push(k string, v []*FileUpload) {
- keys := fetchIndexes(k)
+ keys := FetchIndexes(k)
if len(keys) <= MaxLevel {
d.mount(keys, v)
}
@@ -111,8 +111,8 @@ func (d fileTree) mount(i []string, v []*FileUpload) {
d[i[0]].(fileTree).mount(i[1:], v)
}
-// fetchIndexes parses input name and splits it into separate indexes list.
-func fetchIndexes(s string) []string {
+// FetchIndexes parses input name and splits it into separate indexes list.
+func FetchIndexes(s string) []string {
var (
pos int
ch string
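FetchIndexes is now exported as well; a sketch of what it does to a PHP-style field name (the output shown in the comment is illustrative, inferred from the doc comment above):

```go
package main

import (
	"fmt"

	rrhttp "github.com/spiral/roadrunner/v2/plugins/http"
)

func main() {
	// splits a form field name into its separate index parts
	fmt.Println(rrhttp.FetchIndexes("user[address][city]")) // e.g. [user address city]
}
```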
diff --git a/plugins/http/plugin.go b/plugins/http/plugin.go
new file mode 100644
index 00000000..e6aba78b
--- /dev/null
+++ b/plugins/http/plugin.go
@@ -0,0 +1,532 @@
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/fcgi"
+ "net/url"
+ "strings"
+ "sync"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ "github.com/spiral/roadrunner/v2/plugins/checker"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/http/attributes"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/utils"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/h2c"
+ "golang.org/x/sys/cpu"
+)
+
+const (
+ // PluginName declares plugin name.
+ PluginName = "http"
+
+	// EventInitSSL is thrown at the moment of https initialization. The SSL server is passed as context.
+ EventInitSSL = 750
+)
+
+// Middleware interface
+type Middleware interface {
+ Middleware(f http.Handler) http.HandlerFunc
+}
+
+type middleware map[string]Middleware
+
+// Plugin manages pool, http servers. The main http plugin structure
+type Plugin struct {
+ sync.RWMutex
+
+ configurer config.Configurer
+ server server.Server
+ log logger.Logger
+
+ cfg *Config
+ // middlewares to chain
+ mdwr middleware
+
+ // Pool which attached to all servers
+ pool pool.Pool
+
+ // servers RR handler
+ handler *Handler
+
+ // servers
+ http *http.Server
+ https *http.Server
+ fcgi *http.Server
+}
+
+// Init configures the service and must return an error in case of misconfiguration.
+// Services must not be used without proper configuration pushed first.
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger, server server.Server) error {
+ const op = errors.Op("http Init")
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ err = s.cfg.InitDefaults()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.configurer = cfg
+ s.log = log
+ s.mdwr = make(map[string]Middleware)
+
+ if !s.cfg.EnableHTTP() && !s.cfg.EnableTLS() && !s.cfg.EnableFCGI() {
+ return errors.E(op, errors.Disabled)
+ }
+
+ s.pool, err = server.NewWorkerPool(context.Background(), poolImpl.Config{
+ Debug: s.cfg.Pool.Debug,
+ NumWorkers: s.cfg.Pool.NumWorkers,
+ MaxJobs: s.cfg.Pool.MaxJobs,
+ AllocateTimeout: s.cfg.Pool.AllocateTimeout,
+ DestroyTimeout: s.cfg.Pool.DestroyTimeout,
+ Supervisor: s.cfg.Pool.Supervisor,
+ }, s.cfg.Env, s.logCallback)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.server = server
+
+ return nil
+}
+
+func (s *Plugin) logCallback(event interface{}) {
+ if ev, ok := event.(ResponseEvent); ok {
+ s.log.Debug("http handler response received", "elapsed", ev.Elapsed().String(), "remote address", ev.Request.RemoteAddr)
+ }
+}
+
+// Serve serves the svc.
+func (s *Plugin) Serve() chan error {
+ s.Lock()
+ defer s.Unlock()
+
+ const op = errors.Op("serve http")
+ errCh := make(chan error, 2)
+
+ var err error
+ s.handler, err = NewHandler(
+ s.cfg.MaxRequestSize,
+ *s.cfg.Uploads,
+ s.cfg.cidrs,
+ s.pool,
+ )
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+
+ s.handler.AddListener(s.logCallback)
+
+ if s.cfg.EnableHTTP() {
+ if s.cfg.EnableH2C() {
+ s.http = &http.Server{Addr: s.cfg.Address, Handler: h2c.NewHandler(s, &http2.Server{})}
+ } else {
+ s.http = &http.Server{Addr: s.cfg.Address, Handler: s}
+ }
+ }
+
+ if s.cfg.EnableTLS() {
+ s.https = s.initSSL()
+ if s.cfg.SSL.RootCA != "" {
+ err = s.appendRootCa()
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+ }
+
+ if s.cfg.EnableHTTP2() {
+ if err := s.initHTTP2(); err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+ }
+ }
+
+ if s.cfg.EnableFCGI() {
+ s.fcgi = &http.Server{Handler: s}
+ }
+
+ // apply middlewares before starting the server
+ if len(s.mdwr) > 0 {
+ s.addMiddlewares()
+ }
+
+ if s.http != nil {
+ go func() {
+ httpErr := s.http.ListenAndServe()
+ if httpErr != nil && httpErr != http.ErrServerClosed {
+ errCh <- errors.E(op, httpErr)
+ return
+ }
+ }()
+ }
+
+ if s.https != nil {
+ go func() {
+ httpErr := s.https.ListenAndServeTLS(
+ s.cfg.SSL.Cert,
+ s.cfg.SSL.Key,
+ )
+
+ if httpErr != nil && httpErr != http.ErrServerClosed {
+ errCh <- errors.E(op, httpErr)
+ return
+ }
+ }()
+ }
+
+ if s.fcgi != nil {
+ go func() {
+ httpErr := s.serveFCGI()
+ if httpErr != nil && httpErr != http.ErrServerClosed {
+ errCh <- errors.E(op, httpErr)
+ return
+ }
+ }()
+ }
+
+ return errCh
+}
+
+// Stop stops the http.
+func (s *Plugin) Stop() error {
+ s.Lock()
+ defer s.Unlock()
+
+ var err error
+ if s.fcgi != nil {
+ err = s.fcgi.Shutdown(context.Background())
+ if err != nil && err != http.ErrServerClosed {
+ s.log.Error("error shutting down the fcgi server", "error", err)
+ // write error and try to stop other transport
+ err = multierror.Append(err)
+ }
+ }
+
+ if s.https != nil {
+ err = s.https.Shutdown(context.Background())
+ if err != nil && err != http.ErrServerClosed {
+ s.log.Error("error shutting down the https server", "error", err)
+ // write error and try to stop other transport
+ err = multierror.Append(err)
+ }
+ }
+
+ if s.http != nil {
+ err = s.http.Shutdown(context.Background())
+ if err != nil && err != http.ErrServerClosed {
+ s.log.Error("error shutting down the http server", "error", err)
+ // write error and try to stop other transport
+ err = multierror.Append(err)
+ }
+ }
+
+ s.pool.Destroy(context.Background())
+
+ return err
+}
+
+// ServeHTTP handles connection using set of middleware and pool PSR-7 server.
+func (s *Plugin) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if headerContainsUpgrade(r, s) {
+ http.Error(w, "server does not support upgrade header", http.StatusInternalServerError)
+ return
+ }
+
+ if s.redirect(w, r) {
+ return
+ }
+
+ if s.https != nil && r.TLS != nil {
+ w.Header().Add("Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
+ }
+
+ r = attributes.Init(r)
+	// protect against the case when the user sends Reset while we are replacing the handler and the pool
+ s.RLock()
+ s.handler.ServeHTTP(w, r)
+ s.RUnlock()
+}
+
+// Workers returns associated pool workers
+func (s *Plugin) Workers() []worker.BaseProcess {
+ return s.pool.Workers()
+}
+
+// Name returns endure.Named interface implementation
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// Reset destroys the old pool and replaces it with new one, waiting for old pool to die
+func (s *Plugin) Reset() error {
+ s.Lock()
+ defer s.Unlock()
+ const op = errors.Op("http reset")
+ s.log.Info("HTTP plugin got restart request. Restarting...")
+ s.pool.Destroy(context.Background())
+ s.pool = nil
+
+ // re-read the config
+ err := s.configurer.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.pool, err = s.server.NewWorkerPool(context.Background(), poolImpl.Config{
+ Debug: s.cfg.Pool.Debug,
+ NumWorkers: s.cfg.Pool.NumWorkers,
+ MaxJobs: s.cfg.Pool.MaxJobs,
+ AllocateTimeout: s.cfg.Pool.AllocateTimeout,
+ DestroyTimeout: s.cfg.Pool.DestroyTimeout,
+ Supervisor: s.cfg.Pool.Supervisor,
+ }, s.cfg.Env, s.logCallback)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.log.Info("HTTP listeners successfully re-added")
+
+ s.log.Info("HTTP workers Pool successfully restarted")
+ s.handler, err = NewHandler(
+ s.cfg.MaxRequestSize,
+ *s.cfg.Uploads,
+ s.cfg.cidrs,
+ s.pool,
+ )
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.log.Info("HTTP plugin successfully restarted")
+ return nil
+}
+
+// Collects declares the HTTP middlewares to be collected
+func (s *Plugin) Collects() []interface{} {
+ return []interface{}{
+ s.AddMiddleware,
+ }
+}
+
+// AddMiddleware registers a named middleware so it can be applied in the configured order
+func (s *Plugin) AddMiddleware(name endure.Named, m Middleware) {
+ s.mdwr[name.Name()] = m
+}
+
+// Status returns the status of the particular plugin
+func (s *Plugin) Status() checker.Status {
+ workers := s.Workers()
+ for i := 0; i < len(workers); i++ {
+ if workers[i].State().IsActive() {
+ return checker.Status{
+ Code: http.StatusOK,
+ }
+ }
+ }
+	// if there are no active workers, treat this as an error
+ return checker.Status{
+ Code: http.StatusInternalServerError,
+ }
+}
+
+func (s *Plugin) redirect(w http.ResponseWriter, r *http.Request) bool {
+ if s.https != nil && r.TLS == nil && s.cfg.SSL.Redirect {
+ target := &url.URL{
+ Scheme: "https",
+ Host: s.tlsAddr(r.Host, false),
+ Path: r.URL.Path,
+ RawQuery: r.URL.RawQuery,
+ }
+
+ http.Redirect(w, r, target.String(), http.StatusTemporaryRedirect)
+ return true
+ }
+ return false
+}
+
+func headerContainsUpgrade(r *http.Request, s *Plugin) bool {
+ if _, ok := r.Header["Upgrade"]; ok {
+ // https://golang.org/pkg/net/http/#Hijacker
+ s.log.Error("server does not support Upgrade header")
+ return true
+ }
+ return false
+}
+
+// appendRootCa appends the configured RootCA to the server's TLS config
+func (s *Plugin) appendRootCa() error {
+ const op = errors.Op("append root CA")
+	rootCAs, err := x509.SystemCertPool()
+	if err != nil {
+		return errors.E(op, err)
+	}
+ if rootCAs == nil {
+ rootCAs = x509.NewCertPool()
+ }
+
+ CA, err := ioutil.ReadFile(s.cfg.SSL.RootCA)
+ if err != nil {
+ return err
+ }
+
+ // should append our CA cert
+ ok := rootCAs.AppendCertsFromPEM(CA)
+ if !ok {
+ return errors.E(op, errors.Str("could not append Certs from PEM"))
+ }
+ // disable "G402 (CWE-295): TLS MinVersion too low. (Confidence: HIGH, Severity: HIGH)"
+ // #nosec G402
+ cfg := &tls.Config{
+ InsecureSkipVerify: false,
+ RootCAs: rootCAs,
+ }
+ s.http.TLSConfig = cfg
+
+ return nil
+}
+
+// initSSL initializes the HTTPS server with hardware-aware cipher suite ordering
+func (s *Plugin) initSSL() *http.Server {
+ var topCipherSuites []uint16
+ var defaultCipherSuitesTLS13 []uint16
+
+ hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
+ hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
+ // Keep in sync with crypto/aes/cipher_s390x.go.
+ hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
+
+ hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
+
+ if hasGCMAsm {
+		// If AES-GCM hardware support is available, prioritize the AES-GCM
+		// cipher suites.
+ topCipherSuites = []uint16{
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ }
+ defaultCipherSuitesTLS13 = []uint16{
+ tls.TLS_AES_128_GCM_SHA256,
+ tls.TLS_CHACHA20_POLY1305_SHA256,
+ tls.TLS_AES_256_GCM_SHA384,
+ }
+ } else {
+ // Without AES-GCM hardware, we put the ChaCha20-Poly1305
+ // cipher suites first.
+ topCipherSuites = []uint16{
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ }
+ defaultCipherSuitesTLS13 = []uint16{
+ tls.TLS_CHACHA20_POLY1305_SHA256,
+ tls.TLS_AES_128_GCM_SHA256,
+ tls.TLS_AES_256_GCM_SHA384,
+ }
+ }
+
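+	// combine the TLS 1.2 and TLS 1.3 suites in the preference order chosen above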
+ DefaultCipherSuites := make([]uint16, 0, 22)
+ DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
+ DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
+
+ server := &http.Server{
+ Addr: s.tlsAddr(s.cfg.Address, true),
+ Handler: s,
+ TLSConfig: &tls.Config{
+ CurvePreferences: []tls.CurveID{
+ tls.CurveP256,
+ tls.CurveP384,
+ tls.CurveP521,
+ tls.X25519,
+ },
+ CipherSuites: DefaultCipherSuites,
+ MinVersion: tls.VersionTLS12,
+ PreferServerCipherSuites: true,
+ },
+ }
+
+ return server
+}
+
+// initHTTP2 configures HTTP/2 support for the HTTPS server
+func (s *Plugin) initHTTP2() error {
+ return http2.ConfigureServer(s.https, &http2.Server{
+ MaxConcurrentStreams: s.cfg.HTTP2.MaxConcurrentStreams,
+ })
+}
+
+// serveFCGI starts FastCGI server.
+func (s *Plugin) serveFCGI() error {
+ l, err := utils.CreateListener(s.cfg.FCGI.Address)
+ if err != nil {
+ return err
+ }
+
+ err = fcgi.Serve(l, s.fcgi.Handler)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// tlsAddr replaces the listen/host port with the port configured in the SSL config.
+func (s *Plugin) tlsAddr(host string, forcePort bool) string {
+	// strip any existing port from the host first
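+	// note: this assumes the host is a hostname or IPv4 address; an IPv6 literal would be truncated by the split below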
+ host = strings.Split(host, ":")[0]
+
+ if forcePort || s.cfg.SSL.Port != 443 {
+ host = fmt.Sprintf("%s:%v", host, s.cfg.SSL.Port)
+ }
+
+ return host
+}
+
+func (s *Plugin) addMiddlewares() {
+ if s.http != nil {
+ applyMiddlewares(s.http, s.mdwr, s.cfg.Middleware, s.log)
+ }
+ if s.https != nil {
+ applyMiddlewares(s.https, s.mdwr, s.cfg.Middleware, s.log)
+ }
+
+ if s.fcgi != nil {
+ applyMiddlewares(s.fcgi, s.mdwr, s.cfg.Middleware, s.log)
+ }
+}
+
+func applyMiddlewares(server *http.Server, middlewares map[string]Middleware, order []string, log logger.Logger) {
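+	// middlewares wrap the handler in the configured order: the last name in the list becomes the outermost wrapper and therefore runs first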
+ for i := 0; i < len(order); i++ {
+ if mdwr, ok := middlewares[order[i]]; ok {
+ server.Handler = mdwr.Middleware(server.Handler)
+ } else {
+ log.Warn("requested middleware does not exist", "requested", order[i])
+ }
+ }
+}
diff --git a/service/http/request.go b/plugins/http/request.go
index f3fff198..3983fdde 100644
--- a/service/http/request.go
+++ b/plugins/http/request.go
@@ -8,11 +8,14 @@ import (
"net/url"
"strings"
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service/http/attributes"
+ j "github.com/json-iterator/go"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/plugins/http/attributes"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
)
+var json = j.ConfigCompatibleWithStandardLibrary
+
const (
defaultMaxMemory = 32 << 20 // 32 MB
contentNone = iota + 900
@@ -67,8 +70,8 @@ func fetchIP(pair string) string {
}
// NewRequest creates new PSR7 compatible request using net/http request.
-func NewRequest(r *http.Request, cfg *UploadsConfig) (req *Request, err error) {
- req = &Request{
+func NewRequest(r *http.Request, cfg UploadsConfig) (*Request, error) {
+ req := &Request{
RemoteAddr: fetchIP(r.RemoteAddr),
Protocol: r.Proto,
Method: r.Method,
@@ -90,18 +93,19 @@ func NewRequest(r *http.Request, cfg *UploadsConfig) (req *Request, err error) {
return req, nil
case contentStream:
+ var err error
req.body, err = ioutil.ReadAll(r.Body)
return req, err
case contentMultipart:
- if err = r.ParseMultipartForm(defaultMaxMemory); err != nil {
+ if err := r.ParseMultipartForm(defaultMaxMemory); err != nil {
return nil, err
}
req.Uploads = parseUploads(r, cfg)
fallthrough
case contentFormData:
- if err = r.ParseForm(); err != nil {
+ if err := r.ParseForm(); err != nil {
return nil, err
}
@@ -113,7 +117,7 @@ func NewRequest(r *http.Request, cfg *UploadsConfig) (req *Request, err error) {
}
// Open moves all uploaded files to temporary directory so it can be given to php later.
-func (r *Request) Open(log *logrus.Logger) {
+func (r *Request) Open(log logger.Logger) {
if r.Uploads == nil {
return
}
@@ -122,7 +126,7 @@ func (r *Request) Open(log *logrus.Logger) {
}
// Close clears all temp file uploads
-func (r *Request) Close(log *logrus.Logger) {
+func (r *Request) Close(log logger.Logger) {
if r.Uploads == nil {
return
}
@@ -132,16 +136,17 @@ func (r *Request) Close(log *logrus.Logger) {
// Payload request marshaled RoadRunner payload based on PSR7 data. values encode method is JSON. Make sure to open
// files prior to calling this method.
-func (r *Request) Payload() (p *roadrunner.Payload, err error) {
- p = &roadrunner.Payload{}
+func (r *Request) Payload() (payload.Payload, error) {
+ p := payload.Payload{}
+ var err error
if p.Context, err = json.Marshal(r); err != nil {
- return nil, err
+ return payload.Payload{}, err
}
if r.Parsed {
if p.Body, err = json.Marshal(r.body); err != nil {
- return nil, err
+ return payload.Payload{}, err
}
} else if r.body != nil {
p.Body = r.body.([]byte)
diff --git a/service/http/response.go b/plugins/http/response.go
index a2540edf..17049ce1 100644
--- a/service/http/response.go
+++ b/plugins/http/response.go
@@ -4,14 +4,11 @@ import (
"io"
"net/http"
"strings"
+ "sync"
- j "github.com/json-iterator/go"
-
- "github.com/spiral/roadrunner"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
)
-var json = j.ConfigCompatibleWithStandardLibrary
-
// Response handles PSR7 response logic.
type Response struct {
// Status contains response status.
@@ -20,14 +17,14 @@ type Response struct {
// Header contains list of response headers.
Headers map[string][]string `json:"headers"`
- // associated body payload.
- body interface{}
+ // associated Body payload.
+ Body interface{}
+ sync.Mutex
}
-// NewResponse creates new response based on given rr payload.
-func NewResponse(p *roadrunner.Payload) (*Response, error) {
- r := &Response{body: p.Body}
-
+// NewResponse creates new response based on given pool payload.
+func NewResponse(p payload.Payload) (*Response, error) {
+ r := &Response{Body: p.Body}
if err := json.Unmarshal(p.Context, r); err != nil {
return nil, err
}
@@ -57,14 +54,14 @@ func (r *Response) Write(w http.ResponseWriter) error {
w.WriteHeader(r.Status)
- if data, ok := r.body.([]byte); ok {
+ if data, ok := r.Body.([]byte); ok {
_, err := w.Write(data)
if err != nil {
return handleWriteError(err)
}
}
- if rc, ok := r.body.(io.Reader); ok {
+ if rc, ok := r.Body.(io.Reader); ok {
if _, err := io.Copy(w, rc); err != nil {
return err
}
@@ -88,7 +85,7 @@ func handlePushHeaders(h map[string][]string) []string {
}
func handleTrailers(h map[string][]string) {
- trailers, ok := h[trailerHeaderKey]
+ trailers, ok := h[TrailerHeaderKey]
if !ok {
return
}
@@ -104,5 +101,5 @@ func handleTrailers(h map[string][]string) {
}
}
- delete(h, trailerHeaderKey)
+ delete(h, TrailerHeaderKey)
}
diff --git a/service/http/uploads.go b/plugins/http/uploads.go
index e369fab2..d5196844 100644
--- a/service/http/uploads.go
+++ b/plugins/http/uploads.go
@@ -1,14 +1,13 @@
package http
import (
- "fmt"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+
"io"
"io/ioutil"
"mime/multipart"
"os"
"sync"
-
- "github.com/sirupsen/logrus"
)
const (
@@ -19,19 +18,19 @@ const (
UploadErrorNoFile = 4
// UploadErrorNoTmpDir - missing a temporary folder.
- UploadErrorNoTmpDir = 5
+ UploadErrorNoTmpDir = 6
// UploadErrorCantWrite - failed to write file to disk.
- UploadErrorCantWrite = 6
+ UploadErrorCantWrite = 7
// UploadErrorExtension - forbidden file extension.
- UploadErrorExtension = 7
+ UploadErrorExtension = 8
)
// Uploads tree manages uploaded files tree and temporary files.
type Uploads struct {
// associated temp directory and forbidden extensions.
- cfg *UploadsConfig
+ cfg UploadsConfig
// pre processed data tree for Uploads.
tree fileTree
@@ -47,7 +46,7 @@ func (u *Uploads) MarshalJSON() ([]byte, error) {
// Open moves all uploaded files to temp directory, return error in case of issue with temp directory. File errors
// will be handled individually.
-func (u *Uploads) Open(log *logrus.Logger) {
+func (u *Uploads) Open(log logger.Logger) {
var wg sync.WaitGroup
for _, f := range u.list {
wg.Add(1)
@@ -55,7 +54,7 @@ func (u *Uploads) Open(log *logrus.Logger) {
defer wg.Done()
err := f.Open(u.cfg)
if err != nil && log != nil {
- log.Error(fmt.Errorf("error opening the file: error %v", err))
+ log.Error("error opening the file", "err", err)
}
}(f)
}
@@ -64,12 +63,12 @@ func (u *Uploads) Open(log *logrus.Logger) {
}
// Clear deletes all temporary files.
-func (u *Uploads) Clear(log *logrus.Logger) {
+func (u *Uploads) Clear(log logger.Logger) {
for _, f := range u.list {
if f.TempFilename != "" && exists(f.TempFilename) {
err := os.Remove(f.TempFilename)
if err != nil && log != nil {
- log.Error(fmt.Errorf("error removing the file: error %v", err))
+ log.Error("error removing the file", "err", err)
}
}
}
@@ -113,7 +112,7 @@ func NewUpload(f *multipart.FileHeader) *FileUpload {
// STACK
// DEFER FILE CLOSE (2)
// DEFER TMP CLOSE (1)
-func (f *FileUpload) Open(cfg *UploadsConfig) (err error) {
+func (f *FileUpload) Open(cfg UploadsConfig) (err error) {
if cfg.Forbids(f.Name) {
f.Error = UploadErrorExtension
return nil
diff --git a/service/http/uploads_config.go b/plugins/http/uploads_config.go
index 3f655064..4c20c8e8 100644
--- a/service/http/uploads_config.go
+++ b/plugins/http/uploads_config.go
@@ -19,6 +19,7 @@ type UploadsConfig struct {
// InitDefaults sets missing values to their default values.
func (cfg *UploadsConfig) InitDefaults() error {
cfg.Forbid = []string{".php", ".exe", ".bat"}
+ cfg.Dir = os.TempDir()
return nil
}
diff --git a/plugins/informer/interface.go b/plugins/informer/interface.go
new file mode 100644
index 00000000..27139ae1
--- /dev/null
+++ b/plugins/informer/interface.go
@@ -0,0 +1,8 @@
+package informer
+
+import "github.com/spiral/roadrunner/v2/interfaces/worker"
+
+// Informer is used to get workers from a particular plugin or a set of plugins
+type Informer interface {
+ Workers() []worker.BaseProcess
+}
diff --git a/plugins/informer/plugin.go b/plugins/informer/plugin.go
new file mode 100644
index 00000000..3359cd7e
--- /dev/null
+++ b/plugins/informer/plugin.go
@@ -0,0 +1,55 @@
+package informer
+
+import (
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
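+// PluginName is the user-friendly plugin name, returned by Name() and used to register the plugin in the container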
+const PluginName = "informer"
+
+type Plugin struct {
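+	// registry maps a plugin name to its Informer implementation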
+ registry map[string]Informer
+ log logger.Logger
+}
+
+func (p *Plugin) Init(log logger.Logger) error {
+ p.registry = make(map[string]Informer)
+ p.log = log
+ return nil
+}
+
+// Workers provides BaseProcess slice with workers for the requested plugin
+func (p *Plugin) Workers(name string) ([]worker.BaseProcess, error) {
+ const op = errors.Op("get workers")
+ svc, ok := p.registry[name]
+ if !ok {
+ return nil, errors.E(op, errors.Errorf("no such service: %s", name))
+ }
+
+ return svc.Workers(), nil
+}
+
+// CollectTarget collects a plugin that provides workers (implements Informer).
+func (p *Plugin) CollectTarget(name endure.Named, r Informer) error {
+ p.registry[name.Name()] = r
+ return nil
+}
+
+// Collects declares services to be collected.
+func (p *Plugin) Collects() []interface{} {
+ return []interface{}{
+ p.CollectTarget,
+ }
+}
+
+// Name of the service.
+func (p *Plugin) Name() string {
+ return PluginName
+}
+
+// RPC returns the associated RPC service.
+func (p *Plugin) RPC() interface{} {
+ return &rpc{srv: p, log: p.log}
+}
diff --git a/plugins/informer/rpc.go b/plugins/informer/rpc.go
new file mode 100644
index 00000000..98b5681c
--- /dev/null
+++ b/plugins/informer/rpc.go
@@ -0,0 +1,54 @@
+package informer
+
+import (
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/tools"
+)
+
+type rpc struct {
+ srv *Plugin
+ log logger.Logger
+}
+
+// WorkerList contains list of workers.
+type WorkerList struct {
+ // Workers is list of workers.
+ Workers []tools.ProcessState `json:"workers"`
+}
+
+// List returns all services registered in the informer.
+func (rpc *rpc) List(_ bool, list *[]string) error {
+ rpc.log.Debug("Started List method")
+ *list = make([]string, 0, len(rpc.srv.registry))
+
+ for name := range rpc.srv.registry {
+ *list = append(*list, name)
+ }
+ rpc.log.Debug("list of services", "list", *list)
+
+ rpc.log.Debug("successfully finished List method")
+ return nil
+}
+
+// Workers returns the worker state for the given service.
+func (rpc *rpc) Workers(service string, list *WorkerList) error {
+ rpc.log.Debug("started Workers method", "service", service)
+ workers, err := rpc.srv.Workers(service)
+ if err != nil {
+ return err
+ }
+
+ list.Workers = make([]tools.ProcessState, 0)
+ for _, w := range workers {
+ ps, err := tools.WorkerProcessState(w.(worker.BaseProcess))
+ if err != nil {
+ continue
+ }
+
+ list.Workers = append(list.Workers, ps)
+ }
+ rpc.log.Debug("list of workers", "workers", list.Workers)
+ rpc.log.Debug("successfully finished Workers method")
+ return nil
+}
diff --git a/plugins/kv/boltdb/config.go b/plugins/kv/boltdb/config.go
new file mode 100644
index 00000000..b2e1e636
--- /dev/null
+++ b/plugins/kv/boltdb/config.go
@@ -0,0 +1,24 @@
+package boltdb
+
+type Config struct {
+	// Dir is the directory to store the DB file in
+	Dir string
+	// File is the boltDB file name. There is no need to create it yourself;
+	// the driver will create it or reuse an existing one
+	File string
+	// Bucket is the bucket name to store data in
+	Bucket string
+	// Permissions are the DB file permissions
+	Permissions int
+	// Interval is the TTL garbage-collection interval, in seconds
+	Interval uint `yaml:"interval"`
+}
+
+// InitDefaults initializes default values for the boltdb
+func (s *Config) InitDefaults() {
+ s.Dir = "." // current dir
+ s.Bucket = "rr" // default bucket name
+ s.File = "rr.db" // default file name
+ s.Permissions = 0777 // free for all
+ s.Interval = 60 // default is 60 seconds timeout
+}
diff --git a/plugins/kv/boltdb/plugin.go b/plugins/kv/boltdb/plugin.go
new file mode 100644
index 00000000..6cfc49f6
--- /dev/null
+++ b/plugins/kv/boltdb/plugin.go
@@ -0,0 +1,452 @@
+package boltdb
+
+import (
+ "bytes"
+ "encoding/gob"
+ "os"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ bolt "go.etcd.io/bbolt"
+)
+
+const PluginName = "boltdb"
+
+// Plugin is the BoltDB (bbolt) K/V storage plugin.
+type Plugin struct {
+	// db instance
+	DB *bolt.DB
+	// bucket name; should be UTF-8
+	bucket []byte
+
+	// config for RR integration
+	cfg *Config
+
+	// logger
+	log logger.Logger
+
+	// gc contains keys that have timeouts (TTL)
+	gc *sync.Map
+	// default timeout for cache cleanup is 1 minute
+	timeout time.Duration
+
+ // stop is used to stop keys GC and close boltdb connection
+ stop chan struct{}
+}
+
+func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ const op = errors.Op("boltdb plugin init")
+ s.cfg = &Config{}
+
+ s.cfg.InitDefaults()
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ // set the logger
+ s.log = log
+
+ db, err := bolt.Open(path.Join(s.cfg.Dir, s.cfg.File), os.FileMode(s.cfg.Permissions), nil)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+	// create the bucket if it does not exist
+	// tx.Commit is invoked via db.Update
+	err = db.Update(func(tx *bolt.Tx) error {
+		const upOp = errors.Op("boltdb Update")
+		_, err = tx.CreateBucketIfNotExists([]byte(s.cfg.Bucket))
+		if err != nil {
+			return errors.E(op, upOp, err)
+ }
+ return nil
+ })
+
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.DB = db
+ s.bucket = []byte(s.cfg.Bucket)
+ s.stop = make(chan struct{})
+ s.timeout = time.Duration(s.cfg.Interval) * time.Second
+ s.gc = &sync.Map{}
+
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
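+	// the error channel is never written to by this plugin; it only satisfies the Serve contract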
+ // start the TTL gc
+ go s.gcPhase()
+
+ return errCh
+}
+
+func (s *Plugin) Stop() error {
+ const op = errors.Op("boltdb stop")
+ err := s.Close()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+func (s *Plugin) Has(keys ...string) (map[string]bool, error) {
+ const op = errors.Op("boltdb Has")
+ s.log.Debug("boltdb HAS method called", "args", keys)
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ m := make(map[string]bool, len(keys))
+
+	// this is a read-only transaction
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ // Get retrieves the value for a key in the bucket.
+ // Returns a nil value if the key does not exist or if the key is a nested bucket.
+ // The returned value is only valid for the life of the transaction.
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ exist := b.Get([]byte(keys[i]))
+ if exist != nil {
+ m[keys[i]] = true
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ s.log.Debug("boltdb HAS method finished")
+ return m, nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+func (s *Plugin) Get(key string) ([]byte, error) {
+ const op = errors.Op("boltdb Get")
+	// catch keys consisting only of whitespace, e.g. " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ var val []byte
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ val = b.Get([]byte(key))
+
+ // try to decode values
+ if val != nil {
+ buf := bytes.NewReader(val)
+ decoder := gob.NewDecoder(buf)
+
+ var i string
+			err := decoder.Decode(&i)
+			if err != nil {
+				return errors.E(op, err)
+			}
+
+			// set the value (plain byte conversion of the decoded string)
+			val = []byte(i)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return val, nil
+}
+
+func (s *Plugin) MGet(keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("boltdb MGet")
+	// guard against a nil keys slice
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ err := s.DB.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+
+		// pre-allocate a reusable buffer for the encoded values
+		buf := new(bytes.Buffer)
+		var out string
+		buf.Grow(100)
+		for i := range keys {
+			value := b.Get([]byte(keys[i]))
+			buf.Write(value)
+			dec := gob.NewDecoder(buf)
+ if value != nil {
+ err := dec.Decode(&out)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ m[keys[i]] = out
+ buf.Reset()
+ out = ""
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return m, nil
+}
+
+// Set puts the K/V to the bolt
+func (s *Plugin) Set(items ...kv.Item) error {
+ const op = errors.Op("boltdb Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // start writable transaction
+ tx, err := s.DB.Begin(true)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ defer func() {
+ err = tx.Commit()
+ if err != nil {
+ errRb := tx.Rollback()
+ if errRb != nil {
+ s.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb)
+ }
+ }
+ }()
+
+ b := tx.Bucket(s.bucket)
+ // use access by index to avoid copying
+ for i := range items {
+		// performance note: buf and the encoder are re-created on every iteration,
+		// because a gob encoder only sends type info once per stream, so reusing it would break per-key decoding
+		buf := bytes.Buffer{}
+		encoder := gob.NewEncoder(&buf)
+		if items[i] == (kv.Item{}) {
+			return errors.E(op, errors.EmptyItem)
+		}
+
+ // Encode value
+ err = encoder.Encode(&items[i].Value)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ // buf.Bytes will copy the underlying slice. Take a look in case of performance problems
+ err = b.Put([]byte(items[i].Key), buf.Bytes())
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+		// if there are no errors and a TTL is set, store the key with its timeout in the map for the future GC check;
+		// no mutex is needed here, since sync.Map is used
+ if items[i].TTL != "" {
+ // check correctness of provided TTL
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ // Store key TTL in the separate map
+ s.gc.Store(items[i].Key, items[i].TTL)
+ }
+
+ buf.Reset()
+ }
+
+ return nil
+}
+
+// Delete removes the provided keys from the DB
+func (s *Plugin) Delete(keys ...string) error {
+ const op = errors.Op("boltdb Delete")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for _, key := range keys {
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ // start writable transaction
+ tx, err := s.DB.Begin(true)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ defer func() {
+ err = tx.Commit()
+ if err != nil {
+ errRb := tx.Rollback()
+ if errRb != nil {
+ s.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb)
+ }
+ }
+ }()
+
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+
+ for _, key := range keys {
+ err = b.Delete([]byte(key))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ }
+
+ return nil
+}
+
+// MExpire sets the expiration time for the provided keys.
+// If a key already has an expiration time, it is overwritten.
+func (s *Plugin) MExpire(items ...kv.Item) error {
+ const op = errors.Op("boltdb MExpire")
+ for i := range items {
+ if items[i].TTL == "" || strings.TrimSpace(items[i].Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // verify provided TTL
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ s.gc.Store(items[i].Key, items[i].TTL)
+ }
+ return nil
+}
+
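+// TTL returns the stored expiration timestamps (RFC3339 strings) for the given keys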
+func (s *Plugin) TTL(keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("boltdb TTL")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for i := range keys {
+ if item, ok := s.gc.Load(keys[i]); ok {
+			// the type assertion is safe here: only kv.Item.TTL strings are stored in the gc map
+ m[keys[i]] = item.(string)
+ }
+ }
+ return m, nil
+}
+
+// Close the DB connection
+func (s *Plugin) Close() error {
+ // stop the keys GC
+ s.stop <- struct{}{}
+ return s.DB.Close()
+}
+
+// RPC returns the associated RPC service.
+func (s *Plugin) RPC() interface{} {
+ return kv.NewRPCServer(s, s.log)
+}
+
+// Name returns plugin name
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// ========================= PRIVATE =================================
+
+func (s *Plugin) gcPhase() {
+ t := time.NewTicker(s.timeout)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+			// capture the current time once, before the loop starts, so every key is compared against the same instant
+ now := time.Now()
+ s.gc.Range(func(key, value interface{}) bool {
+ const op = errors.Op("gcPhase")
+ k := key.(string)
+ v, err := time.Parse(time.RFC3339, value.(string))
+ if err != nil {
+ return false
+ }
+
+ if now.After(v) {
+ // time expired
+ s.gc.Delete(k)
+ s.log.Debug("key deleted", "key", k)
+ err := s.DB.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket(s.bucket)
+ if b == nil {
+ return errors.E(op, errors.NoSuchBucket)
+ }
+ err := b.Delete([]byte(k))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ })
+ if err != nil {
+ s.log.Error("error during the gc phase of update", "error", err)
+					// TODO: this error is only logged, which means the ticker stays active;
+					// to prevent that, t.Stop() would need to be invoked
+ return false
+ }
+ }
+ return true
+ })
+ case <-s.stop:
+ return
+ }
+ }
+}
diff --git a/plugins/kv/boltdb/plugin_unit_test.go b/plugins/kv/boltdb/plugin_unit_test.go
new file mode 100644
index 00000000..2459e493
--- /dev/null
+++ b/plugins/kv/boltdb/plugin_unit_test.go
@@ -0,0 +1,531 @@
+package boltdb
+
+import (
+ "os"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/stretchr/testify/assert"
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+// newBoltClient instantiates a new BoltDB client.
+// The parameters are:
+// path string -- path to the database file (can be placed anywhere); if the file does not exist, it will be created
+// perm os.FileMode -- file permissions, for example 0777
+// options *bolt.Options -- boltDB options, such as timeouts, NoGrowSync and others
+// bucket string -- name of the bucket to use; should be UTF-8
+// ttl time.Duration -- TTL GC interval; defaults to one minute when zero
+func newBoltClient(path string, perm os.FileMode, options *bolt.Options, bucket string, ttl time.Duration) (kv.Storage, error) {
+ const op = errors.Op("newBoltClient")
+ db, err := bolt.Open(path, perm, options)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // bucket should be SET
+ if bucket == "" {
+ return nil, errors.E(op, errors.Str("bucket should be set"))
+ }
+
+ // create bucket if it does not exist
+ // tx.Commit invokes via the db.Update
+ err = db.Update(func(tx *bolt.Tx) error {
+ _, err = tx.CreateBucketIfNotExists([]byte(bucket))
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ // if TTL is not set, make it default
+ if ttl == 0 {
+ ttl = time.Minute
+ }
+
+ l, _ := zap.NewDevelopment()
+ s := &Plugin{
+ DB: db,
+ bucket: []byte(bucket),
+ stop: make(chan struct{}),
+ timeout: ttl,
+ gc: &sync.Map{},
+ log: logger.NewZapAdapter(l),
+ }
+
+ // start the TTL gc
+ go s.gcPhase()
+
+ return s, nil
+}
+
+func initStorage() kv.Storage {
+ storage, err := newBoltClient("rr.db", 0777, nil, "rr", time.Second)
+ if err != nil {
+ panic(err)
+ }
+ return storage
+}
+
+func cleanup(t *testing.T, path string) {
+ err := os.RemoveAll(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has("key")
+ assert.NoError(t, err)
+			// the base "key" should still be present
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete("key" + strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world2",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get("key")
+ assert.NoError(t, err)
+
+ if string(res) != "hello world" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete("key", "key2"))
+ // check that keys are not present
+ res, err = s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // check
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has("")
+ assert.Error(t, err)
+
+ _, err = s.Get("")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ assert.Error(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "asdf",
+ }))
+
+ _, err = s.Has("key")
+ assert.NoError(t, err)
+
+ assert.Error(t, s.Set(kv.Item{}))
+
+ err = s.Delete("")
+ assert.Error(t, err)
+
+ err = s.Delete("key", "")
+ assert.Error(t, err)
+
+ err = s.Delete("key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete("key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ cleanup(t, "rr.db")
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 2)
+ m, err := s.TTL("key", "key2")
+ assert.NoError(t, err)
+
+	// truncate the precision, e.g. 4.02342342 -> 4
+	keyTTL, err := strconv.Atoi(m["key"].(string)[0:1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// truncate the precision, e.g. 4.02342342 -> 4
+	key2TTL, err := strconv.Atoi(m["key2"].(string)[0:1])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+ assert.True(t, keyTTL < 5)
+ assert.True(t, key2TTL < 5)
+
+ time.Sleep(time.Second * 4)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
diff --git a/plugins/kv/interface.go b/plugins/kv/interface.go
new file mode 100644
index 00000000..c1367cdf
--- /dev/null
+++ b/plugins/kv/interface.go
@@ -0,0 +1,41 @@
+package kv
+
+// Item represents a general storage item
+type Item struct {
+ // Key of item
+ Key string
+ // Value of item
+ Value string
+	// TTL is the expiration time in RFC 3339 format; an empty value means no expiration
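+	// for example: time.Now().Add(time.Minute).Format(time.RFC3339)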
+ TTL string
+}
+
+// Storage represents single abstract storage.
+type Storage interface {
+ // Has checks if value exists.
+ Has(keys ...string) (map[string]bool, error)
+
+ // Get loads value content into a byte slice.
+ Get(key string) ([]byte, error)
+
+ // MGet loads content of multiple values
+ // Returns the map with existing keys and associated values
+ MGet(keys ...string) (map[string]interface{}, error)
+
+	// Set uploads items to the KV store with an optional TTL.
+	// An empty TTL means the item has no expiration.
+ Set(items ...Item) error
+
+	// MExpire sets the TTL for multiple keys
+ MExpire(items ...Item) error
+
+	// TTL returns the remaining time to live for the provided keys.
+	// Not supported by the memcached and boltdb drivers.
+ TTL(keys ...string) (map[string]interface{}, error)
+
+ // Delete one or multiple keys.
+ Delete(keys ...string) error
+
+ // Close closes the storage and underlying resources.
+ Close() error
+}
diff --git a/plugins/kv/memcached/config.go b/plugins/kv/memcached/config.go
new file mode 100644
index 00000000..62f29ef2
--- /dev/null
+++ b/plugins/kv/memcached/config.go
@@ -0,0 +1,10 @@
+package memcached
+
+type Config struct {
+	// Addr is a list of memcached addresses; port 11211 is used by default
+	Addr []string
+}
+
+func (s *Config) InitDefaults() {
+	s.Addr = []string{"localhost:11211"} // default address for memcached
+}
diff --git a/plugins/kv/memcached/plugin.go b/plugins/kv/memcached/plugin.go
new file mode 100644
index 00000000..f5111c04
--- /dev/null
+++ b/plugins/kv/memcached/plugin.go
@@ -0,0 +1,252 @@
+package memcached
+
+import (
+ "strings"
+ "time"
+
+ "github.com/bradfitz/gomemcache/memcache"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const PluginName = "memcached"
+
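+// EmptyItem is used in Set to reject items with no fields set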
+var EmptyItem = kv.Item{}
+
+type Plugin struct {
+ // config
+ cfg *Config
+ // logger
+ log logger.Logger
+ // memcached client
+ client *memcache.Client
+}
+
+// NewMemcachedClient returns a memcache client using the provided server(s)
+// with equal weight. If a server is listed multiple times,
+// it gets a proportional amount of weight.
+func NewMemcachedClient(url string) kv.Storage {
+ m := memcache.New(url)
+ return &Plugin{
+ client: m,
+ }
+}
+
+func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
+ const op = errors.Op("memcached init")
+ s.cfg = &Config{}
+ s.cfg.InitDefaults()
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ s.log = log
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ s.client = memcache.New(s.cfg.Addr...)
+ return errCh
+}
+
+// Stop is a no-op: the memcached client has no Close or similar method to terminate the connection
+func (s *Plugin) Stop() error {
+ return nil
+}
+
+// RPC returns the associated RPC service.
+func (s *Plugin) RPC() interface{} {
+ return kv.NewRPCServer(s, s.log)
+}
+
+// Name returns plugin user-friendly name
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// Has checks the key for existence
+func (s Plugin) Has(keys ...string) (map[string]bool, error) {
+ const op = errors.Op("memcached Has")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+ m := make(map[string]bool, len(keys))
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ exist, err := s.client.Get(keys[i])
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if exist != nil {
+ m[keys[i]] = true
+ }
+ }
+ return m, nil
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a
+// memcache cache miss. The key must be at most 250 bytes in length.
+func (s Plugin) Get(key string) ([]byte, error) {
+ const op = errors.Op("memcached Get")
+	// catch keys consisting only of whitespace, e.g. " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ data, err := s.client.Get(key)
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if data != nil {
+ // return the value by the key
+ return data.Value, nil
+ }
+	// data is nil for some reason and the error is also nil
+ return nil, nil
+}
+
+// MGet returns a map with string keys and []byte values.
+func (s Plugin) MGet(keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("memcached MGet")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+ for i := range keys {
+ // Here also MultiGet
+ data, err := s.client.Get(keys[i])
+ // ErrCacheMiss means that a Get failed because the item wasn't present.
+ if err != nil && err != memcache.ErrCacheMiss {
+ return nil, err
+ }
+ if data != nil {
+ m[keys[i]] = data.Value
+ }
+ }
+
+ return m, nil
+}
+
+// Set sets the KV pairs. Keys should be 250 bytes maximum
+// TTL:
+// Expiration is the cache expiration time, in seconds: either a relative
+// time from now (up to 1 month), or an absolute Unix epoch time.
+// Zero means the Item has no expiration time.
+func (s Plugin) Set(items ...kv.Item) error {
+ const op = errors.Op("memcached Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ for i := range items {
+ if items[i] == EmptyItem {
+ return errors.E(op, errors.EmptyItem)
+ }
+
+ // pre-allocate item
+ memcachedItem := &memcache.Item{
+ Key: items[i].Key,
+ // unsafe convert
+ Value: []byte(items[i].Value),
+ Flags: 0,
+ }
+
+ // add additional TTL in case of TTL isn't empty
+ if items[i].TTL != "" {
+ // verify the TTL
+ t, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return err
+ }
+ memcachedItem.Expiration = int32(t.Unix())
+ }
+
+ err := s.client.Set(memcachedItem)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Expiration is the cache expiration time, in seconds: either a relative
+// time from now (up to 1 month), or an absolute Unix epoch time.
+// Zero means the Item has no expiration time.
+func (s Plugin) MExpire(items ...kv.Item) error {
+ const op = errors.Op("memcached MExpire")
+ for i := range items {
+ if items[i].TTL == "" || strings.TrimSpace(items[i].Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // verify provided TTL
+ t, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return err
+ }
+
+ // Touch updates the expiry for the given key. The seconds parameter is either
+ // a Unix timestamp or, if seconds is less than 1 month, the number of seconds
+ // into the future at which time the item will expire. Zero means the item has
+ // no expiration time. ErrCacheMiss is returned if the key is not in the cache.
+ // The key must be at most 250 bytes in length.
+ err = s.client.Touch(items[i].Key, int32(t.Unix()))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TTL would return the remaining time to live for the given keys,
+// but this is not supported by memcached (see the issue linked below).
+func (s Plugin) TTL(keys ...string) (map[string]interface{}, error) {
+	const op = errors.Op("memcached TTL")
+ return nil, errors.E(op, errors.Str("not valid request for memcached, see https://github.com/memcached/memcached/issues/239"))
+}
+
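+// Delete removes the provided keys from memcached.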
+func (s Plugin) Delete(keys ...string) error {
+	const op = errors.Op("memcached Delete")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // should not be empty keys
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ for i := range keys {
+ err := s.client.Delete(keys[i])
+		// ErrCacheMiss means the item wasn't present; deleting a missing key is not an error
+ if err != nil && err != memcache.ErrCacheMiss {
+ return err
+ }
+ }
+ return nil
+}
+
+func (s Plugin) Close() error {
+ return nil
+}
diff --git a/plugins/kv/memcached/plugin_unit_test.go b/plugins/kv/memcached/plugin_unit_test.go
new file mode 100644
index 00000000..3d37748b
--- /dev/null
+++ b/plugins/kv/memcached/plugin_unit_test.go
@@ -0,0 +1,432 @@
+package memcached
+
+import (
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/stretchr/testify/assert"
+)
+
+func initStorage() kv.Storage {
+ return NewMemcachedClient("localhost:11211")
+}
+
+func cleanup(t *testing.T, s kv.Storage, keys ...string) {
+ err := s.Delete(keys...)
+ if err != nil {
+ t.Fatalf("error during cleanup: %s", err.Error())
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get("key")
+ assert.NoError(t, err)
+
+ if string(res) != "hello world" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete("key", "key2"))
+ // check that keys are not present
+ res, err = s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ // check
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has("")
+ assert.Error(t, err)
+
+ _, err = s.Get("")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.Error(t, s.Set(kv.Item{}))
+
+ err = s.Delete("")
+ assert.Error(t, err)
+
+ err = s.Delete("key", "")
+ assert.Error(t, err)
+
+ err = s.Delete("key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete("key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+	// keys are now present
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has("key")
+ assert.NoError(t, err)
+			// the base "key" should still be present
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete("key" + strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
diff --git a/plugins/kv/memory/config.go b/plugins/kv/memory/config.go
new file mode 100644
index 00000000..0816f734
--- /dev/null
+++ b/plugins/kv/memory/config.go
@@ -0,0 +1,15 @@
+package memory
+
+// Config is the default config for the in-memory driver
+type Config struct {
+	// Enabled or disabled (true or false)
+	Enabled bool
+	// Interval between GC runs, in seconds
+	Interval int
+}
+
+// InitDefaults turns the driver off by default
+func (c *Config) InitDefaults() {
+ c.Enabled = false
+ c.Interval = 60 // seconds
+}
diff --git a/plugins/kv/memory/plugin.go b/plugins/kv/memory/plugin.go
new file mode 100644
index 00000000..d2d3721b
--- /dev/null
+++ b/plugins/kv/memory/plugin.go
@@ -0,0 +1,262 @@
+package memory
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// PluginName is the user-friendly name for the plugin
+const PluginName = "memory"
+
+type Plugin struct {
+	// heap is the in-memory map for the key-value pairs
+ heap sync.Map
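+	// stop is used to stop the background GC goroutine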
+ stop chan struct{}
+
+ log logger.Logger
+ cfg *Config
+}
+
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("in-memory storage init")
+ s.cfg = &Config{}
+ s.cfg.InitDefaults()
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ s.log = log
+
+ s.stop = make(chan struct{}, 1)
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ // start in-memory gc for kv
+ go s.gc()
+
+ return errCh
+}
+
+func (s *Plugin) Stop() error {
+ const op = errors.Op("in-memory storage stop")
+ err := s.Close()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ return nil
+}
+
+func (s *Plugin) Has(keys ...string) (map[string]bool, error) {
+ const op = errors.Op("in-memory storage Has")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+ m := make(map[string]bool)
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ if _, ok := s.heap.Load(keys[i]); ok {
+ m[keys[i]] = true
+ }
+ }
+
+ return m, nil
+}
+
+func (s *Plugin) Get(key string) ([]byte, error) {
+ const op = errors.Op("in-memory storage Get")
+ // reject keys that consist only of whitespace, e.g. " "
+ keyTrimmed := strings.TrimSpace(key)
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+
+ if data, exist := s.heap.Load(key); exist {
+ // the type assertion cannot panic here:
+ // only kv.Item values are stored, see the Set function
+ return []byte(data.(kv.Item).Value), nil
+ }
+ return nil, nil
+}
+
+func (s *Plugin) MGet(keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("in-memory storage MGet")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // keys must not be empty
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for i := range keys {
+ if value, ok := s.heap.Load(keys[i]); ok {
+ m[keys[i]] = value.(kv.Item).Value
+ }
+ }
+
+ return m, nil
+}
+
+func (s *Plugin) Set(items ...kv.Item) error {
+ const op = errors.Op("in-memory storage Set")
+ if items == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ for i := range items {
+ // TTL is set
+ if items[i].TTL != "" {
+ // check the TTL in the item
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return err
+ }
+ }
+
+ s.heap.Store(items[i].Key, items[i])
+ }
+ return nil
+}
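+
+// Set usage sketch (illustration only): when a TTL is provided it must be an
+// RFC3339 timestamp, e.g.
+//
+//   _ = s.Set(kv.Item{
+//       Key:   "session",
+//       Value: "data",
+//       TTL:   time.Now().Add(time.Minute).Format(time.RFC3339),
+//   })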
+
+// MExpire sets the expiration time for the given keys.
+// If a key already has an expiration time, it is overwritten.
+func (s *Plugin) MExpire(items ...kv.Item) error {
+ const op = errors.Op("in-memory storage MExpire")
+ for i := range items {
+ if items[i].TTL == "" || strings.TrimSpace(items[i].Key) == "" {
+ return errors.E(op, errors.Str("should set timeout and at least one key"))
+ }
+
+ // if the key exists, update its TTL while keeping the stored value
+ if pItem, ok := s.heap.Load(items[i].Key); ok {
+ // check that time is correct
+ _, err := time.Parse(time.RFC3339, items[i].TTL)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ tmp := pItem.(kv.Item)
+ // assume the provided TTL is in the future; the in-memory driver is intended
+ // for local development and testing, so the logic here is intentionally simple
+ s.heap.Store(items[i].Key, kv.Item{
+ Key: items[i].Key,
+ Value: tmp.Value,
+ TTL: items[i].TTL,
+ })
+ }
+ }
+
+ return nil
+}
+
+func (s *Plugin) TTL(keys ...string) (map[string]interface{}, error) {
+ const op = errors.Op("in-memory storage TTL")
+ if keys == nil {
+ return nil, errors.E(op, errors.NoKeys)
+ }
+
+ // keys must not be empty
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return nil, errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ m := make(map[string]interface{}, len(keys))
+
+ for i := range keys {
+ if item, ok := s.heap.Load(keys[i]); ok {
+ m[keys[i]] = item.(kv.Item).TTL
+ }
+ }
+ return m, nil
+}
+
+func (s *Plugin) Delete(keys ...string) error {
+ const op = errors.Op("in-memory storage Delete")
+ if keys == nil {
+ return errors.E(op, errors.NoKeys)
+ }
+
+ // keys must not be empty
+ for i := range keys {
+ keyTrimmed := strings.TrimSpace(keys[i])
+ if keyTrimmed == "" {
+ return errors.E(op, errors.EmptyKey)
+ }
+ }
+
+ for i := range keys {
+ s.heap.Delete(keys[i])
+ }
+ return nil
+}
+
+// Close stops the in-memory storage GC routine
+func (s *Plugin) Close() error {
+ s.stop <- struct{}{}
+ return nil
+}
+
+// RPC returns the associated RPC service.
+func (s *Plugin) RPC() interface{} {
+ return kv.NewRPCServer(s, s.log)
+}
+
+// Name returns plugin user-friendly name
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// ================================== PRIVATE ======================================
+
+func (s *Plugin) gc() {
+ // TODO check
+ ticker := time.NewTicker(time.Duration(s.cfg.Interval) * time.Second)
+ for {
+ select {
+ case <-s.stop:
+ ticker.Stop()
+ return
+ case now := <-ticker.C:
+ // on every tick, drop keys whose TTL has passed
+ s.heap.Range(func(key, value interface{}) bool {
+ v := value.(kv.Item)
+ if v.TTL == "" {
+ return true
+ }
+
+ t, err := time.Parse(time.RFC3339, v.TTL)
+ if err != nil {
+ return false
+ }
+
+ if now.After(t) {
+ s.log.Debug("key deleted", "key", key)
+ s.heap.Delete(key)
+ }
+ return true
+ })
+ }
+ }
+}
diff --git a/plugins/kv/memory/plugin_unit_test.go b/plugins/kv/memory/plugin_unit_test.go
new file mode 100644
index 00000000..d3b24860
--- /dev/null
+++ b/plugins/kv/memory/plugin_unit_test.go
@@ -0,0 +1,473 @@
+package memory
+
+import (
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/stretchr/testify/assert"
+ "go.uber.org/zap"
+)
+
+func initStorage() kv.Storage {
+ p := &Plugin{
+ stop: make(chan struct{}),
+ }
+ p.cfg = &Config{
+ Enabled: true,
+ Interval: 1,
+ }
+
+ l, _ := zap.NewDevelopment()
+ p.log = logger.NewZapAdapter(l)
+
+ go p.gc()
+
+ return p
+}
+
+func cleanup(t *testing.T, s kv.Storage, keys ...string) {
+ err := s.Delete(keys...)
+ if err != nil {
+ t.Fatalf("error during cleanup: %s", err.Error())
+ }
+}
+
+func TestStorage_Has(t *testing.T) {
+ s := initStorage()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+}
+
+func TestStorage_Has_Set_Has(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ // both keys should be present after Set
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+}
+
+func TestStorage_Has_Set_MGet(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ // both keys should be present after Set
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_Has_Set_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ // both keys should be present after Set
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ res, err := s.Get("key")
+ assert.NoError(t, err)
+
+ if string(res) != "value" {
+ t.Fatal("wrong value by key")
+ }
+}
+
+func TestStorage_Set_Del_Get(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ // both keys should be present after Set
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ // check that keys are present
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+
+ assert.NoError(t, s.Delete("key", "key2"))
+ // check that the keys are no longer present
+ res, err = s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 0)
+}
+
+func TestStorage_Set_GetM(t *testing.T) {
+ s := initStorage()
+
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: "",
+ }))
+
+ res, err := s.MGet("key", "key2")
+ assert.NoError(t, err)
+ assert.Len(t, res, 2)
+}
+
+func TestStorage_MExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+ // set timeout to 5 sec
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ i1 := kv.Item{
+ Key: "key",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ i2 := kv.Item{
+ Key: "key2",
+ Value: "",
+ TTL: nowPlusFive,
+ }
+ assert.NoError(t, s.MExpire(i1, i2))
+
+ time.Sleep(time.Second * 6)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestNilAndWrongArgs(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ if err := s.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ // check
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+
+ _, err = s.Has("")
+ assert.Error(t, err)
+
+ _, err = s.Get("")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.Get(" ")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", "")
+ assert.Error(t, err)
+
+ _, err = s.MGet("key", "key2", " ")
+ assert.Error(t, err)
+
+ assert.NoError(t, s.Set(kv.Item{}))
+ _, err = s.Has("key")
+ assert.NoError(t, err)
+
+ err = s.Delete("")
+ assert.Error(t, err)
+
+ err = s.Delete("key", "")
+ assert.Error(t, err)
+
+ err = s.Delete("key", " ")
+ assert.Error(t, err)
+
+ err = s.Delete("key")
+ assert.NoError(t, err)
+}
+
+func TestStorage_SetExpire_TTL(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // ensure that storage is clean
+ v, err := s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ nowPlusFive := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+
+ // set timeout to 5 sec
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "value",
+ TTL: nowPlusFive,
+ },
+ kv.Item{
+ Key: "key2",
+ Value: "value",
+ TTL: nowPlusFive,
+ }))
+
+ time.Sleep(time.Second * 2)
+ m, err := s.TTL("key", "key2")
+ assert.NoError(t, err)
+
+ // remove a precision 4.02342342 -> 4
+ keyTTL, err := strconv.Atoi(m["key"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // remove a precision 4.02342342 -> 4
+ key2TTL, err := strconv.Atoi(m["key2"].(string)[0:1])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.True(t, keyTTL < 5)
+ assert.True(t, key2TTL < 5)
+
+ time.Sleep(time.Second * 4)
+
+ // ensure that storage is clean
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ assert.False(t, v["key"])
+ assert.False(t, v["key2"])
+}
+
+func TestConcurrentReadWriteTransactions(t *testing.T) {
+ s := initStorage()
+ defer func() {
+ cleanup(t, s, "key", "key2")
+ if err := s.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ v, err := s.Has("key")
+ assert.NoError(t, err)
+ // no such key
+ assert.False(t, v["key"])
+
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key",
+ Value: "hello world",
+ TTL: "",
+ }, kv.Item{
+ Key: "key2",
+ Value: "hello world",
+ TTL: "",
+ }))
+
+ v, err = s.Has("key", "key2")
+ assert.NoError(t, err)
+ // both keys should be present after Set
+ assert.True(t, v["key"])
+ assert.True(t, v["key2"])
+
+ wg := &sync.WaitGroup{}
+ wg.Add(3)
+
+ m := &sync.RWMutex{}
+ // concurrently set the keys
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ // set is writable transaction
+ // it should stop readable
+ assert.NoError(t, s.Set(kv.Item{
+ Key: "key" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }, kv.Item{
+ Key: "key2" + strconv.Itoa(i),
+ Value: "hello world" + strconv.Itoa(i),
+ TTL: "",
+ }))
+ m.Unlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.RLock()
+ v, err = s.Has("key")
+ assert.NoError(t, err)
+ // "key" is set before the goroutines start and is never deleted
+ assert.True(t, v["key"])
+ m.RUnlock()
+ }
+ }(s)
+
+ // should be no errors
+ go func(s kv.Storage) {
+ defer wg.Done()
+ for i := 0; i <= 1000; i++ {
+ m.Lock()
+ err = s.Delete("key" + strconv.Itoa(i))
+ assert.NoError(t, err)
+ m.Unlock()
+ }
+ }(s)
+
+ wg.Wait()
+}
diff --git a/plugins/kv/rpc.go b/plugins/kv/rpc.go
new file mode 100644
index 00000000..751f0d12
--- /dev/null
+++ b/plugins/kv/rpc.go
@@ -0,0 +1,110 @@
+package kv
+
+import (
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// RPCServer is an RPC wrapper around a Storage plugin
+type RPCServer struct {
+ // svc is a plugin implementing Storage interface
+ svc Storage
+ // Logger
+ log logger.Logger
+}
+
+// NewRPCServer constructs an RPC server for the particular plugin
+func NewRPCServer(srv Storage, log logger.Logger) *RPCServer {
+ return &RPCServer{
+ svc: srv,
+ log: log,
+ }
+}
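+
+// Usage sketch (illustration only): assuming the storage plugin is registered
+// under its name (e.g. "memory"), an RPC client could call the exposed methods
+// roughly like this; the client variable is hypothetical:
+//
+//   var found map[string]bool
+//   err := client.Call("memory.Has", []string{"key"}, &found)
+//
+//   ok := false
+//   err = client.Call("memory.Set", []kv.Item{{Key: "key", Value: "value"}}, &ok)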
+
+// Has reports which of the given keys exist in the storage
+func (r *RPCServer) Has(in []string, res *map[string]bool) error {
+ const op = errors.Op("rpc server Has")
+ ret, err := r.svc.Has(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // update the value in the pointer
+ *res = ret
+ return nil
+}
+
+// Set stores the given items
+func (r *RPCServer) Set(in []Item, ok *bool) error {
+ const op = errors.Op("rpc server Set")
+
+ err := r.svc.Set(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ *ok = true
+ return nil
+}
+
+// MGet returns the values for the given keys
+func (r *RPCServer) MGet(in []string, res *map[string]interface{}) error {
+ const op = errors.Op("rpc server MGet")
+ ret, err := r.svc.MGet(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // update return value
+ *res = ret
+ return nil
+}
+
+// MExpire sets the expiration time for the given items
+func (r *RPCServer) MExpire(in []Item, ok *bool) error {
+ const op = errors.Op("rpc server MExpire")
+
+ err := r.svc.MExpire(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ *ok = true
+ return nil
+}
+
+// TTL returns the expiration times for the given keys
+func (r *RPCServer) TTL(in []string, res *map[string]interface{}) error {
+ const op = errors.Op("rpc server TTL")
+
+ ret, err := r.svc.TTL(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ *res = ret
+ return nil
+}
+
+// Delete removes the given keys from the storage
+func (r *RPCServer) Delete(in []string, ok *bool) error {
+ const op = errors.Op("rpc server Delete")
+ err := r.svc.Delete(in...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ *ok = true
+ return nil
+}
+
+// Close closes the underlying storage
+func (r *RPCServer) Close(storage string, ok *bool) error {
+ const op = errors.Op("rpc server Close")
+ err := r.svc.Close()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ *ok = true
+
+ return nil
+}
diff --git a/plugins/logger/config.go b/plugins/logger/config.go
new file mode 100644
index 00000000..f7a5742c
--- /dev/null
+++ b/plugins/logger/config.go
@@ -0,0 +1,94 @@
+package logger
+
+import (
+ "strings"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// ChannelConfig configures loggers per channel.
+type ChannelConfig struct {
+ // Channels defines a dedicated logger configuration per channel. By default, a channel logger is allocated as a named child of the base logger.
+ Channels map[string]Config `json:"channels" yaml:"channels"`
+}
+
+type Config struct {
+ // Mode configures logger based on some default template (development, production, off).
+ Mode string `json:"mode" yaml:"mode"`
+
+ // Level is the minimum enabled logging level; it is parsed into a zap
+ // atomic level when the logger is built.
+ Level string `json:"level" yaml:"level"`
+
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+
+ // Output is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ Output []string `json:"output" yaml:"output"`
+
+ // ErrorOutput is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutput []string `json:"errorOutput" yaml:"errorOutput"`
+}
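+
+// A minimal example of the "logs" section this config maps to (illustration
+// only; YAML keys are assumed to mirror the field names above):
+//
+//   logs:
+//     mode: development
+//     level: debug
+//     encoding: console
+//     output: ["stderr"]
+//     errorOutput: ["stderr"]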
+
+// BuildLogger converts the config into a zap configuration and builds a logger from it.
+func (cfg *Config) BuildLogger() (*zap.Logger, error) {
+ var zCfg zap.Config
+ switch strings.ToLower(cfg.Mode) {
+ case "off", "none":
+ return zap.NewNop(), nil
+ case "production":
+ zCfg = zap.NewProductionConfig()
+ case "development":
+ zCfg = zap.NewDevelopmentConfig()
+ default:
+ zCfg = zap.Config{
+ Level: zap.NewAtomicLevelAt(zap.DebugLevel),
+ Encoding: "console",
+ EncoderConfig: zapcore.EncoderConfig{
+ MessageKey: "message",
+ LevelKey: "level",
+ TimeKey: "time",
+ NameKey: "name",
+ EncodeName: ColoredHashedNameEncoder,
+ EncodeLevel: ColoredLevelEncoder,
+ EncodeTime: UTCTimeEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ },
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+ }
+
+ if cfg.Level != "" {
+ level := zap.NewAtomicLevel()
+ if err := level.UnmarshalText([]byte(cfg.Level)); err == nil {
+ zCfg.Level = level
+ }
+ }
+
+ if cfg.Encoding != "" {
+ zCfg.Encoding = cfg.Encoding
+ }
+
+ if len(cfg.Output) != 0 {
+ zCfg.OutputPaths = cfg.Output
+ }
+
+ if len(cfg.ErrorOutput) != 0 {
+ zCfg.ErrorOutputPaths = cfg.ErrorOutput
+ }
+
+ // todo: https://github.com/uber-go/zap/blob/master/FAQ.md#does-zap-support-log-rotation
+
+ return zCfg.Build()
+}
diff --git a/plugins/logger/encoder.go b/plugins/logger/encoder.go
new file mode 100644
index 00000000..4ff583c4
--- /dev/null
+++ b/plugins/logger/encoder.go
@@ -0,0 +1,66 @@
+package logger
+
+import (
+ "hash/fnv"
+ "strings"
+ "time"
+
+ "github.com/fatih/color"
+ "go.uber.org/zap/zapcore"
+)
+
+var colorMap = []func(string, ...interface{}) string{
+ color.HiYellowString,
+ color.HiGreenString,
+ color.HiBlueString,
+ color.HiRedString,
+ color.HiCyanString,
+ color.HiMagentaString,
+}
+
+// ColoredLevelEncoder colorizes log levels.
+func ColoredLevelEncoder(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
+ switch level {
+ case zapcore.DebugLevel:
+ enc.AppendString(color.HiWhiteString(level.CapitalString()))
+ case zapcore.InfoLevel:
+ enc.AppendString(color.HiCyanString(level.CapitalString()))
+ case zapcore.WarnLevel:
+ enc.AppendString(color.HiYellowString(level.CapitalString()))
+ case zapcore.ErrorLevel, zapcore.DPanicLevel:
+ enc.AppendString(color.HiRedString(level.CapitalString()))
+ case zapcore.PanicLevel, zapcore.FatalLevel:
+ enc.AppendString(color.HiMagentaString(level.CapitalString()))
+ }
+}
+
+// ColoredNameEncoder colorizes service names.
+func ColoredNameEncoder(s string, enc zapcore.PrimitiveArrayEncoder) {
+ if len(s) < 12 {
+ s += strings.Repeat(" ", 12-len(s))
+ }
+
+ enc.AppendString(color.HiGreenString(s))
+}
+
+// ColoredHashedNameEncoder colorizes service names and assigns different colors to different names.
+func ColoredHashedNameEncoder(s string, enc zapcore.PrimitiveArrayEncoder) {
+ if len(s) < 12 {
+ s += strings.Repeat(" ", 12-len(s))
+ }
+
+ colorID := stringHash(s, len(colorMap))
+ enc.AppendString(colorMap[colorID](s))
+}
+
+// UTCTimeEncoder encodes time into short UTC specific timestamp.
+func UTCTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+ enc.AppendString(t.UTC().Format("2006/01/02 15:04:05"))
+}
+
+// stringHash returns a hash of name reduced modulo base
+func stringHash(name string, base int) int {
+ h := fnv.New32a()
+ _, _ = h.Write([]byte(name))
+ return int(h.Sum32()) % base
+}
diff --git a/plugins/logger/interface.go b/plugins/logger/interface.go
new file mode 100644
index 00000000..876629a9
--- /dev/null
+++ b/plugins/logger/interface.go
@@ -0,0 +1,16 @@
+package logger
+
+type (
+ // Logger is a general RR log interface
+ Logger interface {
+ Debug(msg string, keyvals ...interface{})
+ Info(msg string, keyvals ...interface{})
+ Warn(msg string, keyvals ...interface{})
+ Error(msg string, keyvals ...interface{})
+ }
+)
+
+// WithLogger describes a logger that can create a child logger with added structured context
+type WithLogger interface {
+ With(keyvals ...interface{}) Logger
+}
diff --git a/plugins/logger/plugin.go b/plugins/logger/plugin.go
new file mode 100644
index 00000000..01bf5cc0
--- /dev/null
+++ b/plugins/logger/plugin.go
@@ -0,0 +1,69 @@
+package logger
+
+import (
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "go.uber.org/zap"
+)
+
+// PluginName declares plugin name.
+const PluginName = "logs"
+
+// ZapLogger manages zap logger.
+type ZapLogger struct {
+ base *zap.Logger
+ cfg Config
+ channels ChannelConfig
+}
+
+// Init logger service.
+func (z *ZapLogger) Init(cfg config.Configurer) error {
+ const op = errors.Op("zap logger init")
+ err := cfg.UnmarshalKey(PluginName, &z.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ err = cfg.UnmarshalKey(PluginName, &z.channels)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ z.base, err = z.cfg.BuildLogger()
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+ return nil
+}
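+
+// A sketch of a per-channel override under the same "logs" key (illustration
+// only; the channel name "http" is hypothetical):
+//
+//   logs:
+//     mode: production
+//     channels:
+//       http:
+//         mode: development
+//         level: debug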
+
+// DefaultLogger returns default logger.
+func (z *ZapLogger) DefaultLogger() (Logger, error) {
+ return NewZapAdapter(z.base), nil
+}
+
+// NamedLogger returns logger dedicated to the specific channel. Similar to Named() but also reads the core params.
+func (z *ZapLogger) NamedLogger(name string) (Logger, error) {
+ if cfg, ok := z.channels.Channels[name]; ok {
+ l, err := cfg.BuildLogger()
+ if err != nil {
+ return nil, err
+ }
+ return NewZapAdapter(l), nil
+ }
+
+ return NewZapAdapter(z.base.Named(name)), nil
+}
+
+// ServiceLogger returns a logger dedicated to the given endure.Named service.
+func (z *ZapLogger) ServiceLogger(n endure.Named) (Logger, error) {
+ return z.NamedLogger(n.Name())
+}
+
+// Provides declares factory methods.
+func (z *ZapLogger) Provides() []interface{} {
+ return []interface{}{
+ z.ServiceLogger,
+ z.DefaultLogger,
+ }
+}
diff --git a/plugins/logger/zap_adapter.go b/plugins/logger/zap_adapter.go
new file mode 100644
index 00000000..0a0855b8
--- /dev/null
+++ b/plugins/logger/zap_adapter.go
@@ -0,0 +1,56 @@
+package logger
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+)
+
+type ZapAdapter struct {
+ zl *zap.Logger
+}
+
+// NewZapAdapter creates a ZapAdapter that exposes zap through the general Logger interface
+func NewZapAdapter(zapLogger *zap.Logger) *ZapAdapter {
+ return &ZapAdapter{
+ zl: zapLogger.WithOptions(zap.AddCallerSkip(1)),
+ }
+}
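+
+// Usage sketch (illustration only): keyvals are passed as alternating
+// key/value pairs, e.g.
+//
+//   l, _ := zap.NewDevelopment()
+//   log := NewZapAdapter(l)
+//   log.Debug("http request", "method", "GET", "status", 200)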
+
+func (log *ZapAdapter) fields(keyvals []interface{}) []zap.Field {
+ // we should have even number of keys and values
+ if len(keyvals)%2 != 0 {
+ return []zap.Field{zap.Error(fmt.Errorf("odd number of keyvals pairs: %v", keyvals))}
+ }
+
+ var fields []zap.Field
+ for i := 0; i < len(keyvals); i += 2 {
+ key, ok := keyvals[i].(string)
+ if !ok {
+ key = fmt.Sprintf("%v", keyvals[i])
+ }
+ fields = append(fields, zap.Any(key, keyvals[i+1]))
+ }
+
+ return fields
+}
+
+func (log *ZapAdapter) Debug(msg string, keyvals ...interface{}) {
+ log.zl.Debug(msg, log.fields(keyvals)...)
+}
+
+func (log *ZapAdapter) Info(msg string, keyvals ...interface{}) {
+ log.zl.Info(msg, log.fields(keyvals)...)
+}
+
+func (log *ZapAdapter) Warn(msg string, keyvals ...interface{}) {
+ log.zl.Warn(msg, log.fields(keyvals)...)
+}
+
+func (log *ZapAdapter) Error(msg string, keyvals ...interface{}) {
+ log.zl.Error(msg, log.fields(keyvals)...)
+}
+
+func (log *ZapAdapter) With(keyvals ...interface{}) Logger {
+ return NewZapAdapter(log.zl.With(log.fields(keyvals)...))
+}
diff --git a/service/metrics/config.go b/plugins/metrics/config.go
index 023eff27..9459bc9b 100644
--- a/service/metrics/config.go
+++ b/plugins/metrics/config.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
- "github.com/spiral/roadrunner/service"
)
// Config configures metrics service.
@@ -59,11 +58,6 @@ type Collector struct {
Objectives map[float64]float64 `json:"objectives"`
}
-// Hydrate configuration.
-func (c *Config) Hydrate(cfg service.Config) error {
- return cfg.Unmarshal(c)
-}
-
// register application specific metrics.
func (c *Config) getCollectors() (map[string]prometheus.Collector, error) {
if c.Collect == nil {
@@ -138,3 +132,7 @@ func (c *Config) getCollectors() (map[string]prometheus.Collector, error) {
return collectors, nil
}
+
+func (c *Config) InitDefaults() {
+
+}
diff --git a/service/metrics/config_test.go b/plugins/metrics/config_test.go
index 5153ead1..665ec9cd 100644
--- a/service/metrics/config_test.go
+++ b/plugins/metrics/config_test.go
@@ -1,48 +1,56 @@
package metrics
import (
+ "bytes"
"testing"
- json "github.com/json-iterator/go"
+ j "github.com/json-iterator/go"
"github.com/prometheus/client_golang/prometheus"
- "github.com/spiral/roadrunner/service"
"github.com/stretchr/testify/assert"
)
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
+var json = j.ConfigCompatibleWithStandardLibrary
func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := &mockCfg{`{"request": {"From": "Something"}}`}
+ cfg := `{"request": {"From": "Something"}}`
c := &Config{}
+ f := new(bytes.Buffer)
+ f.WriteString(cfg)
- assert.NoError(t, c.Hydrate(cfg))
+ err := json.Unmarshal(f.Bytes(), &c)
+ if err != nil {
+ t.Fatal(err)
+ }
}
func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &mockCfg{`{"dir": "/dir/"`}
+ cfg := `{"dir": "/dir/"`
c := &Config{}
- assert.Error(t, c.Hydrate(cfg))
+ f := new(bytes.Buffer)
+ f.WriteString(cfg)
+
+ err := json.Unmarshal(f.Bytes(), &c)
+ assert.Error(t, err)
}
func Test_Config_Metrics(t *testing.T) {
- cfg := &mockCfg{`{
+ cfg := `{
"collect":{
"metric1":{"type": "gauge"},
"metric2":{ "type": "counter"},
"metric3":{"type": "summary"},
"metric4":{"type": "histogram"}
}
-}`}
+}`
c := &Config{}
+ f := new(bytes.Buffer)
+ f.WriteString(cfg)
- assert.NoError(t, c.Hydrate(cfg))
+ err := json.Unmarshal(f.Bytes(), &c)
+ if err != nil {
+ t.Fatal(err)
+ }
m, err := c.getCollectors()
assert.NoError(t, err)
@@ -54,17 +62,22 @@ func Test_Config_Metrics(t *testing.T) {
}
func Test_Config_MetricsVector(t *testing.T) {
- cfg := &mockCfg{`{
+ cfg := `{
"collect":{
"metric1":{"type": "gauge","labels":["label"]},
"metric2":{ "type": "counter","labels":["label"]},
"metric3":{"type": "summary","labels":["label"]},
"metric4":{"type": "histogram","labels":["label"]}
}
-}`}
+}`
c := &Config{}
+ f := new(bytes.Buffer)
+ f.WriteString(cfg)
- assert.NoError(t, c.Hydrate(cfg))
+ err := json.Unmarshal(f.Bytes(), &c)
+ if err != nil {
+ t.Fatal(err)
+ }
m, err := c.getCollectors()
assert.NoError(t, err)
diff --git a/plugins/metrics/doc.go b/plugins/metrics/doc.go
new file mode 100644
index 00000000..1abe097a
--- /dev/null
+++ b/plugins/metrics/doc.go
@@ -0,0 +1 @@
+package metrics
diff --git a/plugins/metrics/interface.go b/plugins/metrics/interface.go
new file mode 100644
index 00000000..87ba4017
--- /dev/null
+++ b/plugins/metrics/interface.go
@@ -0,0 +1,7 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+type StatProvider interface {
+ MetricsCollector() []prometheus.Collector
+}
diff --git a/plugins/metrics/plugin.go b/plugins/metrics/plugin.go
new file mode 100644
index 00000000..fb9096a1
--- /dev/null
+++ b/plugins/metrics/plugin.go
@@ -0,0 +1,229 @@
+package metrics
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "golang.org/x/sys/cpu"
+)
+
+const (
+ // PluginName declares plugin name.
+ PluginName = "metrics"
+ // maxHeaderSize declares max header size for prometheus server
+ maxHeaderSize = 1024 * 1024 * 100 // 104MB
+)
+
+type statsProvider struct {
+ collectors []prometheus.Collector
+ name string
+}
+
+// Plugin to manage application metrics using Prometheus.
+type Plugin struct {
+ cfg Config
+ log logger.Logger
+ mu sync.Mutex // guards the http server
+ http *http.Server
+ collectors sync.Map // stores statsProvider values keyed by collector name
+ registry *prometheus.Registry
+}
+
+// Init service.
+func (m *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("metrics init")
+ err := cfg.UnmarshalKey(PluginName, &m.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ // TODO figure out what is Init
+ m.cfg.InitDefaults()
+
+ m.log = log
+ m.registry = prometheus.NewRegistry()
+
+ // register the default process collector
+ err = m.registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // register the default Go runtime collector
+ err = m.registry.Register(prometheus.NewGoCollector())
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ collectors, err := m.cfg.getCollectors()
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ // Register will be invoked later, in the Serve method
+ for k, v := range collectors {
+ m.collectors.Store(k, statsProvider{
+ collectors: []prometheus.Collector{v},
+ name: k,
+ })
+ }
+ return nil
+}
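+
+// A minimal example of the "metrics" section read above (illustration only;
+// the address value is hypothetical, collector entries follow config_test.go):
+//
+//   metrics:
+//     address: "127.0.0.1:2112"
+//     collect:
+//       app_metric_counter:
+//         type: counter
+//         labels: ["label"]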
+
+// Register new prometheus collector.
+func (m *Plugin) Register(c prometheus.Collector) error {
+ return m.registry.Register(c)
+}
+
+// Serve prometheus metrics service.
+func (m *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ m.collectors.Range(func(key, value interface{}) bool {
+ // key - name
+ // value - statsProvider struct
+ c := value.(statsProvider)
+ for _, v := range c.collectors {
+ if err := m.registry.Register(v); err != nil {
+ errCh <- err
+ return false
+ }
+ }
+
+ return true
+ })
+
+ var topCipherSuites []uint16
+ var defaultCipherSuitesTLS13 []uint16
+
+ hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
+ hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
+ // Keep in sync with crypto/aes/cipher_s390x.go.
+ hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
+
+ hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
+
+ if hasGCMAsm {
+ // If AES-GCM hardware is provided then prioritise AES-GCM
+ // cipher suites.
+ topCipherSuites = []uint16{
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ }
+ defaultCipherSuitesTLS13 = []uint16{
+ tls.TLS_AES_128_GCM_SHA256,
+ tls.TLS_CHACHA20_POLY1305_SHA256,
+ tls.TLS_AES_256_GCM_SHA384,
+ }
+ } else {
+ // Without AES-GCM hardware, we put the ChaCha20-Poly1305
+ // cipher suites first.
+ topCipherSuites = []uint16{
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ }
+ defaultCipherSuitesTLS13 = []uint16{
+ tls.TLS_CHACHA20_POLY1305_SHA256,
+ tls.TLS_AES_128_GCM_SHA256,
+ tls.TLS_AES_256_GCM_SHA384,
+ }
+ }
+
+ DefaultCipherSuites := make([]uint16, 0, 22)
+ DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
+ DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
+
+ m.http = &http.Server{
+ Addr: m.cfg.Address,
+ Handler: promhttp.HandlerFor(m.registry, promhttp.HandlerOpts{}),
+ IdleTimeout: time.Hour * 24,
+ ReadTimeout: time.Minute * 60,
+ MaxHeaderBytes: maxHeaderSize,
+ ReadHeaderTimeout: time.Minute * 60,
+ WriteTimeout: time.Minute * 60,
+ TLSConfig: &tls.Config{
+ CurvePreferences: []tls.CurveID{
+ tls.CurveP256,
+ tls.CurveP384,
+ tls.CurveP521,
+ tls.X25519,
+ },
+ CipherSuites: DefaultCipherSuites,
+ MinVersion: tls.VersionTLS12,
+ PreferServerCipherSuites: true,
+ },
+ }
+
+ go func() {
+ err := m.http.ListenAndServe()
+ if err != nil && err != http.ErrServerClosed {
+ errCh <- err
+ return
+ }
+ }()
+
+ return errCh
+}
+
+// Stop prometheus metrics service.
+func (m *Plugin) Stop() error {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if m.http != nil {
+ // timeout is 10 seconds
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+ err := m.http.Shutdown(ctx)
+ if err != nil {
+ // Stop returns nil; shutdown errors are only logged here
+ m.log.Error("stop error", "error", errors.Errorf("error shutting down the metrics server: error %v", err))
+ }
+ }
+ return nil
+}
+
+// Collects declares the methods used to collect plugins that implement the metrics.StatProvider interface (and endure.Named)
+func (m *Plugin) Collects() []interface{} {
+ return []interface{}{
+ m.AddStatProvider,
+ }
+}
+
+// AddStatProvider registers the collectors provided by a named StatProvider.
+func (m *Plugin) AddStatProvider(name endure.Named, stat StatProvider) error {
+ m.collectors.Store(name.Name(), statsProvider{
+ collectors: stat.MetricsCollector(),
+ name: name.Name(),
+ })
+ return nil
+}
+
+// RPC interface satisfaction
+func (m *Plugin) Name() string {
+ return PluginName
+}
+
+// RPC interface satisfaction
+func (m *Plugin) RPC() interface{} {
+ return &rpcServer{
+ svc: m,
+ log: m.log,
+ }
+}
diff --git a/plugins/metrics/rpc.go b/plugins/metrics/rpc.go
new file mode 100644
index 00000000..f9c6accb
--- /dev/null
+++ b/plugins/metrics/rpc.go
@@ -0,0 +1,294 @@
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+type rpcServer struct {
+ svc *Plugin
+ log logger.Logger
+}
+
+// Metric represents a single metric produced by the application.
+type Metric struct {
+ // Collector name.
+ Name string
+
+ // Collector value.
+ Value float64
+
+ // Labels associated with the metric. Used only for vector metrics and must be provided as label values.
+ Labels []string
+}
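+
+// Usage sketch (illustration only; the client variable is hypothetical and the
+// collector must have been declared first):
+//
+//   ok := false
+//   err := client.Call("metrics.Add", &metrics.Metric{
+//       Name:  "app_metric_counter",
+//       Value: 1,
+//   }, &ok)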
+
+// Add new metric to the designated collector.
+func (rpc *rpcServer) Add(m *Metric, ok *bool) error {
+ const op = errors.Op("Add metric")
+ rpc.log.Info("Adding metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
+ c, exist := rpc.svc.collectors.Load(m.Name)
+ if !exist {
+ rpc.log.Error("undefined collector", "collector", m.Name)
+ return errors.E(op, errors.Errorf("undefined collector %s, try first Declare the desired collector", m.Name))
+ }
+
+ switch c := c.(type) {
+ case prometheus.Gauge:
+ c.Add(m.Value)
+
+ case *prometheus.GaugeVec:
+ if len(m.Labels) == 0 {
+ rpc.log.Error("required labels for collector", "collector", m.Name)
+ return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
+ }
+
+ gauge, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
+ return errors.E(op, err)
+ }
+ gauge.Add(m.Value)
+ case prometheus.Counter:
+ c.Add(m.Value)
+
+ case *prometheus.CounterVec:
+ if len(m.Labels) == 0 {
+ return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
+ }
+
+ gauge, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
+ return errors.E(op, err)
+ }
+ gauge.Add(m.Value)
+
+ default:
+ return errors.E(op, errors.Errorf("collector %s does not support method `Add`", m.Name))
+ }
+
+ // RPC: set ok to true as the return value. Needed by the rpc.Call reply argument
+ *ok = true
+ rpc.log.Info("new metric successfully added", "name", m.Name, "labels", m.Labels, "value", m.Value)
+ return nil
+}
+
+// Sub subtracts the value from the specified metric (gauge only).
+func (rpc *rpcServer) Sub(m *Metric, ok *bool) error {
+ const op = errors.Op("Subtracting metric")
+ rpc.log.Info("Subtracting value from metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
+ c, exist := rpc.svc.collectors.Load(m.Name)
+ if !exist {
+ rpc.log.Error("undefined collector", "name", m.Name, "value", m.Value, "labels", m.Labels)
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+ if c == nil {
+ // defensive check; a stored collector should never be nil
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+
+ switch c := c.(type) {
+ case prometheus.Gauge:
+ c.Sub(m.Value)
+
+ case *prometheus.GaugeVec:
+ if len(m.Labels) == 0 {
+ rpc.log.Error("required labels for collector, but none was provided", "name", m.Name, "value", m.Value)
+ return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
+ }
+
+ gauge, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
+ return errors.E(op, err)
+ }
+ gauge.Sub(m.Value)
+ default:
+ return errors.E(op, errors.Errorf("collector `%s` does not support method `Sub`", m.Name))
+ }
+ rpc.log.Info("Subtracting operation applied successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
+
+ *ok = true
+ return nil
+}
+
+// Observe the value (histogram and summary only).
+func (rpc *rpcServer) Observe(m *Metric, ok *bool) error {
+ const op = errors.Op("Observe metrics")
+ rpc.log.Info("Observing metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
+
+ c, exist := rpc.svc.collectors.Load(m.Name)
+ if !exist {
+ rpc.log.Error("undefined collector", "name", m.Name, "value", m.Value, "labels", m.Labels)
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+ if c == nil {
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+
+ switch c := c.(type) {
+ case *prometheus.SummaryVec:
+ if len(m.Labels) == 0 {
+ return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
+ }
+
+ observer, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ observer.Observe(m.Value)
+
+ case prometheus.Histogram:
+ c.Observe(m.Value)
+
+ case *prometheus.HistogramVec:
+ if len(m.Labels) == 0 {
+ return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
+ }
+
+ observer, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
+ return errors.E(op, err)
+ }
+ observer.Observe(m.Value)
+ default:
+ return errors.E(op, errors.Errorf("collector `%s` does not support method `Observe`", m.Name))
+ }
+
+ rpc.log.Info("observe operation finished successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
+
+ *ok = true
+ return nil
+}
+
+// Declare registers a new collector in prometheus.
+// Arguments:
+//   NamedCollector - collector with a name
+//   bool           - RPC reply value
+// Returns an error if the collector cannot be registered.
+func (rpc *rpcServer) Declare(nc *NamedCollector, ok *bool) error {
+ const op = errors.Op("Declare metric")
+ rpc.log.Info("Declaring new metric", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
+ _, exist := rpc.svc.collectors.Load(nc.Name)
+ if exist {
+ rpc.log.Error("metric with provided name already exist", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
+ return errors.E(op, errors.Errorf("tried to register existing collector with the name `%s`", nc.Name))
+ }
+
+ var collector prometheus.Collector
+ switch nc.Type {
+ case Histogram:
+ opts := prometheus.HistogramOpts{
+ Name: nc.Name,
+ Namespace: nc.Namespace,
+ Subsystem: nc.Subsystem,
+ Help: nc.Help,
+ Buckets: nc.Buckets,
+ }
+
+ if len(nc.Labels) != 0 {
+ collector = prometheus.NewHistogramVec(opts, nc.Labels)
+ } else {
+ collector = prometheus.NewHistogram(opts)
+ }
+ case Gauge:
+ opts := prometheus.GaugeOpts{
+ Name: nc.Name,
+ Namespace: nc.Namespace,
+ Subsystem: nc.Subsystem,
+ Help: nc.Help,
+ }
+
+ if len(nc.Labels) != 0 {
+ collector = prometheus.NewGaugeVec(opts, nc.Labels)
+ } else {
+ collector = prometheus.NewGauge(opts)
+ }
+ case Counter:
+ opts := prometheus.CounterOpts{
+ Name: nc.Name,
+ Namespace: nc.Namespace,
+ Subsystem: nc.Subsystem,
+ Help: nc.Help,
+ }
+
+ if len(nc.Labels) != 0 {
+ collector = prometheus.NewCounterVec(opts, nc.Labels)
+ } else {
+ collector = prometheus.NewCounter(opts)
+ }
+ case Summary:
+ opts := prometheus.SummaryOpts{
+ Name: nc.Name,
+ Namespace: nc.Namespace,
+ Subsystem: nc.Subsystem,
+ Help: nc.Help,
+ }
+
+ if len(nc.Labels) != 0 {
+ collector = prometheus.NewSummaryVec(opts, nc.Labels)
+ } else {
+ collector = prometheus.NewSummary(opts)
+ }
+
+ default:
+ return errors.E(op, errors.Errorf("unknown collector type %s", nc.Type))
+ }
+
+ // add collector to sync.Map
+ rpc.svc.collectors.Store(nc.Name, collector)
+ // Register returns an error (instead of panicking like MustRegister) for invalid or duplicate collectors
+ err := rpc.svc.Register(collector)
+ if err != nil {
+ *ok = false
+ return errors.E(op, err)
+ }
+
+ rpc.log.Info("metric successfully added", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
+
+ *ok = true
+ return nil
+}
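+
+// Usage sketch (illustration only; the exact NamedCollector layout is defined
+// elsewhere in this package, field names follow the usage above):
+//
+//   ok := false
+//   err := client.Call("metrics.Declare", &metrics.NamedCollector{
+//       Name: "app_metric_counter",
+//       Type: metrics.Counter,
+//       Help: "application counter",
+//   }, &ok)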
+
+// Set sets the metric value (gauge only).
+func (rpc *rpcServer) Set(m *Metric, ok *bool) (err error) {
+ const op = errors.Op("Set metric")
+ rpc.log.Info("Observing metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
+
+ c, exist := rpc.svc.collectors.Load(m.Name)
+ if !exist {
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+ if c == nil {
+ return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
+ }
+
+ switch c := c.(type) {
+ case prometheus.Gauge:
+ c.Set(m.Value)
+
+ case *prometheus.GaugeVec:
+ if len(m.Labels) == 0 {
+ rpc.log.Error("required labels for collector", "collector", m.Name)
+ return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
+ }
+
+ gauge, err := c.GetMetricWithLabelValues(m.Labels...)
+ if err != nil {
+ rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
+ return errors.E(op, err)
+ }
+ gauge.Set(m.Value)
+
+ default:
+ return errors.E(op, errors.Errorf("collector `%s` does not support method Set", m.Name))
+ }
+
+ rpc.log.Info("set operation finished successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
+
+ *ok = true
+ return nil
+}
diff --git a/plugins/redis/config.go b/plugins/redis/config.go
new file mode 100644
index 00000000..ebcefed1
--- /dev/null
+++ b/plugins/redis/config.go
@@ -0,0 +1,32 @@
+package redis
+
+import "time"
+
+type Config struct {
+ Addrs []string `yaml:"addrs"`
+ DB int `yaml:"db"`
+ Username string `yaml:"username"`
+ Password string `yaml:"password"`
+ MasterName string `yaml:"master_name"`
+ SentinelPassword string `yaml:"sentinel_password"`
+ RouteByLatency bool `yaml:"route_by_latency"`
+ RouteRandomly bool `yaml:"route_randomly"`
+ MaxRetries int `yaml:"max_retries"`
+ DialTimeout time.Duration `yaml:"dial_timeout"`
+ MinRetryBackoff time.Duration `yaml:"min_retry_backoff"`
+ MaxRetryBackoff time.Duration `yaml:"max_retry_backoff"`
+ PoolSize int `yaml:"pool_size"`
+ MinIdleConns int `yaml:"min_idle_conns"`
+ MaxConnAge time.Duration `yaml:"max_conn_age"`
+ ReadTimeout time.Duration `yaml:"read_timeout"`
+ WriteTimeout time.Duration `yaml:"write_timeout"`
+ PoolTimeout time.Duration `yaml:"pool_timeout"`
+ IdleTimeout time.Duration `yaml:"idle_timeout"`
+ IdleCheckFreq time.Duration `yaml:"idle_check_freq"`
+ ReadOnly bool `yaml:"read_only"`
+}
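+
+// A minimal example of the corresponding "redis" section (illustration only;
+// YAML keys follow the struct tags above):
+//
+//   redis:
+//     addrs: ["localhost:6379"]
+//     db: 0
+//     pool_size: 10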
+
+// InitDefaults fills the config with default values
+func (s *Config) InitDefaults() {
+ s.Addrs = []string{"localhost:6379"} // the default address points to a local Redis instance
+}
diff --git a/plugins/redis/interface.go b/plugins/redis/interface.go
new file mode 100644
index 00000000..909c8ca4
--- /dev/null
+++ b/plugins/redis/interface.go
@@ -0,0 +1,9 @@
+package redis
+
+import "github.com/go-redis/redis/v8"
+
+// Redis is the interface implemented by the redis plugin
+type Redis interface {
+ // GetClient returns the underlying universal Redis client
+ GetClient() redis.UniversalClient
+}
diff --git a/plugins/redis/plugin.go b/plugins/redis/plugin.go
new file mode 100644
index 00000000..fe465340
--- /dev/null
+++ b/plugins/redis/plugin.go
@@ -0,0 +1,75 @@
+package redis
+
+import (
+ "github.com/go-redis/redis/v8"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const PluginName = "redis"
+
+type Plugin struct {
+ // config for RR integration
+ cfg *Config
+ // logger
+ log logger.Logger
+ // redis universal client
+ universalClient redis.UniversalClient
+}
+
+func (s *Plugin) GetClient() redis.UniversalClient {
+ return s.universalClient
+}
+
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("redis plugin init")
+ s.cfg = &Config{}
+ s.cfg.InitDefaults()
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ s.log = log
+
+ s.universalClient = redis.NewUniversalClient(&redis.UniversalOptions{
+ Addrs: s.cfg.Addrs,
+ DB: s.cfg.DB,
+ Username: s.cfg.Username,
+ Password: s.cfg.Password,
+ SentinelPassword: s.cfg.SentinelPassword,
+ MaxRetries: s.cfg.MaxRetries,
+ MinRetryBackoff: s.cfg.MinRetryBackoff,
+ MaxRetryBackoff: s.cfg.MaxRetryBackoff,
+ DialTimeout: s.cfg.DialTimeout,
+ ReadTimeout: s.cfg.ReadTimeout,
+ WriteTimeout: s.cfg.WriteTimeout,
+ PoolSize: s.cfg.PoolSize,
+ MinIdleConns: s.cfg.MinIdleConns,
+ MaxConnAge: s.cfg.MaxConnAge,
+ PoolTimeout: s.cfg.PoolTimeout,
+ IdleTimeout: s.cfg.IdleTimeout,
+ IdleCheckFrequency: s.cfg.IdleCheckFreq,
+ ReadOnly: s.cfg.ReadOnly,
+ RouteByLatency: s.cfg.RouteByLatency,
+ RouteRandomly: s.cfg.RouteRandomly,
+ MasterName: s.cfg.MasterName,
+ })
+
+ return nil
+}
+
+func (s *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (s *Plugin) Stop() error {
+ return s.universalClient.Close()
+}
+
+func (s *Plugin) Name() string {
+ return PluginName
+}
diff --git a/service/reload/config.go b/plugins/reload/config.go
index 46267045..9ca2c0dc 100644
--- a/service/reload/config.go
+++ b/plugins/reload/config.go
@@ -1,11 +1,9 @@
package reload
import (
- "errors"
"time"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
+ "github.com/spiral/errors"
)
// Config is a Reload configuration point.
@@ -35,38 +33,25 @@ type ServiceConfig struct {
// Ignore is set of files which would not be watched
Ignore []string
-
- // service is a link to service to restart
- service *roadrunner.Controllable
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
-
- return nil
}
// InitDefaults sets missing values to their default values.
-func (c *Config) InitDefaults() error {
+func InitDefaults(c *Config) {
c.Interval = time.Second
c.Patterns = []string{".php"}
-
- return nil
}
// Valid validates the configuration.
func (c *Config) Valid() error {
+ const op = errors.Op("config validation [reload plugin]")
if c.Interval < time.Second {
- return errors.New("too short interval")
+ return errors.E(op, errors.Str("too short interval"))
}
if c.Services == nil {
- return errors.New("should add at least 1 service")
+ return errors.E(op, errors.Str("should add at least 1 service"))
} else if len(c.Services) == 0 {
- return errors.New("service initialized, however, no config added")
+ return errors.E(op, errors.Str("service initialized, however, no config added"))
}
return nil
diff --git a/plugins/reload/plugin.go b/plugins/reload/plugin.go
new file mode 100644
index 00000000..eb1b61b2
--- /dev/null
+++ b/plugins/reload/plugin.go
@@ -0,0 +1,159 @@
+package reload
+
+import (
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/resetter"
+)
+
+// PluginName contains default plugin name.
+const PluginName string = "reload"
+const thresholdChanBuffer uint = 1000
+
+type Plugin struct {
+ cfg *Config
+ log logger.Logger
+ watcher *Watcher
+ services map[string]interface{}
+ res resetter.Resetter
+ stopc chan struct{}
+}
+
+// Init the reload plugin
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger, res resetter.Resetter) error {
+ const op = errors.Op("reload plugin init")
+ s.cfg = &Config{}
+ InitDefaults(s.cfg)
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ // disable plugin in case of error
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ s.log = log
+ s.res = res
+ s.stopc = make(chan struct{}, 1)
+ s.services = make(map[string]interface{})
+
+ var configs []WatcherConfig
+
+ for serviceName, serviceConfig := range s.cfg.Services {
+ ignored, err := ConvertIgnored(serviceConfig.Ignore)
+ if err != nil {
+ return errors.E(op, err)
+ }
+ configs = append(configs, WatcherConfig{
+ ServiceName: serviceName,
+ Recursive: serviceConfig.Recursive,
+ Directories: serviceConfig.Dirs,
+ FilterHooks: func(filename string, patterns []string) error {
+ for i := 0; i < len(patterns); i++ {
+ if strings.Contains(filename, patterns[i]) {
+ return nil
+ }
+ }
+ return errors.E(op, errors.SkipFile)
+ },
+ Files: make(map[string]os.FileInfo),
+ Ignored: ignored,
+ FilePatterns: append(serviceConfig.Patterns, s.cfg.Patterns...),
+ })
+ }
+
+ s.watcher, err = NewWatcher(configs, s.log)
+ if err != nil {
+ return errors.E(op, err)
+ }
+
+ return nil
+}
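+
+// A minimal example of the "reload" section consumed above (illustration only;
+// YAML keys are assumed to mirror the Config/ServiceConfig fields, and the
+// "http" service name is hypothetical):
+//
+//   reload:
+//     interval: 1s
+//     patterns: [".php"]
+//     services:
+//       http:
+//         dirs: ["."]
+//         recursive: true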
+
+func (s *Plugin) Serve() chan error {
+ const op = errors.Op("reload plugin serve")
+ errCh := make(chan error, 1)
+ if s.cfg.Interval < time.Second {
+ errCh <- errors.E(op, errors.Str("reload interval is too fast"))
+ return errCh
+ }
+
+ // collect events into a map keyed by service name, so that e.g. 100 events
+ // from the http service collapse into a single entry holding its config
+ thresholdc := make(chan struct {
+ serviceConfig ServiceConfig
+ service string
+ }, thresholdChanBuffer)
+
+ // use the same interval
+ timer := time.NewTimer(s.cfg.Interval)
+
+ go func() {
+ for e := range s.watcher.Event {
+ treshholdc <- struct {
+ serviceConfig ServiceConfig
+ service string
+ }{serviceConfig: s.cfg.Services[e.service], service: e.service}
+ }
+ }()
+
+ // map with configs by services
+ updated := make(map[string]ServiceConfig, len(s.cfg.Services))
+
+ go func() {
+ for {
+ select {
+ case cfg := <-treshholdc:
+ // logic is following:
+ // restart
+ timer.Stop()
+ // replace previous value in map by more recent without adding new one
+ updated[cfg.service] = cfg.serviceConfig
+ // if we getting a lot of events, we shouldn't restart particular service on each of it (user doing batch move or very fast typing)
+ // instead, we are resetting the timer and wait for s.cfg.Interval time
+ // If there is no more events, we restart service only once
+ timer.Reset(s.cfg.Interval)
+ case <-timer.C:
+ if len(updated) > 0 {
+ for name := range updated {
+ err := s.res.ResetByName(name)
+ if err != nil {
+ timer.Stop()
+ errCh <- errors.E(op, err)
+ return
+ }
+ }
+ // zero map
+ updated = make(map[string]ServiceConfig, len(s.cfg.Services))
+ }
+ case <-s.stopc:
+ timer.Stop()
+ return
+ }
+ }
+ }()
+
+ go func() {
+ err := s.watcher.StartPolling(s.cfg.Interval)
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return
+ }
+ }()
+
+ return errCh
+}
+
+func (s *Plugin) Stop() error {
+ s.watcher.Stop()
+ s.stopc <- struct{}{}
+ return nil
+}
+
+func (s *Plugin) Name() string {
+ return PluginName
+}
diff --git a/service/reload/watcher.go b/plugins/reload/watcher.go
index 1397dfa5..08c85af9 100644
--- a/service/reload/watcher.go
+++ b/plugins/reload/watcher.go
@@ -1,16 +1,15 @@
package reload
import (
- "errors"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
-)
-var ErrorSkip = errors.New("file is skipped")
-var NoWalkerConfig = errors.New("should add at least one walker config, when reload is set to true")
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
// SimpleHook is used to filter by simple criteria, CONTAINS
type SimpleHook func(filename string, pattern []string) error
@@ -62,18 +61,23 @@ type Watcher struct {
// config for each service
// need pointer here to assign files
watcherConfigs map[string]WatcherConfig
+
+ // logger
+ log logger.Logger
}
// Options is used to set Watcher Options
type Options func(*Watcher)
// NewWatcher returns new instance of File Watcher
-func NewWatcher(configs []WatcherConfig, options ...Options) (*Watcher, error) {
+func NewWatcher(configs []WatcherConfig, log logger.Logger, options ...Options) (*Watcher, error) {
w := &Watcher{
Event: make(chan Event),
mu: &sync.Mutex{},
- close: make(chan struct{}, 1),
+ log: log,
+
+ close: make(chan struct{}),
//workingDir: workDir,
watcherConfigs: make(map[string]WatcherConfig),
@@ -98,10 +102,11 @@ func NewWatcher(configs []WatcherConfig, options ...Options) (*Watcher, error) {
// initFs makes initial map with files
func (w *Watcher) initFs() error {
+ const op = errors.Op("init fs")
for srvName, config := range w.watcherConfigs {
fileList, err := w.retrieveFileList(srvName, config)
if err != nil {
- return err
+ return errors.E(op, err)
}
// workaround. in golang you can't assign to map in struct field
tmp := w.watcherConfigs[srvName]
@@ -129,17 +134,6 @@ func ConvertIgnored(ignored []string) (map[string]struct{}, error) {
return ign, nil
}
-// GetAllFiles returns all files initialized for particular company
-func (w *Watcher) GetAllFiles(serviceName string) []os.FileInfo {
- var ret []os.FileInfo
-
- for _, v := range w.watcherConfigs[serviceName].Files {
- ret = append(ret, v)
- }
-
- return ret
-}
-
// https://en.wikipedia.org/wiki/Inotify
// SetMaxFileEvents sets max file notify events for Watcher
// In case of file watch errors, this value can be increased system-wide
@@ -154,6 +148,7 @@ func (w *Watcher) GetAllFiles(serviceName string) []os.FileInfo {
// pass map from outside
func (w *Watcher) retrieveFilesSingle(serviceName, path string) (map[string]os.FileInfo, error) {
+ const op = errors.Op("retrieve")
stat, err := os.Stat(path)
if err != nil {
return nil, err
@@ -184,7 +179,7 @@ outer:
// if filename does not contain pattern --> ignore that file
if w.watcherConfigs[serviceName].FilePatterns != nil && w.watcherConfigs[serviceName].FilterHooks != nil {
err = w.watcherConfigs[serviceName].FilterHooks(fileInfoList[i].Name(), w.watcherConfigs[serviceName].FilePatterns)
- if err == ErrorSkip {
+ if errors.Is(errors.SkipFile, err) {
continue outer
}
}
@@ -197,9 +192,10 @@ outer:
func (w *Watcher) StartPolling(duration time.Duration) error {
w.mu.Lock()
+ const op = errors.Op("start polling")
if w.started {
w.mu.Unlock()
- return errors.New("already started")
+ return errors.E(op, errors.Str("already started"))
}
w.started = true
@@ -279,8 +275,9 @@ func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]o
fileList := make(map[string]os.FileInfo)
return fileList, filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ const op = errors.Op("retrieve files recursive")
if err != nil {
- return err
+ return errors.E(op, err)
}
// If path is ignored and it's a directory, skip the directory. If it's
@@ -296,7 +293,7 @@ func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]o
// if filename does not contain pattern --> ignore that file
err = w.watcherConfigs[serviceName].FilterHooks(info.Name(), w.watcherConfigs[serviceName].FilePatterns)
- if err == ErrorSkip {
+ if errors.Is(errors.SkipFile, err) {
return nil
}
@@ -318,6 +315,7 @@ func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
for pth := range w.watcherConfigs[serviceName].Files {
if _, found := files[pth]; !found {
removes[pth] = w.watcherConfigs[serviceName].Files[pth]
+ w.log.Debug("file added to the list of removed files", "path", pth, "name", w.watcherConfigs[serviceName].Files[pth].Name(), "size", w.watcherConfigs[serviceName].Files[pth].Size())
}
}
@@ -330,11 +328,13 @@ func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
if !found {
// A file was created.
creates[pth] = files[pth]
+ w.log.Debug("file was created", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
continue
}
if oldInfo.ModTime() != files[pth].ModTime() || oldInfo.Mode() != files[pth].Mode() {
w.watcherConfigs[serviceName].Files[pth] = files[pth]
+ w.log.Debug("file was updated", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
w.Event <- Event{
Path: pth,
Info: files[pth],
@@ -347,6 +347,7 @@ func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
for pth := range creates {
// add file to the plugin watch files
w.watcherConfigs[serviceName].Files[pth] = creates[pth]
+ w.log.Debug("file was added to watcher", "path", pth, "name", creates[pth].Name(), "size", creates[pth].Size())
w.Event <- Event{
Path: pth,
@@ -358,6 +359,7 @@ func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
for pth := range removes {
// delete path from the config
delete(w.watcherConfigs[serviceName].Files, pth)
+ w.log.Debug("file was removed from watcher", "path", pth, "name", removes[pth].Name(), "size", removes[pth].Size())
w.Event <- Event{
Path: pth,
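
The hunks above implement poll-based change detection: each tick takes a fresh file listing and compares it against the stored one by ModTime and Mode. A minimal standalone sketch of the same comparison idea (illustrative helper names, not the plugin's API):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// snapshot collects FileInfo for every regular file under root.
func snapshot(root string) (map[string]os.FileInfo, error) {
	out := make(map[string]os.FileInfo)
	err := filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			out[p] = info
		}
		return nil
	})
	return out, err
}

// diff reports created, updated and removed paths between two snapshots,
// using the same ModTime/Mode comparison as the watcher above.
func diff(old, cur map[string]os.FileInfo) (created, updated, removed []string) {
	for p := range old {
		if _, ok := cur[p]; !ok {
			removed = append(removed, p)
		}
	}
	for p, info := range cur {
		prev, ok := old[p]
		if !ok {
			created = append(created, p)
			continue
		}
		if prev.ModTime() != info.ModTime() || prev.Mode() != info.Mode() {
			updated = append(updated, p)
		}
	}
	return created, updated, removed
}

func main() {
	before, _ := snapshot(".")
	// ... files change on disk between the two snapshots ...
	after, _ := snapshot(".")
	c, u, r := diff(before, after)
	fmt.Println("created:", c, "updated:", u, "removed:", r)
}
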
diff --git a/plugins/resetter/interface.go b/plugins/resetter/interface.go
new file mode 100644
index 00000000..47d8d791
--- /dev/null
+++ b/plugins/resetter/interface.go
@@ -0,0 +1,17 @@
+package resetter
+
+// If a plugin implements the Resettable interface, its state can be reset at runtime via RPC/HTTP without a reload
+type Resettable interface {
+ // Reset resets the plugin state
+ Reset() error
+}
+
+// Resetter is the main interface of the Resetter plugin
+type Resetter interface {
+ // ResetAll resets all registered plugins
+ ResetAll() error
+ // ResetByName resets a plugin by its name
+ ResetByName(string) error
+ // GetAll returns the names of all registered plugins
+ GetAll() []string
+}
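
A plugin opts into hot reset simply by satisfying Resettable. A hedged sketch of a hypothetical plugin (names are illustrative) that rebuilds its internal state on Reset:

package example

import "sync"

// CachePlugin is a hypothetical plugin holding resettable in-memory state.
type CachePlugin struct {
	mu    sync.Mutex
	cache map[string][]byte
}

// Reset drops the cache; the resetter plugin calls this through
// ResetAll or ResetByName without restarting the process.
func (c *CachePlugin) Reset() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cache = make(map[string][]byte)
	return nil
}
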
diff --git a/plugins/resetter/plugin.go b/plugins/resetter/plugin.go
new file mode 100644
index 00000000..5d294086
--- /dev/null
+++ b/plugins/resetter/plugin.go
@@ -0,0 +1,80 @@
+package resetter
+
+import (
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+const PluginName = "resetter"
+
+type Plugin struct {
+ registry map[string]Resettable
+ log logger.Logger
+}
+
+func (p *Plugin) ResetAll() error {
+ const op = errors.Op("reset all")
+ for name := range p.registry {
+ err := p.registry[name].Reset()
+ if err != nil {
+ return errors.E(op, err)
+ }
+ }
+ return nil
+}
+
+func (p *Plugin) ResetByName(plugin string) error {
+ const op = errors.Op("reset by name")
+ if plugin, ok := p.registry[plugin]; ok {
+ return plugin.Reset()
+ }
+ return errors.E(op, errors.Errorf("can't find plugin: %s", plugin))
+}
+
+func (p *Plugin) GetAll() []string {
+ all := make([]string, 0, len(p.registry))
+ for name := range p.registry {
+ all = append(all, name)
+ }
+ return all
+}
+
+func (p *Plugin) Init(log logger.Logger) error {
+ p.registry = make(map[string]Resettable)
+ p.log = log
+ return nil
+}
+
+// Reset named service.
+func (p *Plugin) Reset(name string) error {
+ svc, ok := p.registry[name]
+ if !ok {
+ return errors.E("no such service", errors.Str(name))
+ }
+
+ return svc.Reset()
+}
+
+// RegisterTarget registers a resettable service.
+func (p *Plugin) RegisterTarget(name endure.Named, r Resettable) error {
+ p.registry[name.Name()] = r
+ return nil
+}
+
+// Collects declares services to be collected.
+func (p *Plugin) Collects() []interface{} {
+ return []interface{}{
+ p.RegisterTarget,
+ }
+}
+
+// Name of the service.
+func (p *Plugin) Name() string {
+ return PluginName
+}
+
+// RPC returns the associated RPC service.
+func (p *Plugin) RPC() interface{} {
+ return &rpc{srv: p, log: p.log}
+}
diff --git a/plugins/resetter/rpc.go b/plugins/resetter/rpc.go
new file mode 100644
index 00000000..69c955b0
--- /dev/null
+++ b/plugins/resetter/rpc.go
@@ -0,0 +1,30 @@
+package resetter
+
+import "github.com/spiral/roadrunner/v2/plugins/logger"
+
+type rpc struct {
+ srv *Plugin
+ log logger.Logger
+}
+
+// List all resettable plugins.
+func (rpc *rpc) List(_ bool, list *[]string) error {
+ rpc.log.Debug("started List method")
+ *list = make([]string, 0)
+
+ for name := range rpc.srv.registry {
+ *list = append(*list, name)
+ }
+ rpc.log.Debug("services list", "services", *list)
+
+ rpc.log.Debug("finished List method")
+ return nil
+}
+
+// Reset named plugin.
+func (rpc *rpc) Reset(service string, done *bool) error {
+ rpc.log.Debug("started Reset method for the service", "service", service)
+ defer rpc.log.Debug("finished Reset method for the service", "service", service)
+ *done = true
+ return rpc.srv.Reset(service)
+}
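
Over RPC the receiver is registered under the plugin's name, so the methods above should be addressable as resetter.List and resetter.Reset (assuming the default plugin name). A hedged usage sketch, given an *rpc.Client already wired with the goridge codec (see the RPC plugin below):

package rrutil

import (
	"fmt"
	"net/rpc"
)

// ResetAllPlugins lists the registered resettable plugins and resets each one.
// The client is assumed to use the goridge client codec.
func ResetAllPlugins(client *rpc.Client) error {
	var list []string
	if err := client.Call("resetter.List", true, &list); err != nil {
		return err
	}
	for _, name := range list {
		var done bool
		if err := client.Call("resetter.Reset", name, &done); err != nil {
			return err
		}
		fmt.Printf("reset %s: %v\n", name, done)
	}
	return nil
}
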
diff --git a/service/rpc/config.go b/plugins/rpc/config.go
index cc492622..88ad7f0e 100644
--- a/service/rpc/config.go
+++ b/plugins/rpc/config.go
@@ -5,34 +5,20 @@ import (
"net"
"strings"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/util"
+ "github.com/spiral/roadrunner/v2/utils"
)
// Config defines RPC service config.
type Config struct {
- // Indicates if RPC connection is enabled.
- Enable bool
-
// Listen string
Listen string
}
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
-
- return c.Valid()
-}
-
// InitDefaults allows to init blank config with pre-defined set of default values.
-func (c *Config) InitDefaults() error {
- c.Enable = true
- c.Listen = "tcp://127.0.0.1:6001"
-
- return nil
+func (c *Config) InitDefaults() {
+ if c.Listen == "" {
+ c.Listen = "tcp://127.0.0.1:6001"
+ }
}
// Valid returns nil if config is valid.
@@ -46,7 +32,7 @@ func (c *Config) Valid() error {
// Listener creates new rpc socket Listener.
func (c *Config) Listener() (net.Listener, error) {
- return util.CreateListener(c.Listen)
+ return utils.CreateListener(c.Listen)
}
// Dialer creates rpc socket Dialer.
diff --git a/plugins/rpc/doc/plugin_arch.drawio b/plugins/rpc/doc/plugin_arch.drawio
new file mode 100644
index 00000000..dec5f0b2
--- /dev/null
+++ b/plugins/rpc/doc/plugin_arch.drawio
@@ -0,0 +1 @@
+<mxfile host="Electron" modified="2020-10-19T17:14:19.125Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/13.7.9 Chrome/85.0.4183.121 Electron/10.1.3 Safari/537.36" etag="2J39x4EyFr1zaE9BXKM4" version="13.7.9" type="device"><diagram id="q2oMKs6VHyn7y0AfAXBL" name="Page-1">7Vttc9o4EP41zLQfksE2GPIxQHPXu7RlQntt7ptiC1sX2XJlOUB//a1sGdtIJDQFnE6YyUys1YutfR7trlai44yj5R8cJeEH5mPasbv+suNMOrZtORcO/JOSVSEZWv1CEHDiq0aVYEZ+YCXsKmlGfJw2GgrGqCBJU+ixOMaeaMgQ52zRbDZntPnWBAVYE8w8RHXpV+KLUEkt96Kq+BOTIFSvHtqDoiJCZWM1kzREPlvURM67jjPmjIniKVqOMZXKK/VS9LvaUrv+MI5jsUuHL/zu0yx7//HT3Pln8vfN59vvS/usVHMqVuWMsQ8KUEXGRcgCFiP6rpKOOMtiH8thu1Cq2lwzloDQAuF/WIiVQhNlgoEoFBFVtXhJxLfa860c6ryvSpOlGjkvrMpCLPjqW71Q6yWLVbe8VPabs1hcoYhQKRizjBPMYcIf8UJVqq+8gGKhC6mArTpWohQG8lSrfz88xF8/ds/+uiLe7MsXtLiyZ2clVxEPsHik3WDNBFhCmEUYvh36cUyRIA/N70CKy8G6XQU3PCjEfwZ9q030K8RvazVPoV8BftvA+7dE33KOBP9jX/mAaKbedDOFkbpTmgUk1qjRBH4REoFnCcr1sADj3wT55xVv0PMD5gIvayJdU6rWGSi3otyMYw3OlWRRme21VwlrFtsdHEi9jqbe9zERha+ak0DTL0xVNJWIKAliePZAMaA+ZyQVQsA5XaqKiPh+sShxSn6gu3woiU7CSCzyCfVHnf5EjgXrMC103go+3Q18hho6QwM4pfPcOzg9DZwJTnDspyBk8Rqk8ylnDxCB8N8DLcveD1z2BlxWWa4vpu4x8epreOmuK/YvZcQnIaAoTYm34XeO5kMMun/aFRjdj45QDYG+AYBStrMHUW+YSgpWBOgNtxCgHKJwgapXPercGKhvbwxkbQxUKEYbKCfJetrP542r8aa0vt0U9gsE1rpzKfWVeK97ia+Xc41glolhB1viA32Jj+3O5YhIXc9loAHFEczdpRKWO95Ay/2eyZ1UrqqzQq8S14tkmeurrIanQP0vRvmVQYA052WwVAwHE7+rXrHBp/bCI3f4tPu1jMGReyCwLT06KoLPVPDMExnHmvrSBYkoinGpIVWz07oUcm8y8kJC/Wu0YpmcXiqQd1+WRiHj5AcMi0qIoJqXMNhuo8VM9lQLO1/oeFqiY22IPqBlo+E1SoUSeIxSlKTkbj2NCGwhiUdMCBbt0/k8P47uuQarULapE8Vye4diytDg+ke7R2hAKHaPx4wyIMYkZgWBCKUbopJDFM/FVgalsOEhcXCdt5n0KsmNUoUUMeg7p3kgEoI/wHG+axZIbPUHI9DyWIYl4BnsMZStqpw7iwT22WMWw1wQycHFwKMFTsUvU+Tx1fk0cUr34e7GE/tQBqV0SxpNpJGeYf6QK+VNjMX5TeK9PbGlTbb07ZbZYl1sYUsKTCEeltvAIlKr+aNuSqHqxJw2mTMwBC7HZY6eOSiYMydYni3IeHH8aILnxIk9c8Lq9tomxQ7pCUpyqAszUZ4lWc/iw3qXqQjwOc+8n1kaSRydJI6BEBTdYTqF3WixH57woq1h0/ryueDsGLAOD0UFPeNQ2AcYPmT+G7FK8NvCTMjHkzdply1HdCfmIzhDHvMIR3Av9jDVrKTOjjnUCzPaRzpN1Ra+Ciafk9Xo/nK6wmAsfpMMhrZ+DazZmsHoNTNdPcvgD1xDpmuwB4dgpIX9dLxY8aTKdZ78wp7osn2t/lQyw8SZg3kFPTmqcSZGkTIsgNeJLS2yxZTMOCpb9IizMigcByQFmyITGlYxV4A2o0iqyc+PvOGvYYPmTNbl2Xgzq17Wgdie/Ia1cYFkqO8pHftAx2FGVPUMVVJkul8VLK61cXJl67gc6pTSbAvcVgJ245259TW5Vm5M1k6i9xPlO7uG+b1Ww3zdOVdXCk5h/pHsgtM0C64p7WNywqWz3j8tdsgLX0tXHJ+itiNFbVsu176UIN/SL7xMOQOFR2lOl7a9fN3MP4rYHpbzxq7dsGk/1O1QMzT6nYOAqSAZFqaPvY78hYecQIBjzJGQgbNgsk2UeaH8Ji93RdLvefdY3ohDeZyNlx7G8iGjJMqvA5/pV61fE9YGy93fU6ANxer3NcWNwupXSs67/wE=</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/rpc/interface.go b/plugins/rpc/interface.go
new file mode 100644
index 00000000..683fd2ec
--- /dev/null
+++ b/plugins/rpc/interface.go
@@ -0,0 +1,7 @@
+package rpc
+
+// RPCer declares the ability to create a set of public RPC methods.
+type RPCer interface {
+ // RPC returns the RPC methods for the given service.
+ RPC() interface{}
+}
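
A plugin becomes reachable over the socket by returning its receiver from RPC(); the RPC plugin registers it under the plugin's Name(). A hypothetical implementation (illustrative only):

package example

// informer is the RPC receiver; every exported method with an
// (args, *reply) error signature becomes callable as "informer.<Method>".
type informer struct{}

// Version reports a static version string to the caller.
func (i *informer) Version(_ bool, out *string) error {
	*out = "v2.0.0-example"
	return nil
}

// Plugin is a hypothetical plugin exposing the receiver above.
type Plugin struct{}

// Name is used by the RPC plugin as the method prefix.
func (p *Plugin) Name() string { return "informer" }

// RPC satisfies the RPCer interface.
func (p *Plugin) RPC() interface{} { return &informer{} }
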
diff --git a/plugins/rpc/plugin.go b/plugins/rpc/plugin.go
new file mode 100644
index 00000000..c5813e7b
--- /dev/null
+++ b/plugins/rpc/plugin.go
@@ -0,0 +1,161 @@
+package rpc
+
+import (
+ "net"
+ "net/rpc"
+ "sync/atomic"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// PluginName contains default plugin name.
+const PluginName = "RPC"
+
+type pluggable struct {
+ service RPCer
+ name string
+}
+
+// Plugin is RPC service.
+type Plugin struct {
+ cfg Config
+ log logger.Logger
+ rpc *rpc.Server
+ // set of plugins that implement the RPCer interface and can be plugged into RR via RPC
+ plugins []pluggable
+ listener net.Listener
+ closed *uint32
+}
+
+// Init initializes the RPC service; it returns errors.Disabled if the rpc config section is missing.
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("rpc plugin init")
+ if !cfg.Has(PluginName) {
+ return errors.E(op, errors.Disabled)
+ }
+
+ err := cfg.UnmarshalKey(PluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+ s.cfg.InitDefaults()
+
+ s.log = log
+ state := uint32(0)
+ s.closed = &state
+ atomic.StoreUint32(s.closed, 0)
+
+ return s.cfg.Valid()
+}
+
+// Serve starts the RPC listener and serves the collected plugins.
+func (s *Plugin) Serve() chan error {
+ const op = errors.Op("serve rpc plugin")
+ errCh := make(chan error, 1)
+
+ s.rpc = rpc.NewServer()
+
+ services := make([]string, 0, len(s.plugins))
+
+ // Attach all services
+ for i := 0; i < len(s.plugins); i++ {
+ err := s.Register(s.plugins[i].name, s.plugins[i].service.RPC())
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+
+ services = append(services, s.plugins[i].name)
+ }
+
+ var err error
+ s.listener, err = s.cfg.Listener()
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ s.log.Debug("Started RPC service", "address", s.cfg.Listen, "services", services)
+
+ go func() {
+ for {
+ conn, err := s.listener.Accept()
+ if err != nil {
+ if atomic.LoadUint32(s.closed) == 1 {
+ // just log and return; this is not a critical issue, Stop was called
+ s.log.Warn("listener accept error, connection closed", "error", err)
+ return
+ }
+
+ s.log.Error("listener accept error", "error", err)
+ errCh <- errors.E(errors.Op("listener accept"), errors.Serve, err)
+ return
+ }
+
+ go s.rpc.ServeCodec(goridgeRpc.NewCodec(conn))
+ }
+ }()
+
+ return errCh
+}
+
+// Stop stops the service.
+func (s *Plugin) Stop() error {
+ // store closed state
+ atomic.StoreUint32(s.closed, 1)
+ err := s.listener.Close()
+ if err != nil {
+ return errors.E(errors.Op("stop RPC socket"), err)
+ }
+ return nil
+}
+
+// Name contains service name.
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// Collects declares the services to be collected for RPC.
+func (s *Plugin) Collects() []interface{} {
+ return []interface{}{
+ s.RegisterPlugin,
+ }
+}
+
+// RegisterPlugin registers RPC service plugin.
+func (s *Plugin) RegisterPlugin(name endure.Named, p RPCer) {
+ s.plugins = append(s.plugins, pluggable{
+ service: p,
+ name: name.Name(),
+ })
+}
+
+// Register publishes in the server the set of methods of the
+// receiver value that satisfy the following conditions:
+// - exported method of exported type
+// - two arguments, both of exported type
+// - the second argument is a pointer
+// - one return value, of type error
+// It returns an error if the receiver is not an exported type or has
+// no suitable methods. It also logs the error using package log.
+func (s *Plugin) Register(name string, svc interface{}) error {
+ if s.rpc == nil {
+ return errors.E("RPC service is not configured")
+ }
+
+ return s.rpc.RegisterName(name, svc)
+}
+
+// Client creates new RPC client.
+func (s *Plugin) Client() (*rpc.Client, error) {
+ conn, err := s.cfg.Dialer()
+ if err != nil {
+ return nil, err
+ }
+
+ return rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn)), nil
+}
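
On the wire the service speaks the goridge protocol rather than net/rpc's default gob encoding, so an external client must wrap the connection in the goridge client codec, mirroring the Client() helper above. A minimal sketch, assuming the default tcp://127.0.0.1:6001 listen address:

package main

import (
	"fmt"
	"log"
	"net"
	"net/rpc"

	goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
)

func main() {
	// dial the address from the rpc config section (default shown here)
	conn, err := net.Dial("tcp", "127.0.0.1:6001")
	if err != nil {
		log.Fatal(err)
	}
	client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
	defer client.Close()

	// call any collected plugin by its registered name, e.g. the resetter
	var list []string
	if err := client.Call("resetter.List", true, &list); err != nil {
		log.Fatal(err)
	}
	fmt.Println("resettable plugins:", list)
}
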
diff --git a/plugins/server/config.go b/plugins/server/config.go
new file mode 100644
index 00000000..a990efd3
--- /dev/null
+++ b/plugins/server/config.go
@@ -0,0 +1,147 @@
+package server
+
+import (
+ "time"
+)
+
+// Config maps the full .rr.yaml configuration.
+// Optional sections use pointers to distinguish between `empty` and `not present`.
+type Config struct {
+ // Server config section
+ Server struct {
+ // Command to run as application.
+ Command string `yaml:"command"`
+ // User to run application under.
+ User string `yaml:"user"`
+ // Group to run application under.
+ Group string `yaml:"group"`
+ // Env represents application environment.
+ Env Env `yaml:"env"`
+ // Relay defines connection method and factory to be used to connect to workers:
+ // "pipes", "tcp://:6001", "unix://rr.sock"
+ // This config section must not change on re-configuration.
+ Relay string `yaml:"relay"`
+ // RelayTimeout defines for how long socket factory will be waiting for worker connection. This config section
+ // must not change on re-configuration. Defaults to 60s.
+ RelayTimeout time.Duration `yaml:"relayTimeout"`
+ } `yaml:"server"`
+
+ RPC *struct {
+ Listen string `yaml:"listen"`
+ } `yaml:"rpc"`
+ Logs *struct {
+ Mode string `yaml:"mode"`
+ Level string `yaml:"level"`
+ } `yaml:"logs"`
+ HTTP *struct {
+ Address string `yaml:"address"`
+ MaxRequestSize int `yaml:"max_request_size"`
+ Middleware []string `yaml:"middleware"`
+ Uploads struct {
+ Forbid []string `yaml:"forbid"`
+ } `yaml:"uploads"`
+ TrustedSubnets []string `yaml:"trusted_subnets"`
+ Pool struct {
+ NumWorkers int `yaml:"num_workers"`
+ MaxJobs int `yaml:"max_jobs"`
+ AllocateTimeout string `yaml:"allocate_timeout"`
+ DestroyTimeout string `yaml:"destroy_timeout"`
+ Supervisor struct {
+ WatchTick int `yaml:"watch_tick"`
+ TTL int `yaml:"ttl"`
+ IdleTTL int `yaml:"idle_ttl"`
+ ExecTTL int `yaml:"exec_ttl"`
+ MaxWorkerMemory int `yaml:"max_worker_memory"`
+ } `yaml:"supervisor"`
+ } `yaml:"pool"`
+ Ssl struct {
+ Port int `yaml:"port"`
+ Redirect bool `yaml:"redirect"`
+ Cert string `yaml:"cert"`
+ Key string `yaml:"key"`
+ } `yaml:"ssl"`
+ Fcgi struct {
+ Address string `yaml:"address"`
+ } `yaml:"fcgi"`
+ HTTP2 struct {
+ Enabled bool `yaml:"enabled"`
+ H2C bool `yaml:"h2c"`
+ MaxConcurrentStreams int `yaml:"max_concurrent_streams"`
+ } `yaml:"http2"`
+ } `yaml:"http"`
+ Redis *struct {
+ Addrs []string `yaml:"addrs"`
+ MasterName string `yaml:"master_name"`
+ Username string `yaml:"username"`
+ Password string `yaml:"password"`
+ DB int `yaml:"db"`
+ SentinelPassword string `yaml:"sentinel_password"`
+ RouteByLatency bool `yaml:"route_by_latency"`
+ RouteRandomly bool `yaml:"route_randomly"`
+ DialTimeout int `yaml:"dial_timeout"`
+ MaxRetries int `yaml:"max_retries"`
+ MinRetryBackoff int `yaml:"min_retry_backoff"`
+ MaxRetryBackoff int `yaml:"max_retry_backoff"`
+ PoolSize int `yaml:"pool_size"`
+ MinIdleConns int `yaml:"min_idle_conns"`
+ MaxConnAge int `yaml:"max_conn_age"`
+ ReadTimeout int `yaml:"read_timeout"`
+ WriteTimeout int `yaml:"write_timeout"`
+ PoolTimeout int `yaml:"pool_timeout"`
+ IdleTimeout int `yaml:"idle_timeout"`
+ IdleCheckFreq int `yaml:"idle_check_freq"`
+ ReadOnly bool `yaml:"read_only"`
+ } `yaml:"redis"`
+ Boltdb *struct {
+ Dir string `yaml:"dir"`
+ File string `yaml:"file"`
+ Bucket string `yaml:"bucket"`
+ Permissions int `yaml:"permissions"`
+ TTL int `yaml:"TTL"`
+ } `yaml:"boltdb"`
+ Memcached *struct {
+ Addr []string `yaml:"addr"`
+ } `yaml:"memcached"`
+ Memory *struct {
+ Enabled bool `yaml:"enabled"`
+ Interval int `yaml:"interval"`
+ } `yaml:"memory"`
+ Metrics *struct {
+ Address string `yaml:"address"`
+ Collect struct {
+ AppMetric struct {
+ Type string `yaml:"type"`
+ Help string `yaml:"help"`
+ Labels []string `yaml:"labels"`
+ Buckets []float64 `yaml:"buckets"`
+ Objectives []struct {
+ Num2 float64 `yaml:"2,omitempty"`
+ One4 float64 `yaml:"1.4,omitempty"`
+ } `yaml:"objectives"`
+ } `yaml:"app_metric"`
+ } `yaml:"collect"`
+ } `yaml:"metrics"`
+ Reload *struct {
+ Interval string `yaml:"interval"`
+ Patterns []string `yaml:"patterns"`
+ Services struct {
+ HTTP struct {
+ Recursive bool `yaml:"recursive"`
+ Ignore []string `yaml:"ignore"`
+ Patterns []string `yaml:"patterns"`
+ Dirs []string `yaml:"dirs"`
+ } `yaml:"http"`
+ } `yaml:"services"`
+ } `yaml:"reload"`
+}
+
+// InitDefaults for the server config
+func (cfg *Config) InitDefaults() {
+ if cfg.Server.Relay == "" {
+ cfg.Server.Relay = "pipes"
+ }
+
+ if cfg.Server.RelayTimeout == 0 {
+ cfg.Server.RelayTimeout = time.Second * 60
+ }
+}
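
InitDefaults only fills relay settings that were left empty, so a minimal config with just a command still gets working pipes transport. A small illustration of the same defaulting rule (local variables, not the plugin type):

package main

import (
	"fmt"
	"time"
)

func main() {
	// mirrors InitDefaults above: empty relay -> "pipes", zero timeout -> 60s
	relay := ""
	relayTimeout := time.Duration(0)

	if relay == "" {
		relay = "pipes"
	}
	if relayTimeout == 0 {
		relayTimeout = 60 * time.Second
	}
	fmt.Println(relay, relayTimeout) // pipes 1m0s
}
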
diff --git a/plugins/server/interface.go b/plugins/server/interface.go
new file mode 100644
index 00000000..a2d8b92b
--- /dev/null
+++ b/plugins/server/interface.go
@@ -0,0 +1,21 @@
+package server
+
+import (
+ "context"
+ "os/exec"
+
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+)
+
+// Env is a map of environment variables passed to workers.
+type Env map[string]string
+
+// Server creates workers for the application.
+type Server interface {
+ CmdFactory(env Env) (func() *exec.Cmd, error)
+ NewWorker(ctx context.Context, env Env, listeners ...events.Listener) (worker.BaseProcess, error)
+ NewWorkerPool(ctx context.Context, opt poolImpl.Config, env Env, listeners ...events.Listener) (pool.Pool, error)
+}
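
Consumers depend on this interface rather than on the concrete plugin and usually request a pool during their own Serve phase. A hedged sketch of such a consumer (plugin name, fields and the pool options are assumptions for illustration):

package example

import (
	"context"

	"github.com/spiral/roadrunner/v2/interfaces/pool"
	poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
	"github.com/spiral/roadrunner/v2/plugins/server"
)

// Consumer is a hypothetical plugin that needs a worker pool.
type Consumer struct {
	srv  server.Server
	pool pool.Pool
}

// Init receives the server plugin through dependency injection.
func (c *Consumer) Init(srv server.Server) error {
	c.srv = srv
	return nil
}

// Serve builds a small pool; extra env vars could be passed instead of nil.
func (c *Consumer) Serve() chan error {
	errCh := make(chan error, 1)
	p, err := c.srv.NewWorkerPool(context.Background(), poolImpl.Config{NumWorkers: 2}, nil)
	if err != nil {
		errCh <- err
		return errCh
	}
	c.pool = p
	return errCh
}
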
diff --git a/plugins/server/plugin.go b/plugins/server/plugin.go
new file mode 100644
index 00000000..565c80c4
--- /dev/null
+++ b/plugins/server/plugin.go
@@ -0,0 +1,257 @@
+package server
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+
+ // core imports
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ "github.com/spiral/roadrunner/v2/pkg/pipe"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ "github.com/spiral/roadrunner/v2/pkg/socket"
+ "github.com/spiral/roadrunner/v2/utils"
+)
+
+// PluginName for the server
+const PluginName = "server"
+
+// RR_RELAY env variable key (internal)
+const RR_RELAY = "RR_RELAY" //nolint:golint,stylecheck
+// RR_RPC env variable key (internal), set if the RPC plugin is present
+const RR_RPC = "RR_RPC" //nolint:golint,stylecheck
+// RR_HTTP env variable key (internal), set if the HTTP plugin is present
+const RR_HTTP = "RR_HTTP" //nolint:golint,stylecheck
+
+// Plugin manages the worker factory and spawns workers and pools for other plugins
+type Plugin struct {
+ cfg Config
+ log logger.Logger
+ factory worker.Factory
+}
+
+// Init application provider.
+func (server *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("Init")
+ err := cfg.Unmarshal(&server.cfg)
+ if err != nil {
+ return errors.E(op, errors.Init, err)
+ }
+ server.cfg.InitDefaults()
+ server.log = log
+
+ server.factory, err = server.initFactory()
+ if err != nil {
+ return errors.E(err)
+ }
+
+ return nil
+}
+
+// Name contains service name.
+func (server *Plugin) Name() string {
+ return PluginName
+}
+
+// Serve starts the server plugin (a no-op here, present only to satisfy the interface)
+func (server *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+// Stop closes the worker factory chosen in the config
+func (server *Plugin) Stop() error {
+ if server.factory == nil {
+ return nil
+ }
+
+ return server.factory.Close()
+}
+
+// CmdFactory provides a worker command factory built from the server config and the given environment.
+func (server *Plugin) CmdFactory(env Env) (func() *exec.Cmd, error) {
+ const op = errors.Op("cmd factory")
+ var cmdArgs []string
+
+ // create command according to the config
+ cmdArgs = append(cmdArgs, strings.Split(server.cfg.Server.Command, " ")...)
+ if len(cmdArgs) < 2 {
+ return nil, errors.E(op, errors.Str("should be in form of `php <script>"))
+ }
+ if cmdArgs[0] != "php" {
+ return nil, errors.E(op, errors.Str("first arg in command should be `php`"))
+ }
+
+ _, err := os.Stat(cmdArgs[1])
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+ return func() *exec.Cmd {
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) //nolint:gosec
+ utils.IsolateProcess(cmd)
+
+ // if user is not empty and the OS is Linux or macOS,
+ // execute the PHP worker as that particular user
+ if server.cfg.Server.User != "" {
+ err := utils.ExecuteFromUser(cmd, server.cfg.Server.User)
+ if err != nil {
+ return nil
+ }
+ }
+
+ cmd.Env = server.setEnv(env)
+
+ return cmd
+ }, nil
+}
+
+// NewWorker issues new standalone worker.
+func (server *Plugin) NewWorker(ctx context.Context, env Env, listeners ...events.Listener) (worker.BaseProcess, error) {
+ const op = errors.Op("new worker")
+
+ list := make([]events.Listener, 0, len(listeners))
+ list = append(list, server.collectWorkerLogs)
+
+ spawnCmd, err := server.CmdFactory(env)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ w, err := server.factory.SpawnWorkerWithTimeout(ctx, spawnCmd(), list...)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return w, nil
+}
+
+// NewWorkerPool issues new worker pool.
+func (server *Plugin) NewWorkerPool(ctx context.Context, opt poolImpl.Config, env Env, listeners ...events.Listener) (pool.Pool, error) {
+ const op = errors.Op("server plugins new worker pool")
+ spawnCmd, err := server.CmdFactory(env)
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ list := make([]events.Listener, 0, 1)
+ list = append(list, server.collectPoolLogs)
+ if len(listeners) != 0 {
+ list = append(list, listeners...)
+ }
+
+ p, err := poolImpl.Initialize(ctx, spawnCmd, server.factory, opt, poolImpl.AddListeners(list...))
+ if err != nil {
+ return nil, errors.E(op, err)
+ }
+
+ return p, nil
+}
+
+// creates relay and worker factory.
+func (server *Plugin) initFactory() (worker.Factory, error) {
+ const op = errors.Op("server factory init")
+ if server.cfg.Server.Relay == "" || server.cfg.Server.Relay == "pipes" {
+ return pipe.NewPipeFactory(), nil
+ }
+
+ dsn := strings.Split(server.cfg.Server.Relay, "://")
+ if len(dsn) != 2 {
+ return nil, errors.E(op, errors.Network, errors.Str("invalid DSN (tcp://:6001, unix://file.sock)"))
+ }
+
+ lsn, err := utils.CreateListener(server.cfg.Server.Relay)
+ if err != nil {
+ return nil, errors.E(op, errors.Network, err)
+ }
+
+ switch dsn[0] {
+ // sockets group
+ case "unix":
+ return socket.NewSocketServer(lsn, server.cfg.Server.RelayTimeout), nil
+ case "tcp":
+ return socket.NewSocketServer(lsn, server.cfg.Server.RelayTimeout), nil
+ default:
+ return nil, errors.E(op, errors.Network, errors.Str("invalid DSN (tcp://:6001, unix://file.sock)"))
+ }
+}
+
+func (server *Plugin) setEnv(e Env) []string {
+ env := append(os.Environ(), fmt.Sprintf(RR_RELAY+"=%s", server.cfg.Server.Relay))
+ for k, v := range e {
+ env = append(env, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
+ }
+
+ // set internal env variables
+ if server.cfg.HTTP != nil {
+ env = append(env, fmt.Sprintf("%s=%s", RR_HTTP, "true"))
+ }
+ if server.cfg.RPC != nil && server.cfg.RPC.Listen != "" {
+ env = append(env, fmt.Sprintf("%s=%s", RR_RPC, server.cfg.RPC.Listen))
+ }
+
+ // set env variables from the config
+ if len(server.cfg.Server.Env) > 0 {
+ for k, v := range server.cfg.Server.Env {
+ env = append(env, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
+ }
+ }
+
+ return env
+}
+
+func (server *Plugin) collectPoolLogs(event interface{}) {
+ if we, ok := event.(events.PoolEvent); ok {
+ switch we.Event {
+ case events.EventMaxMemory:
+ server.log.Info("worker max memory reached", "pid", we.Payload.(worker.BaseProcess).Pid())
+ case events.EventNoFreeWorkers:
+ server.log.Info("no free workers in pool", "error", we.Payload.(error).Error())
+ case events.EventPoolError:
+ server.log.Info("pool error", "error", we.Payload.(error).Error())
+ case events.EventSupervisorError:
+ server.log.Info("pool supervisor error", "error", we.Payload.(error).Error())
+ case events.EventTTL:
+ server.log.Info("worker TTL reached", "pid", we.Payload.(worker.BaseProcess).Pid())
+ case events.EventWorkerConstruct:
+ if _, ok := we.Payload.(error); ok {
+ server.log.Error("worker construction error", "error", we.Payload.(error).Error())
+ return
+ }
+ server.log.Info("worker constructed", "pid", we.Payload.(worker.BaseProcess).Pid())
+ case events.EventWorkerDestruct:
+ server.log.Info("worker destructed", "pid", we.Payload.(worker.BaseProcess).Pid())
+ case events.EventExecTTL:
+ server.log.Info("EVENT EXEC TTL PLACEHOLDER")
+ case events.EventIdleTTL:
+ server.log.Info("worker IDLE timeout reached", "pid", we.Payload.(worker.BaseProcess).Pid())
+ }
+ }
+
+ if we, ok := event.(events.WorkerEvent); ok {
+ switch we.Event {
+ case events.EventWorkerError:
+ server.log.Info(we.Payload.(error).Error(), "pid", we.Worker.(worker.BaseProcess).Pid())
+ case events.EventWorkerLog:
+ server.log.Info(strings.TrimRight(string(we.Payload.([]byte)), " \n\t"), "pid", we.Worker.(worker.BaseProcess).Pid())
+ }
+ }
+}
+
+func (server *Plugin) collectWorkerLogs(event interface{}) {
+ if we, ok := event.(events.WorkerEvent); ok {
+ switch we.Event {
+ case events.EventWorkerError:
+ server.log.Error(we.Payload.(error).Error(), "pid", we.Worker.(worker.BaseProcess).Pid())
+ case events.EventWorkerLog:
+ server.log.Info(strings.TrimRight(string(we.Payload.([]byte)), " \n\t"), "pid", we.Worker.(worker.BaseProcess).Pid())
+ }
+ }
+}
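
initFactory above picks the transport from the relay value: plain "pipes" uses stdin/stdout, anything else must be a scheme://address DSN that maps to a socket listener. A standalone sketch of the same selection rule (illustrative helper, not the plugin API):

package main

import (
	"fmt"
	"strings"
)

// transportFor mirrors the DSN handling above: "pipes" (or empty) selects the
// pipe factory, otherwise the value must be "tcp://addr" or "unix://path".
func transportFor(relay string) (kind, addr string, err error) {
	if relay == "" || relay == "pipes" {
		return "pipes", "", nil
	}
	parts := strings.Split(relay, "://")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("invalid DSN (tcp://:6001, unix://file.sock): %q", relay)
	}
	switch parts[0] {
	case "tcp", "unix":
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("unsupported scheme %q", parts[0])
	}
}

func main() {
	for _, r := range []string{"pipes", "tcp://127.0.0.1:6001", "unix://rr.sock", "bogus"} {
		kind, addr, err := transportFor(r)
		fmt.Println(r, "->", kind, addr, err)
	}
}
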
diff --git a/plugins/static/config.go b/plugins/static/config.go
new file mode 100644
index 00000000..f5d26b2d
--- /dev/null
+++ b/plugins/static/config.go
@@ -0,0 +1,76 @@
+package static
+
+import (
+ "os"
+ "path"
+ "strings"
+
+ "github.com/spiral/errors"
+)
+
+// Config describes file location and controls access to them.
+type Config struct {
+ Static struct {
+ // Dir contains name of directory to control access to.
+ Dir string
+
+ // Forbid specifies list of file extensions which are forbidden for access.
+ // Example: .php, .exe, .bat, .htaccess, etc.
+ Forbid []string
+
+ // Always specifies a list of extensions which must always be served by the
+ // static service, even if the file is not found.
+ Always []string
+
+ // Request headers to add to every static request.
+ Request map[string]string
+
+ // Response headers to add to every static response.
+ Response map[string]string
+ }
+}
+
+// Valid returns nil if config is valid.
+func (c *Config) Valid() error {
+ const op = errors.Op("static plugin validation")
+ st, err := os.Stat(c.Static.Dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return errors.E(op, errors.Errorf("root directory '%s' does not exists", c.Static.Dir))
+ }
+
+ return err
+ }
+
+ if !st.IsDir() {
+ return errors.E(op, errors.Errorf("invalid root directory '%s'", c.Static.Dir))
+ }
+
+ return nil
+}
+
+// AlwaysForbid returns true if the file extension is forbidden from being served.
+func (c *Config) AlwaysForbid(filename string) bool {
+ ext := strings.ToLower(path.Ext(filename))
+
+ for _, v := range c.Static.Forbid {
+ if ext == v {
+ return true
+ }
+ }
+
+ return false
+}
+
+// AlwaysServe returns true if the file is expected to be served by the static service even when it does not exist.
+func (c *Config) AlwaysServe(filename string) bool {
+ ext := strings.ToLower(path.Ext(filename))
+
+ for _, v := range c.Static.Always {
+ if ext == v {
+ return true
+ }
+ }
+
+ return false
+}
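
Both helpers match on the lowercased file extension only. A quick standalone check of that rule (local helper, not the plugin type):

package main

import (
	"fmt"
	"path"
	"strings"
)

// forbidden mirrors AlwaysForbid above: compare the lowercased extension
// against a deny list such as [".php", ".htaccess"].
func forbidden(filename string, deny []string) bool {
	ext := strings.ToLower(path.Ext(filename))
	for _, d := range deny {
		if ext == d {
			return true
		}
	}
	return false
}

func main() {
	deny := []string{".php", ".htaccess"}
	fmt.Println(forbidden("/index.PHP", deny)) // true: the check is case-insensitive
	fmt.Println(forbidden("/app.css", deny))   // false
}
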
diff --git a/plugins/static/plugin.go b/plugins/static/plugin.go
new file mode 100644
index 00000000..06b384df
--- /dev/null
+++ b/plugins/static/plugin.go
@@ -0,0 +1,110 @@
+package static
+
+import (
+ "net/http"
+ "path"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// PluginName contains the default plugin name.
+const PluginName = "static"
+
+const RootPluginName = "http"
+
+// Plugin serves static files as an HTTP middleware.
+type Plugin struct {
+ // server configuration (location, forbidden files, etc.)
+ cfg *Config
+
+ log logger.Logger
+
+ // root is the initialized http.Dir
+ root http.Dir
+}
+
+// Init configures the service and must return an error in case of
+// misconfiguration. The service must not be used without proper configuration pushed first.
+func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ const op = errors.Op("static plugin init")
+ err := cfg.UnmarshalKey(RootPluginName, &s.cfg)
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ s.log = log
+ s.root = http.Dir(s.cfg.Static.Dir)
+
+ err = s.cfg.Valid()
+ if err != nil {
+ return errors.E(op, errors.Disabled, err)
+ }
+
+ return nil
+}
+
+func (s *Plugin) Name() string {
+ return PluginName
+}
+
+// Middleware serves the request from the static root when possible; otherwise it passes the request to the next handler.
+func (s *Plugin) Middleware(next http.Handler) http.HandlerFunc {
+ // Define the http.HandlerFunc
+ return func(w http.ResponseWriter, r *http.Request) {
+ if s.cfg.Static.Request != nil {
+ for k, v := range s.cfg.Static.Request {
+ r.Header.Add(k, v)
+ }
+ }
+
+ if s.cfg.Static.Response != nil {
+ for k, v := range s.cfg.Static.Response {
+ w.Header().Set(k, v)
+ }
+ }
+
+ if !s.handleStatic(w, r) {
+ next.ServeHTTP(w, r)
+ }
+ }
+}
+
+func (s *Plugin) handleStatic(w http.ResponseWriter, r *http.Request) bool {
+ fPath := path.Clean(r.URL.Path)
+
+ if s.cfg.AlwaysForbid(fPath) {
+ return false
+ }
+
+ f, err := s.root.Open(fPath)
+ if err != nil {
+ s.log.Error("file open error", "error", err)
+ if s.cfg.AlwaysServe(fPath) {
+ w.WriteHeader(http.StatusNotFound)
+ return true
+ }
+
+ return false
+ }
+ defer func() {
+ err = f.Close()
+ if err != nil {
+ s.log.Error("file closing error", "error", err)
+ }
+ }()
+
+ d, err := f.Stat()
+ if err != nil {
+ return false
+ }
+
+ // do not serve directories
+ if d.IsDir() {
+ return false
+ }
+
+ http.ServeContent(w, r, d.Name(), d.ModTime(), f)
+ return true
+}
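
Outside of RoadRunner's HTTP plugin the same Middleware(next) shape is just ordinary net/http middleware. A hedged sketch of wiring a handler that way with the standard library (plain net/http, not the plugin wiring):

package main

import (
	"fmt"
	"net/http"
)

// withHeader is a minimal middleware in the same Middleware(next) style:
// it decorates the response and falls through to the next handler.
func withHeader(next http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Static-Example", "1")
		next.ServeHTTP(w, r)
	}
}

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "handled by the application")
	})
	_ = http.ListenAndServe(":8080", withHeader(app))
}
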
diff --git a/pool.go b/pool.go
deleted file mode 100644
index d863e96f..00000000
--- a/pool.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package roadrunner
-
-const (
- // EventWorkerConstruct thrown when new worker is spawned.
- EventWorkerConstruct = iota + 100
-
- // EventWorkerDestruct thrown after worker destruction.
- EventWorkerDestruct
-
- // EventWorkerKill thrown after worker is being forcefully killed.
- EventWorkerKill
-
- // EventWorkerError thrown any worker related even happen (passed with WorkerError)
- EventWorkerError
-
- // EventWorkerDead thrown when worker stops worker for any reason.
- EventWorkerDead
-
- // EventPoolError caused on pool wide errors
- EventPoolError
-)
-
-// Pool managed set of inner worker processes.
-type Pool interface {
- // Listen all caused events to attached controller.
- Listen(l func(event int, ctx interface{}))
-
- // Exec one task with given payload and context, returns result or error.
- Exec(rqs *Payload) (rsp *Payload, err error)
-
- // Workers returns worker list associated with the pool.
- Workers() (workers []*Worker)
-
- // Remove forces pool to remove specific worker. Return true is this is first remove request on given worker.
- Remove(w *Worker, err error) bool
-
- // Destroy all underlying workers (but let them to complete the task).
- Destroy()
-}
diff --git a/protocol.go b/protocol.go
deleted file mode 100644
index 486f1055..00000000
--- a/protocol.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "os"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/goridge/v2"
-)
-
-type stopCommand struct {
- Stop bool `json:"stop"`
-}
-
-type pidCommand struct {
- Pid int `json:"pid"`
-}
-
-func sendControl(rl goridge.Relay, v interface{}) error {
- if data, ok := v.([]byte); ok {
- return rl.Send(data, goridge.PayloadControl|goridge.PayloadRaw)
- }
-
- j := json.ConfigCompatibleWithStandardLibrary
- data, err := j.Marshal(v)
- if err != nil {
- return fmt.Errorf("invalid payload: %s", err)
- }
-
- return rl.Send(data, goridge.PayloadControl)
-}
-
-func fetchPID(rl goridge.Relay) (pid int, err error) {
- if err := sendControl(rl, pidCommand{Pid: os.Getpid()}); err != nil {
- return 0, err
- }
-
- body, p, err := rl.Receive()
- if err != nil {
- return 0, err
- }
- if !p.HasFlag(goridge.PayloadControl) {
- return 0, fmt.Errorf("unexpected response, header is missing")
- }
-
- link := &pidCommand{}
- if err := json.Unmarshal(body, link); err != nil {
- return 0, err
- }
-
- return link.Pid, nil
-}
diff --git a/protocol_test.go b/protocol_test.go
deleted file mode 100644
index f17ffe79..00000000
--- a/protocol_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package roadrunner
-
-import (
- "testing"
-
- "github.com/pkg/errors"
- "github.com/spiral/goridge/v2"
- "github.com/stretchr/testify/assert"
-)
-
-type relayMock struct {
- error bool
- payload string
-}
-
-func (r *relayMock) Send(data []byte, flags byte) (err error) {
- if r.error {
- return errors.New("send error")
- }
-
- return nil
-}
-
-func (r *relayMock) Receive() (data []byte, p goridge.Prefix, err error) {
- return []byte(r.payload), goridge.NewPrefix().WithFlag(goridge.PayloadControl), nil
-}
-
-func (r *relayMock) Close() error {
- return nil
-}
-
-func Test_Protocol_Errors(t *testing.T) {
- err := sendControl(&relayMock{}, make(chan int))
- assert.Error(t, err)
-}
-
-func Test_Protocol_FetchPID(t *testing.T) {
- pid, err := fetchPID(&relayMock{error: false, payload: "{\"pid\":100}"})
- assert.NoError(t, err)
- assert.Equal(t, 100, pid)
-
- _, err = fetchPID(&relayMock{error: true, payload: "{\"pid\":100}"})
- assert.Error(t, err)
-
- _, err = fetchPID(&relayMock{error: false, payload: "{\"pid:100"})
- assert.Error(t, err)
-}
diff --git a/rr b/rr
new file mode 100755
index 00000000..c0ff4c40
--- /dev/null
+++ b/rr
Binary files differ
diff --git a/server.go b/server.go
deleted file mode 100644
index a0eb8fcf..00000000
--- a/server.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package roadrunner
-
-import (
- "sync"
-
- "github.com/pkg/errors"
-)
-
-const (
- // EventServerStart triggered when server creates new pool.
- EventServerStart = iota + 200
-
- // EventServerStop triggered when server creates new pool.
- EventServerStop
-
- // EventServerFailure triggered when server is unable to replace dead pool.
- EventServerFailure
-
- // EventPoolConstruct triggered when server creates new pool.
- EventPoolConstruct
-
- // EventPoolDestruct triggered when server destroys existed pool.
- EventPoolDestruct
-)
-
-// Controllable defines the ability to attach rr controller.
-type Controllable interface {
- // Server represents RR server
- Server() *Server
-}
-
-// Server manages pool creation and swapping.
-type Server struct {
- // configures server, pool, cmd creation and factory.
- cfg *ServerConfig
-
- // protects pool while the re-configuration
- mu sync.Mutex
-
- // indicates that server was started
- started bool
-
- // creates and connects to workers
- factory Factory
-
- // associated pool controller
- controller Controller
-
- // currently active pool instance
- mup sync.Mutex
- pool Pool
- pController Controller
-
- // observes pool events (can be attached to multiple pools at the same time)
- mul sync.Mutex
- lsn func(event int, ctx interface{})
-}
-
-// NewServer creates new router. Make sure to call configure before the usage.
-func NewServer(cfg *ServerConfig) *Server {
- return &Server{cfg: cfg}
-}
-
-// Listen attaches server event controller.
-func (s *Server) Listen(l func(event int, ctx interface{})) {
- s.mul.Lock()
- defer s.mul.Unlock()
-
- s.lsn = l
-}
-
-// Attach attaches worker controller.
-func (s *Server) Attach(c Controller) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- s.controller = c
-
- s.mul.Lock()
- if s.pController != nil && s.pool != nil {
- s.pController.Detach()
- s.pController = s.controller.Attach(s.pool)
- }
- s.mul.Unlock()
-}
-
-// Start underlying worker pool, configure factory and command provider.
-func (s *Server) Start() (err error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.factory, err = s.cfg.makeFactory(); err != nil {
- return err
- }
-
- if s.pool, err = NewPool(s.cfg.makeCommand(), s.factory, *s.cfg.Pool); err != nil {
- return err
- }
-
- if s.controller != nil {
- s.pController = s.controller.Attach(s.pool)
- }
-
- s.pool.Listen(s.poolListener)
- s.started = true
- s.throw(EventServerStart, s)
-
- return nil
-}
-
-// Stop underlying worker pool and close the factory.
-func (s *Server) Stop() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if !s.started {
- return
- }
-
- s.throw(EventPoolDestruct, s.pool)
-
- if s.pController != nil {
- s.pController.Detach()
- s.pController = nil
- }
-
- s.pool.Destroy()
- s.factory.Close()
-
- s.factory = nil
- s.pool = nil
- s.started = false
- s.throw(EventServerStop, s)
-}
-
-var ErrNoAssociatedPool = errors.New("no associared pool")
-
-// Exec one task with given payload and context, returns result or error.
-func (s *Server) Exec(rqs *Payload) (rsp *Payload, err error) {
- pool := s.Pool()
- if pool == nil {
- return nil, ErrNoAssociatedPool
- }
-
- return pool.Exec(rqs)
-}
-
-// Reconfigure re-configures underlying pool and destroys it's previous version if any. Reconfigure will ignore factory
-// and relay settings.
-func (s *Server) Reconfigure(cfg *ServerConfig) error {
- s.mup.Lock()
- defer s.mup.Unlock()
-
- s.mu.Lock()
- if !s.started {
- s.cfg = cfg
- s.mu.Unlock()
- return nil
- }
- s.mu.Unlock()
-
- if s.cfg.Differs(cfg) {
- return errors.New("unable to reconfigure server (cmd and pool changes are allowed)")
- }
-
- s.mu.Lock()
- previous := s.pool
- pWatcher := s.pController
- s.mu.Unlock()
-
- pool, err := NewPool(cfg.makeCommand(), s.factory, *cfg.Pool)
- if err != nil {
- return err
- }
-
- pool.Listen(s.poolListener)
-
- s.mu.Lock()
- s.cfg.Pool, s.pool = cfg.Pool, pool
-
- if s.controller != nil {
- s.pController = s.controller.Attach(pool)
- }
-
- s.mu.Unlock()
-
- s.throw(EventPoolConstruct, pool)
-
- if previous != nil {
- go func(previous Pool, pWatcher Controller) {
- s.throw(EventPoolDestruct, previous)
- if pWatcher != nil {
- pWatcher.Detach()
- }
-
- previous.Destroy()
- }(previous, pWatcher)
- }
-
- return nil
-}
-
-// Reset resets the state of underlying pool and rebuilds all of it's workers.
-func (s *Server) Reset() error {
- s.mu.Lock()
- cfg := s.cfg
- s.mu.Unlock()
-
- return s.Reconfigure(cfg)
-}
-
-// Workers returns worker list associated with the server pool.
-func (s *Server) Workers() (workers []*Worker) {
- p := s.Pool()
- if p == nil {
- return nil
- }
-
- return p.Workers()
-}
-
-// Pool returns active pool or error.
-func (s *Server) Pool() Pool {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- return s.pool
-}
-
-// Listen pool events.
-func (s *Server) poolListener(event int, ctx interface{}) {
- if event == EventPoolError {
- // pool failure, rebuilding
- if err := s.Reset(); err != nil {
- s.mu.Lock()
- s.started = false
- s.pool = nil
- s.factory = nil
- s.mu.Unlock()
-
- // everything is dead, this is recoverable but heavy state
- s.throw(EventServerFailure, err)
- }
- }
-
- // bypassing to user specified lsn
- s.throw(event, ctx)
-}
-
-// throw invokes event handler if any.
-func (s *Server) throw(event int, ctx interface{}) {
- s.mul.Lock()
- if s.lsn != nil {
- s.lsn(event, ctx)
- }
- s.mul.Unlock()
-}
diff --git a/server_config.go b/server_config.go
deleted file mode 100644
index ea4da8dd..00000000
--- a/server_config.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package roadrunner
-
-import (
- "errors"
- "fmt"
- "net"
- "os"
- "os/exec"
- "strings"
- "sync"
- "syscall"
- "time"
-
- "github.com/spiral/roadrunner/osutil"
-)
-
-// CommandProducer can produce commands.
-type CommandProducer func(cfg *ServerConfig) func() *exec.Cmd
-
-// ServerConfig config combines factory, pool and cmd configurations.
-type ServerConfig struct {
- // Command includes command strings with all the parameters, example: "php worker.php pipes".
- Command string
-
- // User under which process will be started
- User string
-
- // CommandProducer overwrites
- CommandProducer CommandProducer
-
- // Relay defines connection method and factory to be used to connect to workers:
- // "pipes", "tcp://:6001", "unix://rr.sock"
- // This config section must not change on re-configuration.
- Relay string
-
- // RelayTimeout defines for how long socket factory will be waiting for worker connection. This config section
- // must not change on re-configuration.
- RelayTimeout time.Duration
-
- // Pool defines worker pool configuration, number of workers, timeouts and etc. This config section might change
- // while server is running.
- Pool *Config
-
- // values defines set of values to be passed to the command context.
- mu sync.Mutex
- env map[string]string
-}
-
-// InitDefaults sets missing values to their default values.
-func (cfg *ServerConfig) InitDefaults() error {
- cfg.Relay = "pipes"
- cfg.RelayTimeout = time.Minute
-
- if cfg.Pool == nil {
- cfg.Pool = &Config{}
- }
-
- return cfg.Pool.InitDefaults()
-}
-
-// UpscaleDurations converts duration values from nanoseconds to seconds.
-func (cfg *ServerConfig) UpscaleDurations() {
- if cfg.RelayTimeout < time.Microsecond {
- cfg.RelayTimeout = time.Second * time.Duration(cfg.RelayTimeout.Nanoseconds())
- }
-
- if cfg.Pool.AllocateTimeout < time.Microsecond {
- cfg.Pool.AllocateTimeout = time.Second * time.Duration(cfg.Pool.AllocateTimeout.Nanoseconds())
- }
-
- if cfg.Pool.DestroyTimeout < time.Microsecond {
- cfg.Pool.DestroyTimeout = time.Second * time.Duration(cfg.Pool.DestroyTimeout.Nanoseconds())
- }
-}
-
-// Differs returns true if configuration has changed but ignores pool or cmd changes.
-func (cfg *ServerConfig) Differs(new *ServerConfig) bool {
- return cfg.Relay != new.Relay || cfg.RelayTimeout != new.RelayTimeout
-}
-
-// SetEnv sets new environment variable. Value is automatically uppercase-d.
-func (cfg *ServerConfig) SetEnv(k, v string) {
- cfg.mu.Lock()
- defer cfg.mu.Unlock()
-
- if cfg.env == nil {
- cfg.env = make(map[string]string)
- }
-
- cfg.env[k] = v
-}
-
-// GetEnv must return list of env variables.
-func (cfg *ServerConfig) GetEnv() (env []string) {
- env = append(os.Environ(), fmt.Sprintf("RR_RELAY=%s", cfg.Relay))
- for k, v := range cfg.env {
- env = append(env, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
- }
-
- return
-}
-
-//=================================== PRIVATE METHODS ======================================================
-
-func (cfg *ServerConfig) makeCommand() func() *exec.Cmd {
- cfg.mu.Lock()
- defer cfg.mu.Unlock()
-
- if cfg.CommandProducer != nil {
- return cfg.CommandProducer(cfg)
- }
-
- var cmdArgs []string
- cmdArgs = append(cmdArgs, strings.Split(cfg.Command, " ")...)
-
- return func() *exec.Cmd {
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
- osutil.IsolateProcess(cmd)
-
- // if user is not empty, and OS is linux or macos
- // execute php worker from that particular user
- if cfg.User != "" {
- err := osutil.ExecuteFromUser(cmd, cfg.User)
- if err != nil {
- return nil
- }
- }
-
- cmd.Env = cfg.GetEnv()
-
- return cmd
- }
-}
-
-// makeFactory creates and connects new factory instance based on given parameters.
-func (cfg *ServerConfig) makeFactory() (Factory, error) {
- if cfg.Relay == "pipes" || cfg.Relay == "pipe" {
- return NewPipeFactory(), nil
- }
-
- dsn := strings.Split(cfg.Relay, "://")
- if len(dsn) != 2 {
- return nil, errors.New("invalid relay DSN (pipes, tcp://:6001, unix://rr.sock)")
- }
-
- if dsn[0] == "unix" && fileExists(dsn[1]) {
- err := syscall.Unlink(dsn[1])
- if err != nil {
- return nil, err
- }
- }
-
- ln, err := net.Listen(dsn[0], dsn[1])
- if err != nil {
- return nil, err
- }
-
- return NewSocketFactory(ln, cfg.RelayTimeout), nil
-}
-
-// fileExists checks if a file exists and is not a directory before we
-// try using it to prevent further errors.
-func fileExists(filename string) bool {
- info, err := os.Stat(filename)
- if os.IsNotExist(err) {
- return false
- }
- return !info.IsDir()
-}
diff --git a/server_config_test.go b/server_config_test.go
deleted file mode 100644
index 4dae7550..00000000
--- a/server_config_test.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package roadrunner
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_ServerConfig_PipeFactory(t *testing.T) {
- cfg := &ServerConfig{Relay: "pipes"}
- f, err := cfg.makeFactory()
-
- assert.NoError(t, err)
- assert.IsType(t, &PipeFactory{}, f)
-
- cfg = &ServerConfig{Relay: "pipe"}
- f, err = cfg.makeFactory()
- assert.NoError(t, err)
- assert.NotNil(t, f)
- defer func() {
- err := f.Close()
- if err != nil {
- t.Errorf("error closing factory or underlying connections: error %v", err)
- }
- }()
-
- assert.NoError(t, err)
- assert.IsType(t, &PipeFactory{}, f)
-}
-
-func Test_ServerConfig_SocketFactory(t *testing.T) {
- cfg := &ServerConfig{Relay: "tcp://:9111"}
- f1, err := cfg.makeFactory()
- assert.NoError(t, err)
- assert.NotNil(t, f1)
- defer func() {
- err := f1.Close()
-
- if err != nil {
- t.Errorf("error closing factory or underlying connections: error %v", err)
- }
- }()
-
- assert.NoError(t, err)
- assert.IsType(t, &SocketFactory{}, f1)
- assert.Equal(t, "tcp", f1.(*SocketFactory).ls.Addr().Network())
- assert.Equal(t, "[::]:9111", f1.(*SocketFactory).ls.Addr().String())
-
- cfg = &ServerConfig{Relay: "tcp://localhost:9112"}
- f, err := cfg.makeFactory()
- assert.NoError(t, err)
- assert.NotNil(t, f)
- defer func() {
- err := f.Close()
- if err != nil {
- t.Errorf("error closing factory or underlying connections: error %v", err)
- }
- }()
-
- assert.NoError(t, err)
- assert.IsType(t, &SocketFactory{}, f)
- assert.Equal(t, "tcp", f.(*SocketFactory).ls.Addr().Network())
- assert.Equal(t, "127.0.0.1:9112", f.(*SocketFactory).ls.Addr().String())
-}
-
-func Test_ServerConfig_UnixSocketFactory(t *testing.T) {
- cfg := &ServerConfig{Relay: "unix://unix.sock"}
- f, err := cfg.makeFactory()
- if err != nil {
- t.Error(err)
- }
-
- defer func() {
- err := f.Close()
- if err != nil {
- t.Errorf("error closing factory or underlying connections: error %v", err)
- }
- }()
-
- assert.NoError(t, err)
- assert.IsType(t, &SocketFactory{}, f)
- assert.Equal(t, "unix", f.(*SocketFactory).ls.Addr().Network())
- assert.Equal(t, "unix.sock", f.(*SocketFactory).ls.Addr().String())
-}
-
-func Test_ServerConfig_ErrorFactory(t *testing.T) {
- cfg := &ServerConfig{Relay: "uni:unix.sock"}
- f, err := cfg.makeFactory()
- assert.Nil(t, f)
- assert.Error(t, err)
- assert.Equal(t, "invalid relay DSN (pipes, tcp://:6001, unix://rr.sock)", err.Error())
-}
-
-func Test_ServerConfig_ErrorMethod(t *testing.T) {
- cfg := &ServerConfig{Relay: "xinu://unix.sock"}
-
- f, err := cfg.makeFactory()
- assert.Nil(t, f)
- assert.Error(t, err)
-}
-
-func Test_ServerConfig_Cmd(t *testing.T) {
- cfg := &ServerConfig{
- Command: "php tests/client.php pipes",
- }
-
- cmd := cfg.makeCommand()
- assert.NotNil(t, cmd)
-}
-
-func Test_ServerConfig_SetEnv(t *testing.T) {
- cfg := &ServerConfig{
- Command: "php tests/client.php pipes",
- Relay: "pipes",
- }
-
- cfg.SetEnv("key", "value")
-
- cmd := cfg.makeCommand()
- assert.NotNil(t, cmd)
-
- c := cmd()
-
- assert.Contains(t, c.Env, "KEY=value")
- assert.Contains(t, c.Env, "RR_RELAY=pipes")
-}
-
-func Test_ServerConfig_SetEnv_Relay(t *testing.T) {
- cfg := &ServerConfig{
- Command: "php tests/client.php pipes",
- Relay: "unix://rr.sock",
- }
-
- cfg.SetEnv("key", "value")
-
- cmd := cfg.makeCommand()
- assert.NotNil(t, cmd)
-
- c := cmd()
-
- assert.Contains(t, c.Env, "KEY=value")
- assert.Contains(t, c.Env, "RR_RELAY=unix://rr.sock")
-}
-
-func Test_ServerConfigDefaults(t *testing.T) {
- cfg := &ServerConfig{
- Command: "php tests/client.php pipes",
- }
-
- err := cfg.InitDefaults()
- if err != nil {
- t.Errorf("error during the InitDefaults: error %v", err)
- }
-
- assert.Equal(t, "pipes", cfg.Relay)
- assert.Equal(t, time.Minute, cfg.Pool.AllocateTimeout)
- assert.Equal(t, time.Minute, cfg.Pool.DestroyTimeout)
-}
-
-func Test_Config_Upscale(t *testing.T) {
- cfg := &ServerConfig{
- Command: "php tests/client.php pipes",
- RelayTimeout: 1,
- Pool: &Config{
- AllocateTimeout: 1,
- DestroyTimeout: 1,
- },
- }
-
- cfg.UpscaleDurations()
- assert.Equal(t, time.Second, cfg.RelayTimeout)
- assert.Equal(t, time.Second, cfg.Pool.AllocateTimeout)
- assert.Equal(t, time.Second, cfg.Pool.DestroyTimeout)
-}
diff --git a/server_test.go b/server_test.go
deleted file mode 100644
index a8d52856..00000000
--- a/server_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package roadrunner
-
-import (
- "os/exec"
- "runtime"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestServer_PipesEcho(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func TestServer_NoPool(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.Error(t, err)
- assert.Nil(t, res)
-}
-
-func TestServer_SocketEcho(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo tcp",
- Relay: "tcp://:9007",
- RelayTimeout: 10 * time.Second,
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func TestServer_Configure_BeforeStart(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- err := rr.Reconfigure(&ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 2,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- assert.NoError(t, err)
-
- assert.NoError(t, rr.Start())
-
- res, err := rr.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
- assert.Len(t, rr.Workers(), 2)
-}
-
-func TestServer_Stop_NotStarted(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
-
- rr.Stop()
- assert.Nil(t, rr.Workers())
-}
-
-func TestServer_Reconfigure(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
- assert.Len(t, rr.Workers(), 1)
-
- err := rr.Reconfigure(&ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 2,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- assert.NoError(t, err)
-
- assert.Len(t, rr.Workers(), 2)
-}
-
-func TestServer_Reset(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
- assert.Len(t, rr.Workers(), 1)
-
- pid := *rr.Workers()[0].Pid
- assert.NoError(t, rr.Reset())
- assert.Len(t, rr.Workers(), 1)
- assert.NotEqual(t, pid, rr.Workers()[0].Pid)
-}
-
-func TestServer_ReplacePool(t *testing.T) {
- rr := NewServer(
- &ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
-
- constructed := make(chan interface{})
- rr.Listen(func(e int, ctx interface{}) {
- if e == EventPoolConstruct {
- close(constructed)
- }
- })
-
- err := rr.Reset()
- if err != nil {
- t.Errorf("error resetting the pool: error %v", err)
- }
- <-constructed
-
- for _, w := range rr.Workers() {
- assert.Equal(t, StateReady, w.state.Value())
- }
-}
-
-func TestServer_ServerFailure(t *testing.T) {
- rr := NewServer(&ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
-
- failure := make(chan interface{})
- rr.Listen(func(e int, ctx interface{}) {
- if e == EventServerFailure {
- failure <- nil
- }
- })
-
- // emulating potential server failure
- rr.cfg.Command = "php tests/client.php echo broken-connection"
- rr.pool.(*StaticPool).cmd = func() *exec.Cmd {
- return exec.Command("php", "tests/client.php", "echo", "broken-connection")
- }
- // killing random worker and expecting pool to replace it
- err := rr.Workers()[0].cmd.Process.Kill()
- if err != nil {
- t.Errorf("error killing the process: error %v", err)
- }
-
- <-failure
- assert.True(t, true)
-}
diff --git a/service/container.go b/service/container.go
deleted file mode 100644
index 49eea733..00000000
--- a/service/container.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package service
-
-import (
- "fmt"
- "reflect"
- "sync"
-
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-var errNoConfig = fmt.Errorf("no config has been provided")
-var errTempFix223 = fmt.Errorf("temporary error for fix #223") // meant no error here, just shutdown the server
-
-// InitMethod contains name of the method to be automatically invoked while service initialization. Must return
-// (bool, error). Container can be requested as well. Config can be requested in a form
-// of service.Config or pointer to service specific config struct (automatically unmarshalled), config argument must
-// implement service.HydrateConfig.
-const InitMethod = "Init"
-
-// Service can serve. Services can provide Init method which must return (bool, error) signature and might accept
-// other services and/or configs as dependency.
-type Service interface {
- // Serve serves.
- Serve() error
-
- // Detach stops the service.
- Stop()
-}
-
-// Container controls all internal RR services and provides plugin based system.
-type Container interface {
- // Register add new service to the container under given name.
- Register(name string, service interface{})
-
-	// Init configures all underlying services with the given configuration.
- Init(cfg Config) error
-
-	// Has checks if a service has been registered.
- Has(service string) bool
-
-	// Get returns a service instance by its name, or nil if the service is not found. The current service status
-	// is returned as the second value.
- Get(service string) (svc interface{}, status int)
-
- // Serve all configured services. Non blocking.
- Serve() error
-
- // Close all active services.
- Stop()
-
- // List service names.
- List() []string
-}
-
-// Config provides ability to slice configuration sections and unmarshal configuration data into
-// given structure.
-type Config interface {
-	// Get returns a nested config section (sub-map), or nil if the section is not found.
- Get(service string) Config
-
-	// Unmarshal unmarshals config data into the given struct.
- Unmarshal(out interface{}) error
-}
-
-// HydrateConfig provides ability to automatically hydrate config with values using
-// service.Config as the source.
-type HydrateConfig interface {
- // Hydrate must populate config values using given config source.
- // Must return error if config is not valid.
- Hydrate(cfg Config) error
-}
-
-// DefaultsConfig declares the ability to be initiated without config data provided.
-type DefaultsConfig interface {
-	// InitDefaults initializes a blank config with a pre-defined set of default values.
- InitDefaults() error
-}
-
-type container struct {
- log logrus.FieldLogger
- mu sync.Mutex
- services []*entry
- errc chan struct {
- name string
- err error
- }
-}
-
-// NewContainer creates new service container.
-func NewContainer(log logrus.FieldLogger) Container {
- return &container{
- log: log,
- services: make([]*entry, 0),
- errc: make(chan struct {
- name string
- err error
- }, 1),
- }
-}
-
-// Register adds a new service to the container under the given name.
-func (c *container) Register(name string, service interface{}) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- c.services = append(c.services, &entry{
- name: name,
- svc: service,
- status: StatusInactive,
- })
-}
-
-// Has checks if a service has been registered.
-func (c *container) Has(target string) bool {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, e := range c.services {
- if e.name == target {
- return true
- }
- }
-
- return false
-}
-
-// Get returns a service instance by its name, or nil if the service is not found.
-func (c *container) Get(target string) (svc interface{}, status int) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, e := range c.services {
- if e.name == target {
- return e.svc, e.getStatus()
- }
- }
-
- return nil, StatusUndefined
-}
-
-// Init configures all underlying services with given configuration.
-func (c *container) Init(cfg Config) error {
- for _, e := range c.services {
- if e.getStatus() >= StatusOK {
- return fmt.Errorf("service [%s] has already been configured", e.name)
- }
-
- // inject service dependencies
- if ok, err := c.initService(e.svc, cfg.Get(e.name)); err != nil {
- // soft error (skipping)
- if err == errNoConfig {
- c.log.Debugf("[%s]: disabled", e.name)
- continue
- }
-
- return errors.Wrap(err, fmt.Sprintf("[%s]", e.name))
- } else if ok {
- e.setStatus(StatusOK)
- } else {
- c.log.Debugf("[%s]: disabled", e.name)
- }
- }
-
- return nil
-}
-
-// Serve all configured services. Non blocking.
-func (c *container) Serve() error {
- var running = 0
- for _, e := range c.services {
- if e.hasStatus(StatusOK) && e.canServe() {
- running++
- c.log.Debugf("[%s]: started", e.name)
- go func(e *entry) {
- e.setStatus(StatusServing)
- defer e.setStatus(StatusStopped)
- if err := e.svc.(Service).Serve(); err != nil {
- c.errc <- struct {
- name string
- err error
- }{name: e.name, err: errors.Wrap(err, fmt.Sprintf("[%s]", e.name))}
- } else {
- c.errc <- struct {
- name string
- err error
- }{name: e.name, err: errTempFix223}
- }
- }(e)
- }
- }
-
-	// nothing to serve when no services are running (e.g. empty config)
- if running == 0 {
- return nil
- }
-
- for fail := range c.errc {
- if fail.err == errTempFix223 {
-			// a service stopped without an error (errTempFix223), e.g. after Stop; stop waiting
- break
- } else {
- c.log.Errorf("[%s]: %s", fail.name, fail.err)
- c.Stop()
- return fail.err
- }
- }
-
- return nil
-}
-
-// Stop sends a stop command to all running services.
-func (c *container) Stop() {
- for _, e := range c.services {
- if e.hasStatus(StatusServing) {
- e.setStatus(StatusStopping)
- e.svc.(Service).Stop()
- e.setStatus(StatusStopped)
-
- c.log.Debugf("[%s]: stopped", e.name)
- }
- }
-}
-
-// List all service names.
-func (c *container) List() []string {
- names := make([]string, 0, len(c.services))
- for _, e := range c.services {
- names = append(names, e.name)
- }
-
- return names
-}
-
-// calls Init method with automatically resolved arguments.
-func (c *container) initService(s interface{}, segment Config) (bool, error) {
- r := reflect.TypeOf(s)
-
- m, ok := r.MethodByName(InitMethod)
- if !ok {
-		// no Init method is present; assuming the service does not need initialization.
- return true, nil
- }
-
- if err := c.verifySignature(m); err != nil {
- return false, err
- }
-
- // hydrating
- values, err := c.resolveValues(s, m, segment)
- if err != nil {
- return false, err
- }
-
- // initiating service
- out := m.Func.Call(values)
-
- if out[1].IsNil() {
- return out[0].Bool(), nil
- }
-
- return out[0].Bool(), out[1].Interface().(error)
-}
-
-// resolveValues returns slice of call arguments for service Init method.
-func (c *container) resolveValues(s interface{}, m reflect.Method, cfg Config) (values []reflect.Value, err error) {
- for i := 0; i < m.Type.NumIn(); i++ {
- v := m.Type.In(i)
-
- switch {
- case v.ConvertibleTo(reflect.ValueOf(s).Type()): // service itself
- values = append(values, reflect.ValueOf(s))
-
- case v.Implements(reflect.TypeOf((*Container)(nil)).Elem()): // container
- values = append(values, reflect.ValueOf(c))
-
- case v.Implements(reflect.TypeOf((*logrus.StdLogger)(nil)).Elem()),
- v.Implements(reflect.TypeOf((*logrus.FieldLogger)(nil)).Elem()),
- v.ConvertibleTo(reflect.ValueOf(c.log).Type()): // logger
- values = append(values, reflect.ValueOf(c.log))
-
- case v.Implements(reflect.TypeOf((*HydrateConfig)(nil)).Elem()): // injectable config
- sc := reflect.New(v.Elem())
-
- if dsc, ok := sc.Interface().(DefaultsConfig); ok {
- err := dsc.InitDefaults()
- if err != nil {
- return nil, err
- }
- if cfg == nil {
- values = append(values, sc)
- continue
- }
-
- } else if cfg == nil {
- return nil, errNoConfig
- }
-
- if err := sc.Interface().(HydrateConfig).Hydrate(cfg); err != nil {
- return nil, err
- }
-
- values = append(values, sc)
-
- case v.Implements(reflect.TypeOf((*Config)(nil)).Elem()): // generic config section
- if cfg == nil {
- return nil, errNoConfig
- }
-
- values = append(values, reflect.ValueOf(cfg))
-
- default: // dependency on other service (resolution to nil if service can't be found)
- value, err := c.resolveValue(v)
- if err != nil {
- return nil, err
- }
-
- values = append(values, value)
- }
- }
-
- return
-}
-
-// verifySignature checks if Init method has valid signature
-func (c *container) verifySignature(m reflect.Method) error {
- if m.Type.NumOut() != 2 {
-		return fmt.Errorf("method Init must have exactly 2 return values")
- }
-
- if m.Type.Out(0).Kind() != reflect.Bool {
- return fmt.Errorf("first return value of Init method must be bool type")
- }
-
- if !m.Type.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
-		return fmt.Errorf("second return value of Init method must be error type")
- }
-
- return nil
-}
-
-func (c *container) resolveValue(v reflect.Type) (reflect.Value, error) {
- value := reflect.Value{}
- for _, e := range c.services {
- if !e.hasStatus(StatusOK) {
- continue
- }
-
- if v.Kind() == reflect.Interface && reflect.TypeOf(e.svc).Implements(v) {
- if value.IsValid() {
-				return value, fmt.Errorf("ambiguous dependency `%s`", v)
- }
-
- value = reflect.ValueOf(e.svc)
- }
-
- if v.ConvertibleTo(reflect.ValueOf(e.svc).Type()) {
- if value.IsValid() {
-				return value, fmt.Errorf("ambiguous dependency `%s`", v)
- }
-
- value = reflect.ValueOf(e.svc)
- }
- }
-
- if !value.IsValid() {
- // placeholder (make sure to check inside the method)
- value = reflect.New(v).Elem()
- }
-
- return value, nil
-}
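To make the Init contract documented above concrete, here is a minimal hypothetical service; the example package and type names are illustrative and assume the service package import path github.com/spiral/roadrunner/service used elsewhere in this diff:

package example

import "github.com/spiral/roadrunner/service"

// Config implements service.HydrateConfig, so the container hydrates it from
// the matching configuration section automatically.
type Config struct {
	Address string
}

func (c *Config) Hydrate(cfg service.Config) error { return cfg.Unmarshal(c) }

// InitDefaults makes the configuration section optional (see DefaultsConfig).
func (c *Config) InitDefaults() error {
	c.Address = ":8080"
	return nil
}

// Service satisfies the container's Service interface and the Init contract.
type Service struct {
	cfg  *Config
	done chan struct{}
}

// Init receives the hydrated config and the container itself; returning
// false with a nil error leaves the service registered but disabled.
func (s *Service) Init(cfg *Config, c service.Container) (bool, error) {
	s.cfg = cfg
	s.done = make(chan struct{})
	return true, nil
}

func (s *Service) Serve() error { <-s.done; return nil }
func (s *Service) Stop()        { close(s.done) }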
diff --git a/service/container_test.go b/service/container_test.go
deleted file mode 100644
index f990b2cb..00000000
--- a/service/container_test.go
+++ /dev/null
@@ -1,534 +0,0 @@
-package service
-
-import (
- "errors"
- "sync"
- "testing"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/stretchr/testify/assert"
-)
-
-type testService struct {
- mu sync.Mutex
- waitForServe chan interface{}
- delay time.Duration
- ok bool
- cfg Config
- c Container
- cfgE, serveE error
- done chan interface{}
-}
-
-func (t *testService) Init(cfg Config, c Container) (enabled bool, err error) {
- t.cfg = cfg
- t.c = c
- t.done = make(chan interface{})
- return t.ok, t.cfgE
-}
-
-func (t *testService) Serve() error {
- time.Sleep(t.delay)
-
- if t.serveE != nil {
- return t.serveE
- }
-
- if c := t.waitChan(); c != nil {
- close(c)
- t.setChan(nil)
- }
-
- <-t.done
- return nil
-}
-
-func (t *testService) Stop() {
- close(t.done)
-}
-
-func (t *testService) waitChan() chan interface{} {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- return t.waitForServe
-}
-
-func (t *testService) setChan(c chan interface{}) {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- t.waitForServe = c
-}
-
-type testCfg struct{ cfg string }
-
-func (cfg *testCfg) Get(name string) Config {
- vars := make(map[string]interface{})
- j := json.ConfigCompatibleWithStandardLibrary
- err := j.Unmarshal([]byte(cfg.cfg), &vars)
- if err != nil {
- panic("error unmarshalling the cfg.cfg value")
- }
-
- v, ok := vars[name]
- if !ok {
- return nil
- }
-
- d, _ := j.Marshal(v)
- return &testCfg{cfg: string(d)}
-}
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-// dConfig defines a test service config.
-type dConfig struct {
-	// Value is a sample configuration value.
-	Value string
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *dConfig) Hydrate(cfg Config) error {
- return cfg.Unmarshal(c)
-}
-
-// InitDefaults initializes a blank config with a pre-defined set of default values.
-func (c *dConfig) InitDefaults() error {
- c.Value = "default"
-
- return nil
-}
-
-type dService struct {
- Cfg *dConfig
-}
-
-func (s *dService) Init(cfg *dConfig) (bool, error) {
- s.Cfg = cfg
- return true, nil
-}
-
-func TestContainer_Register(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testService{})
-
- assert.Equal(t, 0, len(hook.Entries))
-}
-
-func TestContainer_Has(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testService{})
-
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.True(t, c.Has("test"))
- assert.False(t, c.Has("another"))
-}
-
-func TestContainer_List(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testService{})
-
- assert.Equal(t, 0, len(hook.Entries))
- assert.Equal(t, 1, len(c.List()))
-
- assert.True(t, c.Has("test"))
- assert.False(t, c.Has("another"))
-}
-
-func TestContainer_Get(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testService{})
- assert.Equal(t, 0, len(hook.Entries))
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusInactive, st)
-
- s, st = c.Get("another")
- assert.Nil(t, s)
- assert.Equal(t, StatusUndefined, st)
-}
-
-func TestContainer_Stop_NotStarted(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testService{})
- assert.Equal(t, 0, len(hook.Entries))
-
- c.Stop()
-}
-
-func TestContainer_Configure(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{ok: true}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{"test":"something"}`}))
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusOK, st)
-}
-
-func TestContainer_Init_Default(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &dService{}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{}`}))
-
- s, st := c.Get("test")
- assert.IsType(t, &dService{}, s)
- assert.Equal(t, StatusOK, st)
-
- assert.Equal(t, "default", svc.Cfg.Value)
-}
-
-func TestContainer_Init_Default_Overwrite(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &dService{}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{"test":{"value": "something"}}`}))
-
- s, st := c.Get("test")
- assert.IsType(t, &dService{}, s)
- assert.Equal(t, StatusOK, st)
-
- assert.Equal(t, "something", svc.Cfg.Value)
-}
-
-func TestContainer_ConfigureNull(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{ok: true}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{"another":"something"}`}))
- assert.Equal(t, 1, len(hook.Entries))
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusInactive, st)
-}
-
-func TestContainer_ConfigureDisabled(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{ok: false}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{"test":"something"}`}))
- assert.Equal(t, 1, len(hook.Entries))
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusInactive, st)
-}
-
-func TestContainer_ConfigureError(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{
- ok: false,
- cfgE: errors.New("configure error"),
- }
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- err := c.Init(&testCfg{`{"test":"something"}`})
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "configure error")
- assert.Contains(t, err.Error(), "test")
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusInactive, st)
-}
-
-func TestContainer_ConfigureTwice(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{ok: true}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- assert.NoError(t, c.Init(&testCfg{`{"test":"something"}`}))
- assert.Error(t, c.Init(&testCfg{`{"test":"something"}`}))
-}
-
-// bug #276 test
-func TestContainer_ServeEmptyContainer(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{ok: true}
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
-
- go assert.NoError(t, c.Serve())
-
- time.Sleep(time.Millisecond * 500)
-
- c.Stop()
-}
-
-func TestContainer_Serve(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{
- ok: true,
- waitForServe: make(chan interface{}),
- }
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
- assert.NoError(t, c.Init(&testCfg{`{"test":"something"}`}))
-
- go func() {
- assert.NoError(t, c.Serve())
- }()
-
- <-svc.waitChan()
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusServing, st)
-
- c.Stop()
-
- s, st = c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusStopped, st)
-}
-
-func TestContainer_ServeError(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{
- ok: true,
- waitForServe: make(chan interface{}),
- serveE: errors.New("serve error"),
- }
-
- c := NewContainer(logger)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
- assert.NoError(t, c.Init(&testCfg{`{"test":"something"}`}))
-
- err := c.Serve()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "serve error")
- assert.Contains(t, err.Error(), "test")
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusStopped, st)
-}
-
-func TestContainer_ServeErrorMultiple(t *testing.T) {
- logger, hook := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- svc := &testService{
- ok: true,
- delay: time.Millisecond * 10,
- waitForServe: make(chan interface{}),
- serveE: errors.New("serve error"),
- }
-
- svc2 := &testService{
- ok: true,
- waitForServe: make(chan interface{}),
- }
-
- c := NewContainer(logger)
- c.Register("test2", svc2)
- c.Register("test", svc)
- assert.Equal(t, 0, len(hook.Entries))
- assert.NoError(t, c.Init(&testCfg{`{"test":"something", "test2":"something-else"}`}))
-
- err := c.Serve()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "serve error")
- assert.Contains(t, err.Error(), "test")
-
- s, st := c.Get("test")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusStopped, st)
-
- s, st = c.Get("test2")
- assert.IsType(t, &testService{}, s)
- assert.Equal(t, StatusStopped, st)
-}
-
-type testInitA struct{}
-
-func (t *testInitA) Init() error {
- return nil
-}
-
-type testInitB struct{}
-
-func (t *testInitB) Init() (int, error) {
- return 0, nil
-}
-
-func TestContainer_InitErrorA(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testInitA{})
-
- assert.Error(t, c.Init(&testCfg{`{"test":"something", "test2":"something-else"}`}))
-}
-
-func TestContainer_InitErrorB(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testInitB{})
-
- assert.Error(t, c.Init(&testCfg{`{"test":"something", "test2":"something-else"}`}))
-}
-
-type testInitC struct{}
-
-func (r *testInitC) Test() bool {
- return true
-}
-
-func TestContainer_NoInit(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testInitC{})
-
- assert.NoError(t, c.Init(&testCfg{`{"test":"something", "test2":"something-else"}`}))
-}
-
-type testInitD struct {
- c *testInitC //nolint:golint,unused,structcheck
-}
-
-type DCfg struct {
- V string
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *DCfg) Hydrate(cfg Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
- if c.V == "fail" {
- return errors.New("failed config")
- }
-
- return nil
-}
-
-func (t *testInitD) Init(r *testInitC, c Container, cfg *DCfg) (bool, error) {
- if r == nil {
- return false, errors.New("unable to find testInitC")
- }
-
- if c == nil {
- return false, errors.New("unable to find Container")
- }
-
- if cfg.V != "ok" {
- return false, errors.New("invalid config")
- }
-
- return false, nil
-}
-
-func TestContainer_InitDependency(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testInitC{})
- c.Register("test2", &testInitD{})
-
- assert.NoError(t, c.Init(&testCfg{`{"test":"something", "test2":{"v":"ok"}}`}))
-}
-
-func TestContainer_InitDependencyFail(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test", &testInitC{})
- c.Register("test2", &testInitD{})
-
- assert.Error(t, c.Init(&testCfg{`{"test":"something", "test2":{"v":"fail"}}`}))
-}
-
-func TestContainer_InitDependencyEmpty(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := NewContainer(logger)
- c.Register("test2", &testInitD{})
-
- assert.Contains(t, c.Init(&testCfg{`{"test2":{"v":"ok"}}`}).Error(), "testInitC")
-}
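The dependency-resolution behaviour exercised by TestContainer_InitDependency above can be summarised in a short sketch; the storage/api names are hypothetical and only show that a previously registered, already configured service is injected by type into another service's Init:

package example

import "errors"

// storage has no Init method, so the container marks it StatusOK as-is.
type storage struct{}

func (s *storage) Put(key, value string) {}

// api depends on *storage. The container resolves the argument by scanning
// services that already reached StatusOK, so storage must be registered
// (and therefore configured) before api.
type api struct {
	store *storage
}

func (a *api) Init(s *storage) (bool, error) {
	if s == nil {
		// the container injects a zero value when no matching service exists
		return false, errors.New("storage service is not registered")
	}
	a.store = s
	return true, nil
}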
diff --git a/service/entry.go b/service/entry.go
deleted file mode 100644
index 497742d1..00000000
--- a/service/entry.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package service
-
-import (
- "sync"
-)
-
-const (
-	// StatusUndefined when the service bus cannot find the service.
- StatusUndefined = iota
-
- // StatusInactive when service has been registered in container.
- StatusInactive
-
- // StatusOK when service has been properly configured.
- StatusOK
-
-	// StatusServing when the service is currently serving.
- StatusServing
-
- // StatusStopping when service is currently stopping.
- StatusStopping
-
-	// StatusStopped when the service has been stopped.
- StatusStopped
-)
-
-// entry associates a service instance with the given name.
-type entry struct {
- name string
- svc interface{}
- mu sync.Mutex
- status int
-}
-
-// getStatus returns the current service status.
-func (e *entry) getStatus() int {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- return e.status
-}
-
-// setStatus sets the service status.
-func (e *entry) setStatus(status int) {
- e.mu.Lock()
- defer e.mu.Unlock()
- e.status = status
-}
-
-// hasStatus checks if the entry is in a specific status.
-func (e *entry) hasStatus(status int) bool {
- return e.getStatus() == status
-}
-
-// canServe returns true if the service can serve.
-func (e *entry) canServe() bool {
- _, ok := e.svc.(Service)
- return ok
-}
diff --git a/service/entry_test.go b/service/entry_test.go
deleted file mode 100644
index 5ca9c338..00000000
--- a/service/entry_test.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package service
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestEntry_CanServeFalse(t *testing.T) {
- e := &entry{svc: nil}
- assert.False(t, e.canServe())
-}
-
-func TestEntry_CanServeTrue(t *testing.T) {
- e := &entry{svc: &testService{}}
- assert.True(t, e.canServe())
-}
diff --git a/service/env/config.go b/service/env/config.go
deleted file mode 100644
index a7da695e..00000000
--- a/service/env/config.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package env
-
-import (
- "github.com/spiral/roadrunner/service"
-)
-
-// Config defines set of env values for RR workers.
-type Config struct {
- // values to set as worker _ENV.
- Values map[string]string
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- return cfg.Unmarshal(&c.Values)
-}
-
-// InitDefaults initializes a blank config with a pre-defined set of default values.
-func (c *Config) InitDefaults() error {
- c.Values = make(map[string]string)
- return nil
-}
diff --git a/service/env/config_test.go b/service/env/config_test.go
deleted file mode 100644
index cc2bdf97..00000000
--- a/service/env/config_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package env
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate(t *testing.T) {
- cfg := &mockCfg{`{"key":"value"}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
- assert.Len(t, c.Values, 1)
-}
-
-func Test_Config_Hydrate_Empty(t *testing.T) {
- cfg := &mockCfg{`{}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
- assert.Len(t, c.Values, 0)
-}
-
-func Test_Config_Defaults(t *testing.T) {
- c := &Config{}
- err := c.InitDefaults()
- if err != nil {
- t.Errorf("Test_Config_Defaults failed: error %v", err)
- }
- assert.Len(t, c.Values, 0)
-}
diff --git a/service/env/environment.go b/service/env/environment.go
deleted file mode 100644
index ab8febf7..00000000
--- a/service/env/environment.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package env
-
-// Environment aggregates a list of environment variables. This interface can be implemented by custom types to source
-// values from external providers.
-type Environment interface {
- Setter
- Getter
-
- // Copy all environment values.
- Copy(setter Setter) error
-}
-
-// Setter provides ability to set environment value.
-type Setter interface {
- // SetEnv sets or creates environment value.
- SetEnv(key, value string)
-}
-
-// Getter provides the ability to read environment values.
-type Getter interface {
- // GetEnv must return list of env variables.
- GetEnv() (map[string]string, error)
-}
diff --git a/service/env/service.go b/service/env/service.go
deleted file mode 100644
index 83175b36..00000000
--- a/service/env/service.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package env
-
-// ID contains default service name.
-const ID = "env"
-
-// Service provides ability to map _ENV values from config file.
-type Service struct {
- // values is default set of values.
- values map[string]string
-}
-
-// NewService creates a new env service instance with the given default values.
-func NewService(defaults map[string]string) *Service {
- s := &Service{values: defaults}
- return s
-}
-
-// Init configures the service and returns true if the service is enabled. Must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (s *Service) Init(cfg *Config) (bool, error) {
- if s.values == nil {
- s.values = make(map[string]string)
- s.values["RR"] = "true"
- }
-
- for k, v := range cfg.Values {
- s.values[k] = v
- }
-
- return true, nil
-}
-
-// GetEnv must return list of env variables.
-func (s *Service) GetEnv() (map[string]string, error) {
- return s.values, nil
-}
-
-// SetEnv sets or creates environment value.
-func (s *Service) SetEnv(key, value string) {
- s.values[key] = value
-}
-
-// Copy all environment values.
-func (s *Service) Copy(setter Setter) error {
- values, err := s.GetEnv()
- if err != nil {
- return err
- }
-
- for k, v := range values {
- setter.SetEnv(k, v)
- }
-
- return nil
-}
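As a usage sketch for the Setter/Copy contract above (mapSetter is illustrative, and the import path is assumed to follow the service/env directory layout):

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/service/env"
)

// mapSetter is a minimal Setter that collects values into a plain map.
type mapSetter map[string]string

func (m mapSetter) SetEnv(key, value string) { m[key] = value }

func main() {
	svc := env.NewService(map[string]string{"RR": "true"})
	svc.SetEnv("APP_ENV", "dev")

	target := mapSetter{}
	if err := svc.Copy(target); err != nil {
		panic(err)
	}

	fmt.Println(target) // map[APP_ENV:dev RR:true]
}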
diff --git a/service/env/service_test.go b/service/env/service_test.go
deleted file mode 100644
index a354214c..00000000
--- a/service/env/service_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package env
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_NewService(t *testing.T) {
- s := NewService(map[string]string{"version": "test"})
- assert.Len(t, s.values, 1)
-}
-
-func Test_Init(t *testing.T) {
- var err error
- s := &Service{}
- _, err = s.Init(&Config{})
- if err != nil {
- t.Errorf("error during the s.Init: error %v", err)
- }
- assert.Len(t, s.values, 1)
-
- values, err := s.GetEnv()
- assert.NoError(t, err)
- assert.Equal(t, "true", values["RR"])
-}
-
-func Test_Extend(t *testing.T) {
- var err error
- s := NewService(map[string]string{"RR": "version"})
-
- _, err = s.Init(&Config{Values: map[string]string{"key": "value"}})
- if err != nil {
- t.Errorf("error during the s.Init: error %v", err)
- }
- assert.Len(t, s.values, 2)
-
- values, err := s.GetEnv()
- assert.NoError(t, err)
- assert.Len(t, values, 2)
- assert.Equal(t, "version", values["RR"])
- assert.Equal(t, "value", values["key"])
-}
-
-func Test_Set(t *testing.T) {
- var err error
- s := NewService(map[string]string{"RR": "version"})
-
- _, err = s.Init(&Config{Values: map[string]string{"key": "value"}})
- if err != nil {
- t.Errorf("error during the s.Init: error %v", err)
- }
- assert.Len(t, s.values, 2)
-
- s.SetEnv("key", "value-new")
- s.SetEnv("other", "new")
-
- values, err := s.GetEnv()
- assert.NoError(t, err)
- assert.Len(t, values, 3)
- assert.Equal(t, "version", values["RR"])
- assert.Equal(t, "value-new", values["key"])
- assert.Equal(t, "new", values["other"])
-}
-
-func Test_Copy(t *testing.T) {
- s1 := NewService(map[string]string{"RR": "version"})
- s2 := NewService(map[string]string{})
-
- s1.SetEnv("key", "value-new")
- s1.SetEnv("other", "new")
-
- assert.NoError(t, s1.Copy(s2))
-
- values, err := s2.GetEnv()
- assert.NoError(t, err)
- assert.Len(t, values, 3)
- assert.Equal(t, "version", values["RR"])
- assert.Equal(t, "value-new", values["key"])
- assert.Equal(t, "new", values["other"])
-}
diff --git a/service/gzip/config.go b/service/gzip/config.go
deleted file mode 100644
index 00ac559d..00000000
--- a/service/gzip/config.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package gzip
-
-import (
- "github.com/spiral/roadrunner/service"
-)
-
-// Config controls whether the gzip middleware is enabled.
-type Config struct {
- Enable bool
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- return cfg.Unmarshal(c)
-}
-
-// InitDefaults sets missing values to their default values.
-func (c *Config) InitDefaults() error {
- c.Enable = true
-
- return nil
-}
diff --git a/service/gzip/config_test.go b/service/gzip/config_test.go
deleted file mode 100644
index 8d03aecf..00000000
--- a/service/gzip/config_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package gzip
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate(t *testing.T) {
- cfg := &mockCfg{`{"enable": true}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error(t *testing.T) {
- cfg := &mockCfg{`{"enable": "invalid"}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &mockCfg{`{"enable": 1}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Defaults(t *testing.T) {
- c := &Config{}
- err := c.InitDefaults()
- if err != nil {
- t.Errorf("error during the InitDefaults: error %v", err)
- }
- assert.Equal(t, true, c.Enable)
-}
diff --git a/service/gzip/service.go b/service/gzip/service.go
deleted file mode 100644
index 2ba95158..00000000
--- a/service/gzip/service.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package gzip
-
-import (
- "errors"
- "net/http"
-
- "github.com/NYTimes/gziphandler"
- rrhttp "github.com/spiral/roadrunner/service/http"
-)
-
-// ID contains default service name.
-const ID = "gzip"
-
-var httpNotInitialized = errors.New("http service should be defined properly in config to use gzip")
-
-type Service struct {
- cfg *Config
-}
-
-func (s *Service) Init(cfg *Config, r *rrhttp.Service) (bool, error) {
- s.cfg = cfg
- if !s.cfg.Enable {
- return false, nil
- }
- if r == nil {
- return false, httpNotInitialized
- }
-
- r.AddMiddleware(s.middleware)
-
- return true, nil
-}
-
-func (s *Service) middleware(f http.HandlerFunc) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- gziphandler.GzipHandler(f).ServeHTTP(w, r)
- }
-}
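The middleware above is a thin wrapper around gziphandler; outside the service container the same behaviour can be reproduced in a few lines (a sketch only, the handler and address are illustrative):

package main

import (
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// responses are compressed only when the client sends
	// Accept-Encoding: gzip, which is what the middleware above relies on
	http.Handle("/", gziphandler.GzipHandler(hello))
	_ = http.ListenAndServe(":8080", nil)
}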
diff --git a/service/gzip/service_test.go b/service/gzip/service_test.go
deleted file mode 100644
index d886a339..00000000
--- a/service/gzip/service_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package gzip
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- gzip string
- httpCfg string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == rrhttp.ID {
- return &testCfg{target: cfg.httpCfg}
- }
-
- if name == ID {
- return &testCfg{target: cfg.gzip}
- }
- return nil
-}
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.target), out)
-}
-
-func Test_Disabled(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{cfg: &Config{Enable: true}})
-
- assert.NoError(t, c.Init(&testCfg{
- httpCfg: `{
- "address": ":6029",
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- }
- }`,
- gzip: `{"enable":false}`,
- }))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, st)
-}
-
-// TEST bug #275
-func Test_Bug275(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.Error(t, c.Init(&testCfg{
- httpCfg: "",
- gzip: `{"enable":true}`,
- }))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, st)
-}
diff --git a/service/headers/config_test.go b/service/headers/config_test.go
deleted file mode 100644
index 4b7c56df..00000000
--- a/service/headers/config_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package headers
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := &mockCfg{`{"request": {"From": "Something"}}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &mockCfg{`{"dir": "/dir/"`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
diff --git a/service/headers/service.go b/service/headers/service.go
deleted file mode 100644
index a3a9d9da..00000000
--- a/service/headers/service.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package headers
-
-import (
- "net/http"
- "strconv"
-
- rrhttp "github.com/spiral/roadrunner/service/http"
-)
-
-// ID contains default service name.
-const ID = "headers"
-
-// Service sets custom request/response headers and CORS rules for the http service.
-type Service struct {
-	// headers service configuration (request/response headers and CORS rules)
- cfg *Config
-}
-
-// Init configures the service and returns true if the service is enabled. Must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (s *Service) Init(cfg *Config, r *rrhttp.Service) (bool, error) {
- if r == nil {
- return false, nil
- }
-
- s.cfg = cfg
- r.AddMiddleware(s.middleware)
-
- return true, nil
-}
-
-// middleware injects the configured request/response headers and handles CORS preflight requests.
-func (s *Service) middleware(f http.HandlerFunc) http.HandlerFunc {
- // Define the http.HandlerFunc
- return func(w http.ResponseWriter, r *http.Request) {
-
- if s.cfg.Request != nil {
- for k, v := range s.cfg.Request {
- r.Header.Add(k, v)
- }
- }
-
- if s.cfg.Response != nil {
- for k, v := range s.cfg.Response {
- w.Header().Set(k, v)
- }
- }
-
- if s.cfg.CORS != nil {
- if r.Method == http.MethodOptions {
- s.preflightRequest(w, r)
- return
- }
-
- s.corsHeaders(w, r)
- }
-
- f(w, r)
- }
-}
-
-// configure OPTIONS response
-func (s *Service) preflightRequest(w http.ResponseWriter, r *http.Request) {
- headers := w.Header()
-
- headers.Add("Vary", "Origin")
- headers.Add("Vary", "Access-Control-Request-Method")
- headers.Add("Vary", "Access-Control-Request-Headers")
-
- if s.cfg.CORS.AllowedOrigin != "" {
- headers.Set("Access-Control-Allow-Origin", s.cfg.CORS.AllowedOrigin)
- }
-
- if s.cfg.CORS.AllowedHeaders != "" {
- headers.Set("Access-Control-Allow-Headers", s.cfg.CORS.AllowedHeaders)
- }
-
- if s.cfg.CORS.AllowedMethods != "" {
- headers.Set("Access-Control-Allow-Methods", s.cfg.CORS.AllowedMethods)
- }
-
- if s.cfg.CORS.AllowCredentials != nil {
- headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.CORS.AllowCredentials))
- }
-
- if s.cfg.CORS.MaxAge > 0 {
- headers.Set("Access-Control-Max-Age", strconv.Itoa(s.cfg.CORS.MaxAge))
- }
-
- w.WriteHeader(http.StatusOK)
-}
-
-// configure CORS headers
-func (s *Service) corsHeaders(w http.ResponseWriter, r *http.Request) {
- headers := w.Header()
-
- headers.Add("Vary", "Origin")
-
- if s.cfg.CORS.AllowedOrigin != "" {
- headers.Set("Access-Control-Allow-Origin", s.cfg.CORS.AllowedOrigin)
- }
-
- if s.cfg.CORS.AllowedHeaders != "" {
- headers.Set("Access-Control-Allow-Headers", s.cfg.CORS.AllowedHeaders)
- }
-
- if s.cfg.CORS.ExposedHeaders != "" {
- headers.Set("Access-Control-Expose-Headers", s.cfg.CORS.ExposedHeaders)
- }
-
- if s.cfg.CORS.AllowCredentials != nil {
- headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.CORS.AllowCredentials))
- }
-}
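Outside the container, the middleware above composes like any other http.HandlerFunc wrapper. A sketch within the same package, assuming cfg is an already hydrated *Config:

package headers

import "net/http"

// exampleChain is illustrative only: it shows how the middleware above is
// meant to be chained in front of another handler.
func exampleChain(cfg *Config, next http.HandlerFunc) http.Handler {
	s := &Service{cfg: cfg}

	// the returned handler injects the configured request/response headers
	// and, when CORS is configured, answers OPTIONS preflights before next runs
	return s.middleware(next)
}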
diff --git a/service/headers/service_test.go b/service/headers/service_test.go
deleted file mode 100644
index 03a55d1e..00000000
--- a/service/headers/service_test.go
+++ /dev/null
@@ -1,341 +0,0 @@
-package headers
-
-import (
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff/v4"
- json "github.com/json-iterator/go"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- httpCfg string
- headers string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == rrhttp.ID {
- return &testCfg{target: cfg.httpCfg}
- }
-
- if name == ID {
- return &testCfg{target: cfg.headers}
- }
- return nil
-}
-
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- return json.Unmarshal([]byte(cfg.target), out)
-}
-
-func Test_RequestHeaders(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- headers: `{"request":{"input": "custom-header"}}`,
- httpCfg: `{
- "enable": true,
- "address": ":6078",
- "maxRequestSize": 1024,
- "workers":{
- "command": "php ../../tests/http/client.php header pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- req, err := http.NewRequest("GET", "http://localhost:6078?hello=value", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
-
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "CUSTOM-HEADER", string(b))
-
- err = r.Body.Close()
- if err != nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_ResponseHeaders(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- headers: `{"response":{"output": "output-header"},"request":{"input": "custom-header"}}`,
- httpCfg: `{
- "enable": true,
- "address": ":6079",
- "maxRequestSize": 1024,
- "workers":{
- "command": "php ../../tests/http/client.php header pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- req, err := http.NewRequest("GET", "http://localhost:6079?hello=value", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- assert.Equal(t, "output-header", r.Header.Get("output"))
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "CUSTOM-HEADER", string(b))
-
- err = r.Body.Close()
- if err != nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestCORS_OPTIONS(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- headers: `{
-"cors":{
- "allowedOrigin": "*",
- "allowedHeaders": "*",
- "allowedMethods": "GET,POST,PUT,DELETE",
- "allowCredentials": true,
- "exposedHeaders": "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma",
- "maxAge": 600
-}
-}`,
- httpCfg: `{
- "enable": true,
- "address": ":16379",
- "maxRequestSize": 1024,
- "workers":{
- "command": "php ../../tests/http/client.php headers pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- req, err := http.NewRequest("OPTIONS", "http://localhost:16379", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
- assert.Equal(t, "GET,POST,PUT,DELETE", r.Header.Get("Access-Control-Allow-Methods"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
- assert.Equal(t, "600", r.Header.Get("Access-Control-Max-Age"))
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
-
- _, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
- assert.Equal(t, 200, r.StatusCode)
-
- err = r.Body.Close()
- if err != nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestCORS_Pass(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- headers: `{
-"cors":{
- "allowedOrigin": "*",
- "allowedHeaders": "*",
- "allowedMethods": "GET,POST,PUT,DELETE",
- "allowCredentials": true,
- "exposedHeaders": "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma",
- "maxAge": 600
-}
-}`,
- httpCfg: `{
- "enable": true,
- "address": ":6672",
- "maxRequestSize": 1024,
- "workers":{
- "command": "php ../../tests/http/client.php headers pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- req, err := http.NewRequest("GET", "http://localhost:6672", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
-
- _, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
- assert.Equal(t, 200, r.StatusCode)
-
- err = r.Body.Close()
- if err != nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/service/health/config.go b/service/health/config.go
deleted file mode 100644
index 60a52d6e..00000000
--- a/service/health/config.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package health
-
-import (
- "errors"
- "strings"
-
- "github.com/spiral/roadrunner/service"
-)
-
-// Config configures the health service
-type Config struct {
- // Address to listen on
- Address string
-}
-
-// Hydrate the config
-func (c *Config) Hydrate(cfg service.Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
- return c.Valid()
-}
-
-// Valid validates the configuration.
-func (c *Config) Valid() error {
- // Validate the address
- if c.Address != "" && !strings.Contains(c.Address, ":") {
- return errors.New("malformed http server address")
- }
-
- return nil
-}
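Valid only checks that a non-empty address contains a host:port separator; a small illustrative sketch of what passes and what fails:

package health

// validationExamples is illustrative only; it demonstrates the rule
// enforced by Valid above.
func validationExamples() {
	ok := &Config{Address: "localhost:2116"} // contains ":" -> Valid() returns nil
	alsoOK := &Config{Address: ":2116"}      // an empty host is accepted, the separator is enough
	bad := &Config{Address: "localhost"}     // no ":" -> "malformed http server address"

	_, _, _ = ok.Valid(), alsoOK.Valid(), bad.Valid()
}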
diff --git a/service/health/config_test.go b/service/health/config_test.go
deleted file mode 100644
index c02c46fc..00000000
--- a/service/health/config_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package health
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
-
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := &mockCfg{`{"address": "localhost:8080"}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
- assert.Equal(t, "localhost:8080", c.Address)
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &mockCfg{`{"dir": "/dir/"`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Valid1(t *testing.T) {
- cfg := &mockCfg{`{"address": "localhost"}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Valid2(t *testing.T) {
- cfg := &mockCfg{`{"address": ":1111"}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
diff --git a/service/health/service.go b/service/health/service.go
deleted file mode 100644
index b9b22a8a..00000000
--- a/service/health/service.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package health
-
-import (
- "context"
- "fmt"
- "net/http"
- "sync"
- "time"
-
- "github.com/sirupsen/logrus"
-
- rrhttp "github.com/spiral/roadrunner/service/http"
-)
-
-const (
- // ID declares public service name.
- ID = "health"
-	// maxHeaderSize declares the max header size for the health server
-	maxHeaderSize = 1024 * 1024 * 100 // 100 MiB
-)
-
-// Service to serve an endpoint for checking the health of the worker pool
-type Service struct {
- cfg *Config
- log *logrus.Logger
- mu sync.Mutex
- http *http.Server
- httpService *rrhttp.Service
-}
-
-// Init health service
-func (s *Service) Init(cfg *Config, r *rrhttp.Service, log *logrus.Logger) (bool, error) {
- // Ensure the httpService is set
- if r == nil {
- return false, nil
- }
-
- s.cfg = cfg
- s.log = log
- s.httpService = r
- return true, nil
-}
-
-// Serve the health endpoint
-func (s *Service) Serve() error {
- // Configure and start the http server
- s.mu.Lock()
- s.http = &http.Server{
- Addr: s.cfg.Address,
- Handler: s,
- IdleTimeout: time.Hour * 24,
- ReadTimeout: time.Minute * 60,
- MaxHeaderBytes: maxHeaderSize,
- ReadHeaderTimeout: time.Minute * 60,
- WriteTimeout: time.Minute * 60,
- }
- s.mu.Unlock()
-
- err := s.http.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- return err
- }
-
- return nil
-}
-
-// Stop the health endpoint
-func (s *Service) Stop() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.http != nil {
- // gracefully stop the server
- go func() {
- err := s.http.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
-				s.log.Error(fmt.Errorf("error shutting down the health server: error %v", err))
- }
- }()
- }
-}
-
-// ServeHTTP returns the health of the pool of workers
-func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- status := http.StatusOK
- if !s.isHealthy() {
- status = http.StatusInternalServerError
- }
- w.WriteHeader(status)
-}
-
-// isHealthy checks the server, pool and ensures at least one worker is active
-func (s *Service) isHealthy() bool {
- httpService := s.httpService
- if httpService == nil {
- return false
- }
-
- server := httpService.Server()
- if server == nil {
- return false
- }
-
- pool := server.Pool()
- if pool == nil {
- return false
- }
-
- // Ensure at least one worker is active
- for _, w := range pool.Workers() {
- if w.State().IsActive() {
- return true
- }
- }
-
- return false
-}
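A caller-side sketch of how the endpoint above is consumed; the address is whatever the health config section specifies, the value below is illustrative:

package main

import (
	"log"
	"net/http"
)

func main() {
	// the health server answers 200 while at least one worker is active
	// and 500 otherwise (see isHealthy above)
	res, err := http.Get("http://localhost:2116/")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	if res.StatusCode == http.StatusOK {
		log.Println("worker pool is healthy")
		return
	}
	log.Println("worker pool is unhealthy")
}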
diff --git a/service/health/service_test.go b/service/health/service_test.go
deleted file mode 100644
index 3488d631..00000000
--- a/service/health/service_test.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package health
-
-import (
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- json "github.com/json-iterator/go"
-
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- healthCfg string
- httpCfg string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == ID {
- return &testCfg{target: cfg.healthCfg}
- }
-
- if name == rrhttp.ID {
- return &testCfg{target: cfg.httpCfg}
- }
-
- return nil
-}
-
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- err := j.Unmarshal([]byte(cfg.target), out)
- return err
-}
-
-func TestService_Serve(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2116"
- }`,
- httpCfg: `{
- "address": "localhost:2115",
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, status)
-
- hS, httpStatus := c.Get(rrhttp.ID)
- assert.NotNil(t, hS)
- assert.Equal(t, service.StatusOK, httpStatus)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
- defer c.Stop()
-
- _, res, err := get("http://localhost:2116/")
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, res.StatusCode)
-}
-
-func TestService_Serve_DeadWorker(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2117"
- }`,
- httpCfg: `{
- "address": "localhost:2118",
- "workers":{
- "command": "php ../../tests/http/slow-client.php echo pipes 1000",
- "pool": {"numWorkers": 1}
- }
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, status)
-
- hS, httpStatus := c.Get(rrhttp.ID)
- assert.NotNil(t, hS)
- assert.Equal(t, service.StatusOK, httpStatus)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("server error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
- defer c.Stop()
-
- // Kill the worker
- httpSvc := hS.(*rrhttp.Service)
- err := httpSvc.Server().Workers()[0].Kill()
- if err != nil {
- t.Errorf("error killing the worker: error %v", err)
- }
-
- // Check health check
- _, res, err := get("http://localhost:2117/")
- assert.NoError(t, err)
- assert.Equal(t, http.StatusInternalServerError, res.StatusCode)
-}
-
-func TestService_Serve_DeadWorkerStillHealthy(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2119"
- }`,
- httpCfg: `{
- "address": "localhost:2120",
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 2}
- }
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, status)
-
- hS, httpStatus := c.Get(rrhttp.ID)
- assert.NotNil(t, hS)
- assert.Equal(t, service.StatusOK, httpStatus)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Second * 1)
- defer c.Stop()
-
- // Kill one of the workers
- httpSvc := hS.(*rrhttp.Service)
- err := httpSvc.Server().Workers()[0].Kill()
- if err != nil {
- t.Errorf("error killing the worker: error %v", err)
- }
-
- // Check health check
- _, res, err := get("http://localhost:2119/")
- assert.NoError(t, err)
- assert.Equal(t, http.StatusOK, res.StatusCode)
-}
-
-func TestService_Serve_NoHTTPService(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2121"
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, status)
-}
-
-func TestService_Serve_NoServer(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- healthSvc := &Service{}
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, healthSvc)
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2122"
- }`,
- httpCfg: `{
- "address": "localhost:2123",
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, status)
-
- hS, httpStatus := c.Get(rrhttp.ID)
- assert.NotNil(t, hS)
- assert.Equal(t, service.StatusOK, httpStatus)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
- defer c.Stop()
-
- // Set the httpService to nil
- healthSvc.httpService = nil
-
- _, res, err := get("http://localhost:2122/")
- assert.NoError(t, err)
- assert.Equal(t, http.StatusInternalServerError, res.StatusCode)
-}
-
-func TestService_Serve_NoPool(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- httpSvc := &rrhttp.Service{}
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, httpSvc)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- healthCfg: `{
- "address": "localhost:2124"
- }`,
- httpCfg: `{
- "address": "localhost:2125",
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- }))
-
- s, status := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, status)
-
- hS, httpStatus := c.Get(rrhttp.ID)
- assert.NotNil(t, hS)
- assert.Equal(t, service.StatusOK, httpStatus)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
- defer c.Stop()
-
- // Stop the pool
- httpSvc.Server().Stop()
-
- _, res, err := get("http://localhost:2124/")
- assert.NoError(t, err)
- assert.Equal(t, http.StatusInternalServerError, res.StatusCode)
-}
-
-// get request and return body
-func get(url string) (string, *http.Response, error) {
- r, err := http.Get(url)
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
- return string(b), r, err
-}
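
Taken together, the tests above pin down one rule for the health endpoint: answer 200 while the watched HTTP service still has a live worker pool, and 500 once the service or its pool is gone. Below is a minimal standalone sketch of that rule; the pool interface and healthHandler names are illustrative assumptions, not the deleted service's API.

package main

import (
	"log"
	"net/http"
)

// pool is a stand-in for whatever exposes the HTTP service's current workers.
type pool interface {
	Workers() []interface{}
}

// healthHandler reports 200 only while a pool exists and still has workers,
// which is the behaviour the tests above assert (500 once the server or pool is gone).
func healthHandler(current func() pool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		p := current()
		if p == nil || len(p.Workers()) == 0 {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	var active pool // nil until the watched HTTP service is serving
	log.Fatal(http.ListenAndServe("localhost:2119", healthHandler(func() pool { return active })))
}
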
diff --git a/service/http/config_test.go b/service/http/config_test.go
deleted file mode 100644
index 18b8f5a3..00000000
--- a/service/http/config_test.go
+++ /dev/null
@@ -1,330 +0,0 @@
-package http
-
-import (
- "os"
- "testing"
- "time"
-
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- return json.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := &mockCfg{`{"address": "localhost:8080"}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &mockCfg{`{"dir": "/dir/"`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Valid(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.NoError(t, cfg.Valid())
-}
-
-func Test_Trusted_Subnets(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- TrustedSubnets: []string{"200.1.0.0/16"},
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.NoError(t, cfg.parseCIDRs())
-
- assert.True(t, cfg.IsTrusted("200.1.0.10"))
- assert.False(t, cfg.IsTrusted("127.0.0.0.1"))
-}
-
-func Test_Trusted_Subnets_Err(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- TrustedSubnets: []string{"200.1.0.0"},
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.parseCIDRs())
-}
-
-func Test_Config_Valid_SSL(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- SSL: SSLConfig{
- Cert: "fixtures/server.crt",
- Key: "fixtures/server.key",
- },
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Hydrate(&testCfg{httpCfg: "{}"}))
-
- assert.NoError(t, cfg.Valid())
- assert.True(t, cfg.EnableTLS())
- assert.Equal(t, 443, cfg.SSL.Port)
-}
-
-func Test_Config_SSL_No_key(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- SSL: SSLConfig{
- Cert: "fixtures/server.crt",
- },
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_SSL_No_Cert(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- SSL: SSLConfig{
- Key: "fixtures/server.key",
- },
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_NoUploads(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_NoHTTP2(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 0,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_NoWorkers(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_NoPool(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 0,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_DeadPool(t *testing.T) {
- cfg := &Config{
- Address: ":8080",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Config_InvalidAddress(t *testing.T) {
- cfg := &Config{
- Address: "unexpected_address",
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- HTTP2: &HTTP2Config{
- Enabled: true,
- },
- Workers: &roadrunner.ServerConfig{
- Command: "php tests/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- },
- }
-
- assert.Error(t, cfg.Valid())
-}
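
Test_Trusted_Subnets and Test_Trusted_Subnets_Err above exercise the parseCIDRs/IsTrusted pair. The same technique can be sketched with nothing but the standard library; the trustedSubnets type below is an illustrative assumption, not the deleted package's implementation.

package main

import (
	"fmt"
	"net"
)

type trustedSubnets struct {
	cidrs []*net.IPNet
}

// parse converts textual CIDRs such as "200.1.0.0/16" into net.IPNet values.
func (t *trustedSubnets) parse(subnets []string) error {
	for _, cidr := range subnets {
		_, ipNet, err := net.ParseCIDR(cidr)
		if err != nil {
			return err
		}
		t.cidrs = append(t.cidrs, ipNet)
	}
	return nil
}

// isTrusted reports whether addr falls into any parsed subnet.
// Malformed addresses are treated as untrusted.
func (t *trustedSubnets) isTrusted(addr string) bool {
	ip := net.ParseIP(addr)
	if ip == nil {
		return false
	}
	for _, ipNet := range t.cidrs {
		if ipNet.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	ts := &trustedSubnets{}
	if err := ts.parse([]string{"200.1.0.0/16"}); err != nil {
		panic(err)
	}
	fmt.Println(ts.isTrusted("200.1.0.10"))  // true
	fmt.Println(ts.isTrusted("127.0.0.0.1")) // false: not a valid IP
}
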
diff --git a/service/http/fcgi_test.go b/service/http/fcgi_test.go
deleted file mode 100644
index cf67a68b..00000000
--- a/service/http/fcgi_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package http
-
-import (
- "io/ioutil"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
- "github.com/yookoala/gofast"
-)
-
-func Test_FCGI_Service_Echo(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "fcgi": {
- "address": "tcp://0.0.0.0:6082"
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() { assert.NoError(t, c.Serve()) }()
- time.Sleep(time.Second * 1)
-
- fcgiConnFactory := gofast.SimpleConnFactory("tcp", "0.0.0.0:6082")
-
- fcgiHandler := gofast.NewHandler(
- gofast.BasicParamsMap(gofast.BasicSession),
- gofast.SimpleClientFactory(fcgiConnFactory, 0),
- )
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("GET", "http://site.local/?hello=world", nil)
- fcgiHandler.ServeHTTP(w, req)
-
- body, err := ioutil.ReadAll(w.Result().Body)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, w.Result().StatusCode)
- assert.Equal(t, "WORLD", string(body))
- c.Stop()
-}
-
-func Test_FCGI_Service_Request_Uri(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "fcgi": {
- "address": "tcp://0.0.0.0:6083"
- },
- "workers":{
- "command": "php ../../tests/http/client.php request-uri pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() { assert.NoError(t, c.Serve()) }()
- time.Sleep(time.Second * 1)
-
- fcgiConnFactory := gofast.SimpleConnFactory("tcp", "0.0.0.0:6083")
-
- fcgiHandler := gofast.NewHandler(
- gofast.BasicParamsMap(gofast.BasicSession),
- gofast.SimpleClientFactory(fcgiConnFactory, 0),
- )
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("GET", "http://site.local/hello-world", nil)
- fcgiHandler.ServeHTTP(w, req)
-
- body, err := ioutil.ReadAll(w.Result().Body)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, w.Result().StatusCode)
- assert.Equal(t, "http://site.local/hello-world", string(body))
- c.Stop()
-}
diff --git a/service/http/h2c_test.go b/service/http/h2c_test.go
deleted file mode 100644
index f17538bc..00000000
--- a/service/http/h2c_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package http
-
-import (
- "net/http"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff/v4"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Service_H2C(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "address": ":6029",
- "http2": {"h2c":true},
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error serving: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- req, err := http.NewRequest("PRI", "http://localhost:6029?hello=world", nil)
- if err != nil {
- return err
- }
-
- req.Header.Add("Upgrade", "h2c")
- req.Header.Add("Connection", "HTTP2-Settings")
- req.Header.Add("HTTP2-Settings", "")
-
- r, err2 := http.DefaultClient.Do(req)
- if err2 != nil {
- return err2
- }
-
- assert.Equal(t, "101 Switching Protocols", r.Status)
-
- err3 := r.Body.Close()
- if err3 != nil {
- return err3
- }
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
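
The PRI/Upgrade request above only verifies that the server answers 101 Switching Protocols. To actually exchange data over h2c you need an HTTP/2 client that is allowed to skip TLS; one common way to build such a client with golang.org/x/net/http2 is sketched below. The expected WORLD body matches the echo worker used above; everything else is an assumption.

package main

import (
	"crypto/tls"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// HTTP/2 over cleartext TCP (h2c): allow plain HTTP and dial without TLS.
	client := &http.Client{
		Transport: &http2.Transport{
			AllowHTTP: true,
			DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) {
				return net.Dial(network, addr)
			},
		},
	}

	resp, err := client.Get("http://localhost:6029?hello=world")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Proto, resp.StatusCode, string(body)) // expected: HTTP/2.0 201 WORLD
}
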
diff --git a/service/http/rpc.go b/service/http/rpc.go
deleted file mode 100644
index 7b38dece..00000000
--- a/service/http/rpc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package http
-
-import (
- "github.com/pkg/errors"
- "github.com/spiral/roadrunner/util"
-)
-
-type rpcServer struct{ svc *Service }
-
-// WorkerList contains a list of workers.
-type WorkerList struct {
-	// Workers is the list of workers and their states.
-	Workers []*util.State `json:"workers"`
-}
-
-// Reset resets the underlying RR worker pool and restarts all of its workers.
-func (rpc *rpcServer) Reset(reset bool, r *string) error {
- if rpc.svc == nil || rpc.svc.handler == nil {
- return errors.New("http server is not running")
- }
-
- *r = "OK"
- return rpc.svc.Server().Reset()
-}
-
-// Workers returns the list of active workers and their stats.
-func (rpc *rpcServer) Workers(list bool, r *WorkerList) (err error) {
- if rpc.svc == nil || rpc.svc.handler == nil {
- return errors.New("http server is not running")
- }
-
- r.Workers, err = util.ServerState(rpc.svc.Server())
- return err
-}
diff --git a/service/http/rpc_test.go b/service/http/rpc_test.go
deleted file mode 100644
index 62f27ede..00000000
--- a/service/http/rpc_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package http
-
-import (
- "os"
- "strconv"
- "testing"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_RPC(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rpc.ID, &rpc.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- rpcCfg: `{"enable":true, "listen":"tcp://:5004"}`,
- httpCfg: `{
- "enable": true,
- "address": ":16031",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- s, _ := c.Get(ID)
- ss := s.(*Service)
-
- s2, _ := c.Get(rpc.ID)
- rs := s2.(*rpc.Service)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Second)
-
- res, _, err := get("http://localhost:16031")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, strconv.Itoa(*ss.rr.Workers()[0].Pid), res)
-
- cl, err := rs.Client()
- assert.NoError(t, err)
-
- r := ""
- assert.NoError(t, cl.Call("http.Reset", true, &r))
- assert.Equal(t, "OK", r)
-
- res2, _, err := get("http://localhost:16031")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, strconv.Itoa(*ss.rr.Workers()[0].Pid), res2)
- assert.NotEqual(t, res, res2)
- c.Stop()
-}
-
-func Test_RPC_Unix(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rpc.ID, &rpc.Service{})
- c.Register(ID, &Service{})
-
- sock := `unix://` + os.TempDir() + `/rpc.unix`
- data, _ := json.Marshal(sock)
-
- assert.NoError(t, c.Init(&testCfg{
- rpcCfg: `{"enable":true, "listen":` + string(data) + `}`,
- httpCfg: `{
- "enable": true,
- "address": ":6032",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- s, _ := c.Get(ID)
- ss := s.(*Service)
-
- s2, _ := c.Get(rpc.ID)
- rs := s2.(*rpc.Service)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- res, _, err := get("http://localhost:6032")
- if err != nil {
- c.Stop()
- t.Fatal(err)
- }
- if ss.rr.Workers() != nil && len(ss.rr.Workers()) > 0 {
- assert.Equal(t, strconv.Itoa(*ss.rr.Workers()[0].Pid), res)
- } else {
- c.Stop()
- t.Fatal("no workers initialized")
- }
-
- cl, err := rs.Client()
- if err != nil {
- c.Stop()
- t.Fatal(err)
- }
-
- r := ""
- assert.NoError(t, cl.Call("http.Reset", true, &r))
- assert.Equal(t, "OK", r)
-
- res2, _, err := get("http://localhost:6032")
- if err != nil {
- c.Stop()
- t.Fatal(err)
- }
- assert.Equal(t, strconv.Itoa(*ss.rr.Workers()[0].Pid), res2)
- assert.NotEqual(t, res, res2)
- c.Stop()
-}
-
-func Test_Workers(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rpc.ID, &rpc.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- rpcCfg: `{"enable":true, "listen":"tcp://:5005"}`,
- httpCfg: `{
- "enable": true,
- "address": ":6033",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- s, _ := c.Get(ID)
- ss := s.(*Service)
-
- s2, _ := c.Get(rpc.ID)
- rs := s2.(*rpc.Service)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- cl, err := rs.Client()
- assert.NoError(t, err)
-
- r := &WorkerList{}
- assert.NoError(t, cl.Call("http.Workers", true, &r))
- assert.Len(t, r.Workers, 1)
-
- assert.Equal(t, *ss.rr.Workers()[0].Pid, r.Workers[0].Pid)
- c.Stop()
-}
-
-func Test_Errors(t *testing.T) {
- r := &rpcServer{nil}
-
- assert.Error(t, r.Reset(true, nil))
- assert.Error(t, r.Workers(true, nil))
-}
diff --git a/service/http/service.go b/service/http/service.go
deleted file mode 100644
index 7a175dcb..00000000
--- a/service/http/service.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package http
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/fcgi"
- "net/url"
- "strings"
- "sync"
-
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service/env"
- "github.com/spiral/roadrunner/service/http/attributes"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/spiral/roadrunner/util"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/h2c"
- "golang.org/x/sys/cpu"
-)
-
-const (
- // ID contains default service name.
- ID = "http"
-
-	// EventInitSSL is thrown at the moment of HTTPS initialization. The SSL server is passed as context.
- EventInitSSL = 750
-)
-
-var couldNotAppendPemError = errors.New("could not append Certs from PEM")
-
-// http middleware type.
-type middleware func(f http.HandlerFunc) http.HandlerFunc
-
-// Service manages rr, http servers.
-type Service struct {
- sync.Mutex
- sync.WaitGroup
-
- cfg *Config
- log *logrus.Logger
- cprod roadrunner.CommandProducer
- env env.Environment
- lsns []func(event int, ctx interface{})
- mdwr []middleware
-
- rr *roadrunner.Server
- controller roadrunner.Controller
- handler *Handler
-
- http *http.Server
- https *http.Server
- fcgi *http.Server
-}
-
-// Attach attaches a controller. Currently, only one controller is supported.
-func (s *Service) Attach(w roadrunner.Controller) {
- s.controller = w
-}
-
-// ProduceCommands changes the default command generator method
-func (s *Service) ProduceCommands(producer roadrunner.CommandProducer) {
- s.cprod = producer
-}
-
-// AddMiddleware adds a new net/http middleware.
-func (s *Service) AddMiddleware(m middleware) {
- s.mdwr = append(s.mdwr, m)
-}
-
-// AddListener attaches a server event listener.
-func (s *Service) AddListener(l func(event int, ctx interface{})) {
- s.lsns = append(s.lsns, l)
-}
-
-// Init configures the service and returns true if the service is enabled. Must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (s *Service) Init(cfg *Config, r *rpc.Service, e env.Environment, log *logrus.Logger) (bool, error) {
- s.cfg = cfg
- s.log = log
- s.env = e
-
- if r != nil {
- if err := r.Register(ID, &rpcServer{s}); err != nil {
- return false, err
- }
- }
-
- if !cfg.EnableHTTP() && !cfg.EnableTLS() && !cfg.EnableFCGI() {
- return false, nil
- }
-
- return true, nil
-}
-
-// Serve serves the svc.
-func (s *Service) Serve() error {
- s.Lock()
-
- if s.env != nil {
- if err := s.env.Copy(s.cfg.Workers); err != nil {
-			return err
- }
- }
-
- s.cfg.Workers.CommandProducer = s.cprod
- s.cfg.Workers.SetEnv("RR_HTTP", "true")
-
- s.rr = roadrunner.NewServer(s.cfg.Workers)
- s.rr.Listen(s.throw)
-
- if s.controller != nil {
- s.rr.Attach(s.controller)
- }
-
- s.handler = &Handler{
- cfg: s.cfg,
- rr: s.rr,
- internalErrorCode: s.cfg.InternalErrorCode,
- appErrorCode: s.cfg.AppErrorCode,
- }
- s.handler.Listen(s.throw)
-
- if s.cfg.EnableHTTP() {
- if s.cfg.EnableH2C() {
- s.http = &http.Server{Addr: s.cfg.Address, Handler: h2c.NewHandler(s, &http2.Server{})}
- } else {
- s.http = &http.Server{Addr: s.cfg.Address, Handler: s}
- }
- }
-
- if s.cfg.EnableTLS() {
- s.https = s.initSSL()
- if s.cfg.SSL.RootCA != "" {
- err := s.appendRootCa()
- if err != nil {
- return err
- }
- }
-
- if s.cfg.EnableHTTP2() {
- if err := s.initHTTP2(); err != nil {
- return err
- }
- }
- }
-
- if s.cfg.EnableFCGI() {
- s.fcgi = &http.Server{Handler: s}
- }
-
- s.Unlock()
-
- if err := s.rr.Start(); err != nil {
- return err
- }
- defer s.rr.Stop()
-
- err := make(chan error, 3)
-
- if s.http != nil {
- go func() {
- httpErr := s.http.ListenAndServe()
- if httpErr != nil && httpErr != http.ErrServerClosed {
- err <- httpErr
- } else {
- err <- nil
- }
- }()
- }
-
- if s.https != nil {
- go func() {
- httpErr := s.https.ListenAndServeTLS(
- s.cfg.SSL.Cert,
- s.cfg.SSL.Key,
- )
-
- if httpErr != nil && httpErr != http.ErrServerClosed {
- err <- httpErr
- return
- }
- err <- nil
- }()
- }
-
- if s.fcgi != nil {
- go func() {
- httpErr := s.serveFCGI()
- if httpErr != nil && httpErr != http.ErrServerClosed {
- err <- httpErr
- return
- }
- err <- nil
- }()
- }
- return <-err
-}
-
-// Stop stops the http.
-func (s *Service) Stop() {
- s.Lock()
- defer s.Unlock()
-
- if s.fcgi != nil {
- s.Add(1)
- go func() {
- defer s.Done()
- err := s.fcgi.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
-				// Stop() error
-				// push the error from the goroutine to the channel and block until an error, a successful shutdown, or a timeout
- s.log.Error(fmt.Errorf("error shutting down the fcgi server, error: %v", err))
- return
- }
- }()
- }
-
- if s.https != nil {
- s.Add(1)
- go func() {
- defer s.Done()
- err := s.https.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
- s.log.Error(fmt.Errorf("error shutting down the https server, error: %v", err))
- return
- }
- }()
- }
-
- if s.http != nil {
- s.Add(1)
- go func() {
- defer s.Done()
- err := s.http.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
- s.log.Error(fmt.Errorf("error shutting down the http server, error: %v", err))
- return
- }
- }()
- }
-
- s.Wait()
-}
-
-// Server returns associated rr server (if any).
-func (s *Service) Server() *roadrunner.Server {
- s.Lock()
- defer s.Unlock()
-
- return s.rr
-}
-
-// ServeHTTP handles connection using set of middleware and rr PSR-7 server.
-func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if s.https != nil && r.TLS == nil && s.cfg.SSL.Redirect {
- target := &url.URL{
- Scheme: "https",
- Host: s.tlsAddr(r.Host, false),
- Path: r.URL.Path,
- RawQuery: r.URL.RawQuery,
- }
-
- http.Redirect(w, r, target.String(), http.StatusTemporaryRedirect)
- return
- }
-
- if s.https != nil && r.TLS != nil {
- w.Header().Add("Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
- }
-
- r = attributes.Init(r)
-
- // chaining middleware
- f := s.handler.ServeHTTP
- for _, m := range s.mdwr {
- f = m(f)
- }
- f(w, r)
-}
-
-// append RootCA to the https server TLS config
-func (s *Service) appendRootCa() error {
- rootCAs, err := x509.SystemCertPool()
- if err != nil {
- s.throw(EventInitSSL, nil)
- return nil
- }
- if rootCAs == nil {
- rootCAs = x509.NewCertPool()
- }
-
- CA, err := ioutil.ReadFile(s.cfg.SSL.RootCA)
- if err != nil {
- s.throw(EventInitSSL, nil)
- return err
- }
-
- // should append our CA cert
- ok := rootCAs.AppendCertsFromPEM(CA)
- if !ok {
- return couldNotAppendPemError
- }
- config := &tls.Config{
- InsecureSkipVerify: false,
- RootCAs: rootCAs,
- }
- s.http.TLSConfig = config
-
- return nil
-}
-
-// Init https server
-func (s *Service) initSSL() *http.Server {
- var topCipherSuites []uint16
- var defaultCipherSuitesTLS13 []uint16
-
- hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
-
- if hasGCMAsm {
- // If AES-GCM hardware is provided then prioritise AES-GCM
- // cipher suites.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- } else {
- // Without AES-GCM hardware, we put the ChaCha20-Poly1305
- // cipher suites first.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- }
-
- DefaultCipherSuites := make([]uint16, 0, 22)
- DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
- DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
-
- server := &http.Server{
- Addr: s.tlsAddr(s.cfg.Address, true),
- Handler: s,
- TLSConfig: &tls.Config{
- CurvePreferences: []tls.CurveID{
- tls.CurveP256,
- tls.CurveP384,
- tls.CurveP521,
- tls.X25519,
- },
- CipherSuites: DefaultCipherSuites,
- MinVersion: tls.VersionTLS12,
- PreferServerCipherSuites: true,
- },
- }
- s.throw(EventInitSSL, server)
-
- return server
-}
-
-// init http/2 server
-func (s *Service) initHTTP2() error {
- return http2.ConfigureServer(s.https, &http2.Server{
- MaxConcurrentStreams: s.cfg.HTTP2.MaxConcurrentStreams,
- })
-}
-
-// serveFCGI starts FastCGI server.
-func (s *Service) serveFCGI() error {
- l, err := util.CreateListener(s.cfg.FCGI.Address)
- if err != nil {
- return err
- }
-
- err = fcgi.Serve(l, s.fcgi.Handler)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// throw handles service, server and pool events.
-func (s *Service) throw(event int, ctx interface{}) {
- for _, l := range s.lsns {
- l(event, ctx)
- }
-
- if event == roadrunner.EventServerFailure {
- // underlying rr server is dead
- s.Stop()
- }
-}
-
-// tlsAddr replaces the listen or host port with the port configured in the SSL config.
-func (s *Service) tlsAddr(host string, forcePort bool) string {
-	// strip the current port first
- host = strings.Split(host, ":")[0]
-
- if forcePort || s.cfg.SSL.Port != 443 {
- host = fmt.Sprintf("%s:%v", host, s.cfg.SSL.Port)
- }
-
- return host
-}
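
ServeHTTP above builds the final handler by wrapping the PSR-7 handler with every registered middleware, so the middleware added last ends up outermost and runs first. The chaining pattern is shown in isolation below, a self-contained sketch with made-up logging and halt middleware; only the middleware type matches the code above.

package main

import (
	"fmt"
	"log"
	"net/http"
)

type middleware func(f http.HandlerFunc) http.HandlerFunc

// chain wraps base with each middleware in order; the last element of mdwr
// becomes the outermost wrapper, exactly like the loop in ServeHTTP above.
func chain(base http.HandlerFunc, mdwr []middleware) http.HandlerFunc {
	f := base
	for _, m := range mdwr {
		f = m(f)
	}
	return f
}

func main() {
	base := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "handler")
	}

	logging := func(next http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("logging:", r.URL.Path)
			next(w, r)
		}
	}

	halt := func(next http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			if r.URL.Path == "/halt" {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprint(w, "halted")
				return
			}
			next(w, r)
		}
	}

	// halt is registered last, so it runs first on every request.
	log.Fatal(http.ListenAndServe(":8080", chain(base, []middleware{logging, halt})))
}
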
diff --git a/service/http/service_test.go b/service/http/service_test.go
deleted file mode 100644
index 960bc513..00000000
--- a/service/http/service_test.go
+++ /dev/null
@@ -1,757 +0,0 @@
-package http
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff/v4"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/env"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- httpCfg string
- rpcCfg string
- envCfg string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == ID {
- if cfg.httpCfg == "" {
- return nil
- }
-
- return &testCfg{target: cfg.httpCfg}
- }
-
- if name == rpc.ID {
- return &testCfg{target: cfg.rpcCfg}
- }
-
- if name == env.ID {
- return &testCfg{target: cfg.envCfg}
- }
-
- return nil
-}
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- return json.Unmarshal([]byte(cfg.target), out)
-}
-
-func Test_Service_NoConfig(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{"Enable":true}`})
- assert.Error(t, err)
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, st)
-}
-
-func Test_Service_Configure_Disable(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, st)
-}
-
-func Test_Service_Configure_Enable(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":8070",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_Echo(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6536",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 100)
-
- req, err := http.NewRequest("GET", "http://localhost:6536?hello=world", nil)
- if err != nil {
- c.Stop()
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- return err
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- c.Stop()
- return err
- }
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- if err != nil {
- c.Stop()
- return err
- }
-
- c.Stop()
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Env(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(env.ID, env.NewService(map[string]string{"rr": "test"}))
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":10031",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php env pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`, envCfg: `{"env_key":"ENV_VALUE"}`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:10031", nil)
- if err != nil {
- c.Stop()
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- c.Stop()
- return err
- }
-
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "ENV_VALUE", string(b))
-
- err = r.Body.Close()
- if err != nil {
- c.Stop()
- return err
- }
-
- c.Stop()
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_ErrorEcho(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6030",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echoerr pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- goterr := make(chan interface{})
- s.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == roadrunner.EventStderrOutput {
- if string(ctx.([]byte)) == "WORLD\n" {
- goterr <- nil
- }
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:6030?hello=world", nil)
- if err != nil {
- c.Stop()
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- c.Stop()
- return err
- }
-
- <-goterr
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
- err = r.Body.Close()
- if err != nil {
- c.Stop()
- return err
- }
-
- c.Stop()
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Middleware(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6032",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- s.(*Service).AddMiddleware(func(f http.HandlerFunc) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/halt" {
- w.WriteHeader(500)
- _, err := w.Write([]byte("halted"))
- if err != nil {
- t.Errorf("error writing the data to the http reply: error %v", err)
- }
- } else {
- f(w, r)
- }
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:6032?hello=world", nil)
- if err != nil {
- c.Stop()
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- c.Stop()
- return err
- }
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- if err != nil {
- c.Stop()
- return err
- }
-
- req, err = http.NewRequest("GET", "http://localhost:6032/halt", nil)
- if err != nil {
- c.Stop()
- return err
- }
-
- r, err = http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- return err
- }
- b, err = ioutil.ReadAll(r.Body)
- if err != nil {
- c.Stop()
- return err
- }
-
- assert.Equal(t, 500, r.StatusCode)
- assert.Equal(t, "halted", string(b))
-
- err = r.Body.Close()
- if err != nil {
- c.Stop()
- return err
- }
- c.Stop()
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_Listener(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6033",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- stop := make(chan interface{})
- s.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == roadrunner.EventServerStart {
- stop <- nil
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- c.Stop()
- assert.True(t, true)
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Error(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6034",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "---",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- // assert error
- err = c.Serve()
- if err == nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Error2(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6035",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php broken pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- if err != nil {
- return err
- }
-
- // assert error
- err = c.Serve()
- if err == nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Error3(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": ":6036",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers"
- "command": "php ../../tests/http/client.php broken pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- // assert error
- if err == nil {
- return err
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_Error4(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{httpCfg: `{
- "enable": true,
- "address": "----",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php broken pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`})
- // assert error
- if err != nil {
- return nil
- }
-
- return err
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func tmpDir() string {
- p := os.TempDir()
- r, _ := json.Marshal(p)
-
- return string(r)
-}
diff --git a/service/http/ssl_test.go b/service/http/ssl_test.go
deleted file mode 100644
index 8078a3a7..00000000
--- a/service/http/ssl_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package http
-
-import (
- "crypto/tls"
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-var sslClient = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- },
-}
-
-func Test_SSL_Service_Echo(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "address": ":6029",
- "ssl": {
- "port": 6900,
- "key": "fixtures/server.key",
- "cert": "fixtures/server.crt"
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "https://localhost:6900?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("fail to close the Body: error %v", err2)
- }
-
- c.Stop()
-}
-
-func Test_SSL_Service_NoRedirect(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "address": ":6030",
- "ssl": {
- "port": 6901,
- "key": "fixtures/server.key",
- "cert": "fixtures/server.crt"
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:6030?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- assert.Nil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("fail to close the Body: error %v", err2)
- }
- c.Stop()
-}
-
-func Test_SSL_Service_Redirect(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "address": ":6831",
- "ssl": {
- "port": 6902,
- "redirect": true,
- "key": "fixtures/server.key",
- "cert": "fixtures/server.crt"
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:6831?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
- assert.NotNil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("fail to close the Body: error %v", err2)
- }
- c.Stop()
-}
-
-func Test_SSL_Service_Push(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{httpCfg: `{
- "address": ":6032",
- "ssl": {
- "port": 6903,
- "redirect": true,
- "key": "fixtures/server.key",
- "cert": "fixtures/server.crt"
- },
- "workers":{
- "command": "php ../../tests/http/client.php push pipes",
- "pool": {"numWorkers": 1}
- }
- }`}))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusOK, st)
-
- // should do nothing
- s.(*Service).Stop()
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "https://localhost:6903?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- assert.NotNil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, "", r.Header.Get("Http2-Push"))
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("fail to close the Body: error %v", err2)
- }
- c.Stop()
-}
diff --git a/service/limit/config.go b/service/limit/config.go
deleted file mode 100644
index 7a56280d..00000000
--- a/service/limit/config.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package limit
-
-import (
- "time"
-
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
-)
-
-// Config of Limit service.
-type Config struct {
- // Interval defines the update duration for underlying controllers, default 1s.
- Interval time.Duration
-
- // Services declares list of services to be watched.
- Services map[string]*controllerConfig
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
-
-	// Always use a second-based definition for time durations (a raw config value of 1 means 1s)
- if c.Interval < time.Microsecond {
- c.Interval = time.Second * time.Duration(c.Interval.Nanoseconds())
- }
-
- return nil
-}
-
-// InitDefaults sets missing values to their default values.
-func (c *Config) InitDefaults() error {
- c.Interval = time.Second
-
- return nil
-}
-
-// Controllers returns the controllers defined for the watched services
-func (c *Config) Controllers(l listener) (controllers map[string]roadrunner.Controller) {
- controllers = make(map[string]roadrunner.Controller)
-
- for name, cfg := range c.Services {
- controllers[name] = &controller{lsn: l, tick: c.Interval, cfg: cfg}
- }
-
- return controllers
-}
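
Hydrate above promotes a raw numeric interval into seconds: a config value of 1 is decoded into time.Duration as one nanosecond and then multiplied up to one second. The standalone demonstration below uses a local cfg struct and standard encoding/json; the deleted code's json-iterator decoding should behave the same for this input.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type cfg struct {
	Interval time.Duration
}

func main() {
	c := &cfg{}
	// "interval": 1 decodes into time.Duration as 1 nanosecond.
	if err := json.Unmarshal([]byte(`{"interval": 1}`), c); err != nil {
		panic(err)
	}
	fmt.Println(c.Interval) // 1ns

	// Same promotion as in Hydrate: treat sub-microsecond values as seconds.
	if c.Interval < time.Microsecond {
		c.Interval = time.Second * time.Duration(c.Interval.Nanoseconds())
	}
	fmt.Println(c.Interval) // 1s
}
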
diff --git a/service/limit/config_test.go b/service/limit/config_test.go
deleted file mode 100644
index 1f121bc5..00000000
--- a/service/limit/config_test.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package limit
-
-import (
- "testing"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := &mockCfg{`{"enable: true}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Controller_Default(t *testing.T) {
- cfg := &mockCfg{`
-{
- "services":{
- "http": {
- "TTL": 1
- }
- }
-}
-`}
- c := &Config{}
- err := c.InitDefaults()
- if err != nil {
- t.Errorf("failed to InitDefaults: error %v", err)
- }
-
- assert.NoError(t, c.Hydrate(cfg))
- assert.Equal(t, time.Second, c.Interval)
-
- list := c.Controllers(func(event int, ctx interface{}) {
- })
-
- sc := list["http"]
-
- assert.Equal(t, time.Second, sc.(*controller).tick)
-}
diff --git a/service/limit/controller.go b/service/limit/controller.go
deleted file mode 100644
index b4a1c25f..00000000
--- a/service/limit/controller.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package limit
-
-import (
- "fmt"
- "time"
-
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/util"
-)
-
-const (
-	// EventMaxMemory is triggered when a worker consumes more memory than allowed.
-	EventMaxMemory = iota + 8000
-
-	// EventTTL is thrown when a worker is removed due to its TTL being reached. Context is rr.WorkerError.
-	EventTTL
-
-	// EventIdleTTL is triggered when a worker spends too much time at rest.
-	EventIdleTTL
-
-	// EventExecTTL is triggered when a worker spends too much time on a single task (max_execution_time).
-	EventExecTTL
-)
-
-// handles controller events
-type listener func(event int, ctx interface{})
-
-// defines the controller behaviour
-type controllerConfig struct {
-	// MaxMemory defines the maximum amount of memory allowed for a worker, in megabytes.
-	MaxMemory uint64
-
-	// TTL defines the maximum time a worker is allowed to live, in seconds.
-	TTL int64
-
-	// IdleTTL defines the maximum duration a worker can spend in idle mode, in seconds.
-	IdleTTL int64
-
-	// ExecTTL defines the maximum execution time per job, in seconds.
-	ExecTTL int64
-}
-
-type controller struct {
- lsn listener
- tick time.Duration
- cfg *controllerConfig
-
- // list of workers which are currently working
- sw *stateFilter
-
- stop chan interface{}
-}
-
-// control the pool state
-func (c *controller) control(p roadrunner.Pool) {
- c.loadWorkers(p)
-
- now := time.Now()
-
- if c.cfg.ExecTTL != 0 {
- for _, w := range c.sw.find(
- roadrunner.StateWorking,
- now.Add(-time.Second*time.Duration(c.cfg.ExecTTL)),
- ) {
- eID := w.State().NumExecs()
- err := fmt.Errorf("max exec time reached (%vs)", c.cfg.ExecTTL)
-
-			// make sure the worker is still on the same request
- if p.Remove(w, err) && w.State().NumExecs() == eID {
- go func() {
- err := w.Kill()
- if err != nil {
- fmt.Printf("error killing worker with PID number: %d, created: %s", w.Pid, w.Created)
- }
- }()
- c.report(EventExecTTL, w, err)
- }
- }
- }
-
-	// locate workers which have been in idle mode for too long
- if c.cfg.IdleTTL != 0 {
- for _, w := range c.sw.find(
- roadrunner.StateReady,
- now.Add(-time.Second*time.Duration(c.cfg.IdleTTL)),
- ) {
- err := fmt.Errorf("max idle time reached (%vs)", c.cfg.IdleTTL)
- if p.Remove(w, err) {
- c.report(EventIdleTTL, w, err)
- }
- }
- }
-}
-
-func (c *controller) loadWorkers(p roadrunner.Pool) {
- now := time.Now()
-
- for _, w := range p.Workers() {
- if w.State().Value() == roadrunner.StateInvalid {
-			// skip workers that have already been invalidated
- continue
- }
-
- s, err := util.WorkerState(w)
- if err != nil {
- continue
- }
-
- if c.cfg.TTL != 0 && now.Sub(w.Created).Seconds() >= float64(c.cfg.TTL) {
- err := fmt.Errorf("max TTL reached (%vs)", c.cfg.TTL)
- if p.Remove(w, err) {
- c.report(EventTTL, w, err)
- }
- continue
- }
-
- if c.cfg.MaxMemory != 0 && s.MemoryUsage >= c.cfg.MaxMemory*1024*1024 {
- err := fmt.Errorf("max allowed memory reached (%vMB)", c.cfg.MaxMemory)
- if p.Remove(w, err) {
- c.report(EventMaxMemory, w, err)
- }
- continue
- }
-
- // control the worker state changes
- c.sw.push(w)
- }
-
- c.sw.sync(now)
-}
-
-// throw controller event
-func (c *controller) report(event int, worker *roadrunner.Worker, caused error) {
- if c.lsn != nil {
- c.lsn(event, roadrunner.WorkerError{Worker: worker, Caused: caused})
- }
-}
-
-// Attach controller to the pool
-func (c *controller) Attach(pool roadrunner.Pool) roadrunner.Controller {
- wp := &controller{
- tick: c.tick,
- lsn: c.lsn,
- cfg: c.cfg,
- sw: newStateFilter(),
- stop: make(chan interface{}),
- }
-
- go func(wp *controller, pool roadrunner.Pool) {
- ticker := time.NewTicker(wp.tick)
- for {
- select {
- case <-ticker.C:
- wp.control(pool)
- case <-wp.stop:
- return
- }
- }
- }(wp, pool)
-
- return wp
-}
-
-// Detach controller from the pool.
-func (c *controller) Detach() {
- close(c.stop)
-}
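
Attach above follows the standard ticker-plus-stop-channel pattern: a goroutine wakes on every tick, runs one control pass over the pool, and exits when Detach closes the stop channel. The same loop is shown in isolation below, with generic names instead of the roadrunner types.

package main

import (
	"fmt"
	"time"
)

// watchdog runs check on every tick until stop is closed.
type watchdog struct {
	tick time.Duration
	stop chan struct{}
}

func newWatchdog(tick time.Duration, check func()) *watchdog {
	w := &watchdog{tick: tick, stop: make(chan struct{})}
	go func() {
		ticker := time.NewTicker(w.tick)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				check()
			case <-w.stop:
				return
			}
		}
	}()
	return w
}

// detach stops the control loop, mirroring controller.Detach above.
func (w *watchdog) detach() { close(w.stop) }

func main() {
	w := newWatchdog(100*time.Millisecond, func() { fmt.Println("control pass") })
	time.Sleep(350 * time.Millisecond)
	w.detach()
}
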
diff --git a/service/limit/service.go b/service/limit/service.go
deleted file mode 100644
index c0b4139c..00000000
--- a/service/limit/service.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package limit
-
-import (
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
-)
-
-// ID defines controller service name.
-const ID = "limit"
-
-// Service controls the state of the rr server inside other services.
-type Service struct {
- lsns []func(event int, ctx interface{})
-}
-
-// Init controller service
-func (s *Service) Init(cfg *Config, c service.Container) (bool, error) {
-	// attach controllers to their designated services
- for id, watcher := range cfg.Controllers(s.throw) {
- svc, _ := c.Get(id)
- if ctrl, ok := svc.(roadrunner.Attacher); ok {
- ctrl.Attach(watcher)
- }
- }
-
- return true, nil
-}
-
-// AddListener attaches server event controller.
-func (s *Service) AddListener(l func(event int, ctx interface{})) {
- s.lsns = append(s.lsns, l)
-}
-
-// throw handles service, server and pool events.
-func (s *Service) throw(event int, ctx interface{}) {
- for _, l := range s.lsns {
- l(event, ctx)
- }
-}
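
Init above is a capability check: for every configured service it asks the container for the instance and attaches the watcher only when that instance implements roadrunner.Attacher. A minimal sketch of that interface-assertion wiring follows; all types below are hypothetical stand-ins that only match the shape of the code above.

package main

import "fmt"

// Controller and Attacher mirror the shape of the roadrunner interfaces used above.
type Controller interface{ Name() string }

type Attacher interface{ Attach(c Controller) }

type ttlController struct{}

func (ttlController) Name() string { return "ttl" }

// httpService pretends to be a service that supports controllers.
type httpService struct{ attached Controller }

func (s *httpService) Attach(c Controller) { s.attached = c }

// metricsService does not implement Attacher and is silently skipped.
type metricsService struct{}

func main() {
	services := map[string]interface{}{
		"http":    &httpService{},
		"metrics": &metricsService{},
	}
	controllers := map[string]Controller{"http": ttlController{}, "metrics": ttlController{}}

	// Same wiring as Service.Init: attach only where the service opts in.
	for id, ctrl := range controllers {
		if a, ok := services[id].(Attacher); ok {
			a.Attach(ctrl)
			fmt.Println("attached controller to", id)
		}
	}
}
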
diff --git a/service/limit/service_test.go b/service/limit/service_test.go
deleted file mode 100644
index fae73166..00000000
--- a/service/limit/service_test.go
+++ /dev/null
@@ -1,498 +0,0 @@
-package limit
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- "github.com/cenkalti/backoff/v4"
- json "github.com/json-iterator/go"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- httpCfg string
- limitCfg string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == rrhttp.ID {
- if cfg.httpCfg == "" {
- return nil
- }
-
- return &testCfg{target: cfg.httpCfg}
- }
-
- if name == ID {
- return &testCfg{target: cfg.limitCfg}
- }
-
- return nil
-}
-
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- err := j.Unmarshal([]byte(cfg.target), out)
-
- if cl, ok := out.(*Config); ok {
- // to speed up tests
- cl.Interval = time.Millisecond
- }
-
- return err
-}
-
-func Test_Service_PidEcho(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{
- httpCfg: `{
- "address": ":27029",
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- limitCfg: `{
- "services": {
- "http": {
- "ttl": 1
- }
- }
- }`,
- })
- if err != nil {
- return err
- }
-
- s, _ := c.Get(rrhttp.ID)
- assert.NotNil(t, s)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 800)
- req, err := http.NewRequest("GET", "http://localhost:27029", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
-
- assert.Equal(t, getPID(s), string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("error during the body closing: error %v", err2)
- }
- c.Stop()
- return nil
-
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_ListenerPlusTTL(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{
- httpCfg: `{
- "address": ":7030",
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- limitCfg: `{
- "services": {
- "http": {
- "ttl": 1
- }
- }
- }`,
- })
- if err != nil {
- return err
- }
-
- s, _ := c.Get(rrhttp.ID)
- assert.NotNil(t, s)
-
- l, _ := c.Get(ID)
- captured := make(chan interface{})
- l.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == EventTTL {
- close(captured)
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 100)
-
- lastPID := getPID(s)
-
- req, err := http.NewRequest("GET", "http://localhost:7030", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
- assert.Equal(t, lastPID, string(b))
-
- <-captured
-
- // clean state
- req, err = http.NewRequest("GET", "http://localhost:7030?new", nil)
- if err != nil {
- return err
- }
-
- _, err = http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- assert.NotEqual(t, lastPID, getPID(s))
-
- c.Stop()
-
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("error during the body closing: error %v", err2)
- }
-
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-
-}
-
-func Test_Service_ListenerPlusIdleTTL(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{
- httpCfg: `{
- "address": ":7031",
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- limitCfg: `{
- "services": {
- "http": {
- "idleTtl": 1
- }
- }
- }`,
- })
- if err != nil {
- return err
- }
-
- s, _ := c.Get(rrhttp.ID)
- assert.NotNil(t, s)
-
- l, _ := c.Get(ID)
- captured := make(chan interface{})
- l.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == EventIdleTTL {
- close(captured)
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 100)
-
- lastPID := getPID(s)
-
- req, err := http.NewRequest("GET", "http://localhost:7031", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return err
- }
- assert.Equal(t, lastPID, string(b))
-
- <-captured
-
- // clean state
- req, err = http.NewRequest("GET", "http://localhost:7031?new", nil)
- if err != nil {
- return err
- }
-
- _, err = http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
-
- assert.NotEqual(t, lastPID, getPID(s))
-
- c.Stop()
- err2 := r.Body.Close()
- if err2 != nil {
- t.Errorf("error during the body closing: error %v", err2)
- }
- return nil
- }, bkoff)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Listener_MaxExecTTL(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
-
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{
- httpCfg: `{
- "address": ":7032",
- "workers":{
- "command": "php ../../tests/http/client.php stuck pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- limitCfg: `{
- "services": {
- "http": {
- "execTTL": 1
- }
- }
- }`,
- })
- if err != nil {
- return err
- }
-
- s, _ := c.Get(rrhttp.ID)
- assert.NotNil(t, s)
-
- l, _ := c.Get(ID)
- captured := make(chan interface{})
- l.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == EventExecTTL {
- close(captured)
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 100)
-
- req, err := http.NewRequest("GET", "http://localhost:7032", nil)
- if err != nil {
- return err
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- assert.Equal(t, 500, r.StatusCode)
-
- <-captured
-
- c.Stop()
- return nil
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Service_Listener_MaxMemoryUsage(t *testing.T) {
- bkoff := backoff.NewExponentialBackOff()
- bkoff.MaxElapsedTime = time.Second * 15
-
- err := backoff.Retry(func() error {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- err := c.Init(&testCfg{
- httpCfg: `{
- "address": ":10033",
- "workers":{
- "command": "php ../../tests/http/client.php memleak pipes",
- "pool": {"numWorkers": 1}
- }
- }`,
- limitCfg: `{
- "services": {
- "http": {
- "maxMemory": 10
- }
- }
- }`,
- })
- if err != nil {
- return err
- }
-
- time.Sleep(time.Second * 3)
- s, _ := c.Get(rrhttp.ID)
- assert.NotNil(t, s)
-
- l, _ := c.Get(ID)
- captured := make(chan interface{})
- once := false
- l.(*Service).AddListener(func(event int, ctx interface{}) {
- if event == EventMaxMemory && !once {
- close(captured)
- once = true
- }
- })
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- lastPID := getPID(s)
-
- req, err := http.NewRequest("GET", "http://localhost:10033", nil)
- if err != nil {
- return err
- }
-
- for {
- select {
- case <-captured:
- _, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- t.Errorf("error during sending the http request: error %v", err)
- }
- assert.NotEqual(t, lastPID, getPID(s))
- c.Stop()
- return nil
- default:
- _, err := http.DefaultClient.Do(req)
- if err != nil {
- c.Stop()
- t.Errorf("error during sending the http request: error %v", err)
- }
- c.Stop()
- return nil
- }
- }
- }, bkoff)
-
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func getPID(s interface{}) string {
- if len(s.(*rrhttp.Service).Server().Workers()) > 0 {
- w := s.(*rrhttp.Service).Server().Workers()[0]
- return fmt.Sprintf("%v", *w.Pid)
- } else {
- panic("no workers")
- }
-}
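
Each scenario in the removed test file is wrapped in backoff.Retry from github.com/cenkalti/backoff/v4 so that flaky startup timing is retried rather than failed outright. A minimal standalone sketch of that retry pattern (the operation below is illustrative):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	bkoff := backoff.NewExponentialBackOff()
	bkoff.MaxElapsedTime = time.Second * 2

	attempts := 0
	err := backoff.Retry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet") // retried with growing delays
		}
		return nil
	}, bkoff)
	fmt.Println(attempts, err) // 3 <nil>
}
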
diff --git a/service/limit/state_filter.go b/service/limit/state_filter.go
deleted file mode 100644
index 4e05769a..00000000
--- a/service/limit/state_filter.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package limit
-
-import (
- "time"
-
- "github.com/spiral/roadrunner"
-)
-
-type stateFilter struct {
- prev map[*roadrunner.Worker]state
- next map[*roadrunner.Worker]state
-}
-
-type state struct {
- state int64
- numExecs int64
- since time.Time
-}
-
-func newStateFilter() *stateFilter {
- return &stateFilter{
- prev: make(map[*roadrunner.Worker]state),
- next: make(map[*roadrunner.Worker]state),
- }
-}
-
-// add new worker to be watched
-func (sw *stateFilter) push(w *roadrunner.Worker) {
- sw.next[w] = state{state: w.State().Value(), numExecs: w.State().NumExecs()}
-}
-
-// update worker states.
-func (sw *stateFilter) sync(t time.Time) {
- for w := range sw.prev {
- if _, ok := sw.next[w]; !ok {
- delete(sw.prev, w)
- }
- }
-
- for w, s := range sw.next {
- ps, ok := sw.prev[w]
- if !ok || ps.state != s.state || ps.numExecs != s.numExecs {
- sw.prev[w] = state{state: s.state, numExecs: s.numExecs, since: t}
- }
-
- delete(sw.next, w)
- }
-}
-
-// find all workers which have spent a given amount of time in a specific state.
-func (sw *stateFilter) find(state int64, since time.Time) (workers []*roadrunner.Worker) {
- for w, s := range sw.prev {
- if s.state == state && s.since.Before(since) {
- workers = append(workers, w)
- }
- }
-
- return
-}
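
The removed stateFilter keeps a per-worker timestamp that is reset only when the observed state or exec counter changes, so find can report workers stuck in one state past a cutoff. A simplified sketch of that bookkeeping, with a string key standing in for the worker pointer:

package main

import (
	"fmt"
	"time"
)

type obs struct {
	state    int64
	numExecs int64
	since    time.Time
}

type filter struct{ seen map[string]obs }

// sync mirrors the removed stateFilter.sync: drop vanished workers and reset
// the "since" clock only when a worker's state or exec counter has changed.
func (f *filter) sync(next map[string]obs, now time.Time) {
	for id := range f.seen {
		if _, ok := next[id]; !ok {
			delete(f.seen, id)
		}
	}
	for id, n := range next {
		p, ok := f.seen[id]
		if !ok || p.state != n.state || p.numExecs != n.numExecs {
			n.since = now
			f.seen[id] = n
		}
	}
}

// find reports workers that have sat in the given state since before the cutoff.
func (f *filter) find(state int64, cutoff time.Time) (ids []string) {
	for id, o := range f.seen {
		if o.state == state && o.since.Before(cutoff) {
			ids = append(ids, id)
		}
	}
	return
}

func main() {
	f := &filter{seen: map[string]obs{}}
	f.sync(map[string]obs{"w1": {state: 1}}, time.Now().Add(-2*time.Second))
	f.sync(map[string]obs{"w1": {state: 1}}, time.Now())
	fmt.Println(f.find(1, time.Now().Add(-time.Second))) // [w1]
}
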
diff --git a/service/metrics/rpc.go b/service/metrics/rpc.go
deleted file mode 100644
index 0544d109..00000000
--- a/service/metrics/rpc.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package metrics
-
-import (
- "fmt"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type rpcServer struct {
- svc *Service
-}
-
-// Metric represents a single metric produced by the application.
-type Metric struct {
- // Collector name.
- Name string
-
- // Collector value.
- Value float64
-
- // Labels associated with the metric. Only for vector metrics. Must be provided in the form of label values.
- Labels []string
-}
-
-// Add new metric to the designated collector.
-func (rpc *rpcServer) Add(m *Metric, ok *bool) (err error) {
- defer func() {
- if r, fail := recover().(error); fail {
- err = r
- }
- }()
-
- c := rpc.svc.Collector(m.Name)
- if c == nil {
- return fmt.Errorf("undefined collector `%s`", m.Name)
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Add(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Add(m.Value)
-
- case prometheus.Counter:
- c.Add(m.Value)
-
- case *prometheus.CounterVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Add(m.Value)
-
- default:
- return fmt.Errorf("collector `%s` does not support method `Add`", m.Name)
- }
-
- // RPC: set ok to true as the return value. Needed by the rpc.Call reply argument
- *ok = true
- return nil
-}
-
-// Sub subtracts the value from the specified metric (gauge only).
-func (rpc *rpcServer) Sub(m *Metric, ok *bool) (err error) {
- defer func() {
- if r, fail := recover().(error); fail {
- err = r
- }
- }()
-
- c := rpc.svc.Collector(m.Name)
- if c == nil {
- return fmt.Errorf("undefined collector `%s`", m.Name)
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Sub(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Sub(m.Value)
- default:
- return fmt.Errorf("collector `%s` does not support method `Sub`", m.Name)
- }
-
- // RPC: set ok to true as the return value. Needed by the rpc.Call reply argument
- *ok = true
- return nil
-}
-
-// Observe the value (histogram and summary only).
-func (rpc *rpcServer) Observe(m *Metric, ok *bool) (err error) {
- defer func() {
- if r, fail := recover().(error); fail {
- err = r
- }
- }()
-
- c := rpc.svc.Collector(m.Name)
- if c == nil {
- return fmt.Errorf("undefined collector `%s`", m.Name)
- }
-
- switch c := c.(type) {
- case *prometheus.SummaryVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Observe(m.Value)
-
- case prometheus.Histogram:
- c.Observe(m.Value)
-
- case *prometheus.HistogramVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Observe(m.Value)
- default:
- return fmt.Errorf("collector `%s` does not support method `Observe`", m.Name)
- }
-
- // RPC: set ok to true as the return value. Needed by the rpc.Call reply argument
- *ok = true
- return nil
-}
-
-// Declare is used to register new collector in prometheus
-// THE TYPES ARE:
-// NamedCollector -> Collector with the name
-// bool -> RPC reply value
-// RETURNS:
-// error
-func (rpc *rpcServer) Declare(c *NamedCollector, ok *bool) (err error) {
- // MustRegister can panic, so to return an error instead of shutting down the whole app
- // we recover and return the error
- defer func() {
- if r, fail := recover().(error); fail {
- err = r
- }
- }()
-
- if rpc.svc.Collector(c.Name) != nil {
- *ok = false
- // alternative is to return error
- // fmt.Errorf("tried to register existing collector with the name `%s`", c.Name)
- return nil
- }
-
- var collector prometheus.Collector
- switch c.Type {
- case Histogram:
- opts := prometheus.HistogramOpts{
- Name: c.Name,
- Namespace: c.Namespace,
- Subsystem: c.Subsystem,
- Help: c.Help,
- Buckets: c.Buckets,
- }
-
- if len(c.Labels) != 0 {
- collector = prometheus.NewHistogramVec(opts, c.Labels)
- } else {
- collector = prometheus.NewHistogram(opts)
- }
- case Gauge:
- opts := prometheus.GaugeOpts{
- Name: c.Name,
- Namespace: c.Namespace,
- Subsystem: c.Subsystem,
- Help: c.Help,
- }
-
- if len(c.Labels) != 0 {
- collector = prometheus.NewGaugeVec(opts, c.Labels)
- } else {
- collector = prometheus.NewGauge(opts)
- }
- case Counter:
- opts := prometheus.CounterOpts{
- Name: c.Name,
- Namespace: c.Namespace,
- Subsystem: c.Subsystem,
- Help: c.Help,
- }
-
- if len(c.Labels) != 0 {
- collector = prometheus.NewCounterVec(opts, c.Labels)
- } else {
- collector = prometheus.NewCounter(opts)
- }
- case Summary:
- opts := prometheus.SummaryOpts{
- Name: c.Name,
- Namespace: c.Namespace,
- Subsystem: c.Subsystem,
- Help: c.Help,
- Objectives: c.Objectives,
- }
-
- if len(c.Labels) != 0 {
- collector = prometheus.NewSummaryVec(opts, c.Labels)
- } else {
- collector = prometheus.NewSummary(opts)
- }
-
- default:
- return fmt.Errorf("unknown collector type `%s`", c.Type)
-
- }
-
- // add collector to sync.Map
- rpc.svc.collectors.Store(c.Name, collector)
- // that method might panic; we handle it via recover
- rpc.svc.MustRegister(collector)
-
- *ok = true
- return nil
-}
-
-// Set the metric value (gauge only).
-func (rpc *rpcServer) Set(m *Metric, ok *bool) (err error) {
- defer func() {
- if r, fail := recover().(error); fail {
- err = r
- }
- }()
-
- c := rpc.svc.Collector(m.Name)
- if c == nil {
- return fmt.Errorf("undefined collector `%s`", m.Name)
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Set(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
- return fmt.Errorf("required labels for collector `%s`", m.Name)
- }
-
- c.WithLabelValues(m.Labels...).Set(m.Value)
-
- default:
- return fmt.Errorf("collector `%s` does not support method `Set`", m.Name)
- }
-
- // RPC: set ok to true as the return value. Needed by the rpc.Call reply argument
- *ok = true
- return nil
-}
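
Every handler above shares one defensive idiom: a deferred recover converts prometheus panics (for example from MustRegister) into an ordinary error returned to the RPC caller. A standalone sketch of that recover-to-error idiom:

package main

import (
	"errors"
	"fmt"
)

// mustDo stands in for a call such as MustRegister that panics on failure.
func mustDo() {
	panic(errors.New("duplicate collector"))
}

func do() (err error) {
	defer func() {
		if r, ok := recover().(error); ok {
			err = r // hand the panic back to the caller as a normal error
		}
	}()
	mustDo()
	return nil
}

func main() {
	fmt.Println(do()) // duplicate collector
}
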
diff --git a/service/metrics/rpc_test.go b/service/metrics/rpc_test.go
deleted file mode 100644
index 37af3eec..00000000
--- a/service/metrics/rpc_test.go
+++ /dev/null
@@ -1,862 +0,0 @@
-package metrics
-
-import (
- rpc2 "net/rpc"
- "strconv"
- "testing"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-var port = 5004
-
-func setup(t *testing.T, metric string, portNum string) (*rpc2.Client, service.Container) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rpc.ID, &rpc.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- rpcCfg: `{"enable":true, "listen":"tcp://:` + strconv.Itoa(port) + `"}`,
- metricsCfg: `{
- "address": "localhost:` + portNum + `",
- "collect":{
- ` + metric + `
- }
- }`}))
-
- // rotate ports for travis
- port++
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- s2, _ := c.Get(rpc.ID)
- rs := s2.(*rpc.Service)
-
- assert.True(t, s.(*Service).Enabled())
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 200)
-
- client, err := rs.Client()
- assert.NoError(t, err)
- if err != nil {
- panic(err)
- }
-
- return client, c
-}
-
-func Test_Set_RPC(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge"
- }`,
- "2112",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2112/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge 100`)
-}
-
-func Test_Set_RPC_Vector(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2113",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2113/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge{section="first",type="core"} 100`)
-}
-
-func Test_Set_RPC_CollectorError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2114",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge_2",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Set_RPC_MetricError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2115",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Set_RPC_MetricError_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2116",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-}
-
-func Test_Set_RPC_MetricError_3(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2117",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Set", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-}
-
-// sub
-
-func Test_Sub_RPC(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge"
- }`,
- "2118",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
- assert.True(t, ok)
-
- assert.NoError(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge",
- Value: 10.0,
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2118/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge 90`)
-}
-
-func Test_Sub_RPC_Vector(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2119",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- assert.NoError(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge",
- Value: 10.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2119/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge{section="first",type="core"} 90`)
-}
-
-func Test_Register_RPC_Histogram(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2319",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Declare", &NamedCollector{
- Name: "custom_histogram",
- Collector: Collector{
- Namespace: "test_histogram",
- Subsystem: "test_histogram",
- Type: Histogram,
- Help: "test_histogram",
- Labels: nil,
- Buckets: []float64{0.1, 0.2, 0.5},
- },
- }, &ok))
- assert.True(t, ok)
-
- var ok2 bool
- // histogram does not support Add, should be an error
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "custom_histogram",
- }, &ok2))
- // ok should become false
- assert.False(t, ok2)
-
- out, _, err := get("http://localhost:2319/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `TYPE test_histogram_test_histogram_custom_histogram histogram`)
-
- // check buckets
- assert.Contains(t, out, `test_histogram_test_histogram_custom_histogram_bucket{le="0.1"} 0`)
- assert.Contains(t, out, `test_histogram_test_histogram_custom_histogram_bucket{le="0.2"} 0`)
- assert.Contains(t, out, `test_histogram_test_histogram_custom_histogram_bucket{le="0.5"} 0`)
-}
-
-func Test_Register_RPC_Gauge(t *testing.T) {
- // For the register method, setup is used just to init the RPC
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2324",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Declare", &NamedCollector{
- Name: "custom_gauge",
- Collector: Collector{
- Namespace: "test_gauge",
- Subsystem: "test_gauge",
- Type: Gauge,
- Help: "test_gauge",
- Labels: []string{"type", "section"},
- Buckets: nil,
- },
- }, &ok))
- assert.True(t, ok)
-
- var ok2 bool
- // Add to custom_gauge
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "custom_gauge",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok2))
- // ok should become true
- assert.True(t, ok2)
-
- // Subtract from custom runtime metric
- var ok3 bool
- assert.NoError(t, client.Call("metrics.Sub", Metric{
- Name: "custom_gauge",
- Value: 10.0,
- Labels: []string{"core", "first"},
- }, &ok3))
- assert.True(t, ok3)
-
- out, _, err := get("http://localhost:2324/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `test_gauge_test_gauge_custom_gauge{section="first",type="core"} 90`)
-}
-
-func Test_Register_RPC_Counter(t *testing.T) {
- // For the register method, setup is used just to init the RPC
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2328",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Declare", &NamedCollector{
- Name: "custom_counter",
- Collector: Collector{
- Namespace: "test_counter",
- Subsystem: "test_counter",
- Type: Counter,
- Help: "test_counter",
- Labels: []string{"type", "section"},
- Buckets: nil,
- },
- }, &ok))
- assert.True(t, ok)
-
- var ok2 bool
- // Add to custom_counter
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "custom_counter",
- Value: 100.0,
- Labels: []string{"type2", "section2"},
- }, &ok2))
- // ok should become true
- assert.True(t, ok2)
-
- out, _, err := get("http://localhost:2328/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `test_counter_test_counter_custom_counter{section="section2",type="type2"} 100`)
-}
-
-func Test_Register_RPC_Summary(t *testing.T) {
- // For the register method, setup is used just to init the RPC
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "6666",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Declare", &NamedCollector{
- Name: "custom_summary",
- Collector: Collector{
- Namespace: "test_summary",
- Subsystem: "test_summary",
- Type: Summary,
- Help: "test_summary",
- Labels: nil,
- Buckets: nil,
- },
- }, &ok))
- assert.True(t, ok)
-
- var ok2 bool
- // Add to custom_summary is not supported
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "custom_summary",
- Value: 100.0,
- Labels: []string{"type22", "section22"},
- }, &ok2))
- // ok should become false
- assert.False(t, ok2)
-
- out, _, err := get("http://localhost:6666/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `test_summary_test_summary_custom_summary_sum 0`)
- assert.Contains(t, out, `test_summary_test_summary_custom_summary_count 0`)
-}
-
-func Test_Sub_RPC_CollectorError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2120",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge_2",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Sub_RPC_MetricError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2121",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Sub_RPC_MetricError_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "gauge",
- "labels": ["type", "section"]
- }`,
- "2122",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-}
-
-func Test_Sub_RPC_MetricError_3(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2123",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Sub", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-}
-
-// -- observe
-
-func Test_Observe_RPC(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "histogram"
- }`,
- "2124",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2124/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_histogram`)
-}
-
-func Test_Observe_RPC_Vector(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2125",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2125/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_histogram`)
-}
-
-func Test_Observe_RPC_CollectorError(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2126",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Observe_RPC_MetricError(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2127",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Observe_RPC_MetricError_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2128",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- }, &ok))
-}
-
-// -- observe summary
-
-func Test_Observe2_RPC(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary"
- }`,
- "2129",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2129/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_histogram`)
-}
-
-func Test_Observe2_RPC_Invalid(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary"
- }`,
- "2130",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram_2",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Observe2_RPC_Invalid_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "gauge"
- }`,
- "2131",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- }, &ok))
-}
-
-func Test_Observe2_RPC_Vector(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary",
- "labels": ["type", "section"]
- }`,
- "2132",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2132/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_histogram`)
-}
-
-func Test_Observe2_RPC_CollectorError(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary",
- "labels": ["type", "section"]
- }`,
- "2133",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Observe2_RPC_MetricError(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary",
- "labels": ["type", "section"]
- }`,
- "2134",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-}
-
-func Test_Observe2_RPC_MetricError_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_histogram":{
- "type": "summary",
- "labels": ["type", "section"]
- }`,
- "2135",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Observe", Metric{
- Name: "user_histogram",
- Value: 100.0,
- }, &ok))
-}
-
-// add
-func Test_Add_RPC(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "counter"
- }`,
- "2136",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2136/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge 100`)
-}
-
-func Test_Add_RPC_Vector(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "counter",
- "labels": ["type", "section"]
- }`,
- "2137",
- )
- defer c.Stop()
-
- var ok bool
- assert.NoError(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }, &ok))
- assert.True(t, ok)
-
- out, _, err := get("http://localhost:2137/metrics")
- assert.NoError(t, err)
- assert.Contains(t, out, `user_gauge{section="first",type="core"} 100`)
-}
-
-func Test_Add_RPC_CollectorError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "counter",
- "labels": ["type", "section"]
- }`,
- "2138",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge_2",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-
- assert.False(t, ok)
-}
-
-func Test_Add_RPC_MetricError(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "counter",
- "labels": ["type", "section"]
- }`,
- "2139",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- Labels: []string{"missing"},
- }, &ok))
-
- assert.False(t, ok)
-}
-
-func Test_Add_RPC_MetricError_2(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "counter",
- "labels": ["type", "section"]
- }`,
- "2140",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-
- assert.False(t, ok)
-}
-
-func Test_Add_RPC_MetricError_3(t *testing.T) {
- client, c := setup(
- t,
- `"user_gauge":{
- "type": "histogram",
- "labels": ["type", "section"]
- }`,
- "2141",
- )
- defer c.Stop()
-
- var ok bool
- assert.Error(t, client.Call("metrics.Add", Metric{
- Name: "user_gauge",
- Value: 100.0,
- }, &ok))
-}
diff --git a/service/metrics/service.go b/service/metrics/service.go
deleted file mode 100644
index 4656ae04..00000000
--- a/service/metrics/service.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package metrics
-
-// todo: declare metric at runtime
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net/http"
- "sync"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner/service/rpc"
- "golang.org/x/sys/cpu"
-)
-
-const (
- // ID declares public service name.
- ID = "metrics"
- // maxHeaderSize declares max header size for prometheus server
- maxHeaderSize = 1024 * 1024 * 100 // 104MB
-)
-
-// Service to manage application metrics using Prometheus.
-type Service struct {
- cfg *Config
- log *logrus.Logger
- mu sync.Mutex
- http *http.Server
- collectors sync.Map
- registry *prometheus.Registry
-}
-
-// Init service.
-func (s *Service) Init(cfg *Config, r *rpc.Service, log *logrus.Logger) (bool, error) {
- s.cfg = cfg
- s.log = log
- s.registry = prometheus.NewRegistry()
-
- s.registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
- s.registry.MustRegister(prometheus.NewGoCollector())
-
- if r != nil {
- if err := r.Register(ID, &rpcServer{s}); err != nil {
- return false, err
- }
- }
-
- return true, nil
-}
-
-// Enabled indicates that the server is able to collect metrics.
-func (s *Service) Enabled() bool {
- return s.cfg != nil
-}
-
-// Register new prometheus collector.
-func (s *Service) Register(c prometheus.Collector) error {
- return s.registry.Register(c)
-}
-
-// MustRegister registers new collector or fails with panic.
-func (s *Service) MustRegister(c prometheus.Collector) {
- s.registry.MustRegister(c)
-}
-
-// Serve prometheus metrics service.
-func (s *Service) Serve() error {
- // register application specific metrics
- collectors, err := s.cfg.getCollectors()
- if err != nil {
- return err
- }
-
- for name, collector := range collectors {
- if err := s.registry.Register(collector); err != nil {
- return err
- }
-
- s.collectors.Store(name, collector)
- }
-
- s.mu.Lock()
-
- var topCipherSuites []uint16
- var defaultCipherSuitesTLS13 []uint16
-
- hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
-
- if hasGCMAsm {
- // If AES-GCM hardware is provided then prioritise AES-GCM
- // cipher suites.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- } else {
- // Without AES-GCM hardware, we put the ChaCha20-Poly1305
- // cipher suites first.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- }
-
- DefaultCipherSuites := make([]uint16, 0, 22)
- DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
- DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
-
- s.http = &http.Server{
- Addr: s.cfg.Address,
- Handler: promhttp.HandlerFor(s.registry, promhttp.HandlerOpts{}),
- IdleTimeout: time.Hour * 24,
- ReadTimeout: time.Minute * 60,
- MaxHeaderBytes: maxHeaderSize,
- ReadHeaderTimeout: time.Minute * 60,
- WriteTimeout: time.Minute * 60,
- TLSConfig: &tls.Config{
- CurvePreferences: []tls.CurveID{
- tls.CurveP256,
- tls.CurveP384,
- tls.CurveP521,
- tls.X25519,
- },
- CipherSuites: DefaultCipherSuites,
- MinVersion: tls.VersionTLS12,
- PreferServerCipherSuites: true,
- },
- }
- s.mu.Unlock()
-
- err = s.http.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- return err
- }
-
- return nil
-}
-
-// Stop prometheus metrics service.
-func (s *Service) Stop() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.http != nil {
- // gracefully stop server
- go func() {
- err := s.http.Shutdown(context.Background())
- if err != nil {
- // Function should be Stop() error
- s.log.Error(fmt.Errorf("error shutting down the metrics server: error %v", err))
- }
- }()
- }
-}
-
-// Collector returns an application-specific collector by name, or nil if the collector is not found.
-func (s *Service) Collector(name string) prometheus.Collector {
- collector, ok := s.collectors.Load(name)
- if !ok {
- return nil
- }
-
- return collector.(prometheus.Collector)
-}
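
Stripped of the TLS and timeout tuning, the removed Serve boils down to a custom prometheus registry exposed through promhttp. A minimal sketch of that wiring (the address and metric name below are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	registry := prometheus.NewRegistry()
	registry.MustRegister(prometheus.NewGoCollector())

	gauge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_gauge",
		Help: "Illustrative application gauge.",
	})
	registry.MustRegister(gauge)
	gauge.Set(100)

	// Expose only the collectors registered above, as the removed service did.
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe("localhost:2112", nil))
}
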
diff --git a/service/metrics/service_test.go b/service/metrics/service_test.go
deleted file mode 100644
index 7e11cf85..00000000
--- a/service/metrics/service_test.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package metrics
-
-import (
- "io/ioutil"
- "net/http"
- "testing"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- rpcCfg string
- metricsCfg string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == ID {
- return &testCfg{target: cfg.metricsCfg}
- }
-
- if name == rpc.ID {
- return &testCfg{target: cfg.rpcCfg}
- }
-
- return nil
-}
-
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- err := j.Unmarshal([]byte(cfg.target), out)
- return err
-}
-
-// get request and return body
-func get(url string) (string, *http.Response, error) {
- r, err := http.Get(url)
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
- return string(b), r, err
-}
-
-func TestService_Serve(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2116"
- }`}))
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- out, _, err := get("http://localhost:2116/metrics")
- assert.NoError(t, err)
-
- assert.Contains(t, out, "go_gc_duration_seconds")
-}
-
-func Test_ServiceCustomMetric(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2115"
- }`}))
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- collector := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "my_gauge",
- Help: "My gauge value",
- })
-
- assert.NoError(t, s.(*Service).Register(collector))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- collector.Set(100)
-
- out, _, err := get("http://localhost:2115/metrics")
- assert.NoError(t, err)
-
- assert.Contains(t, out, "my_gauge 100")
-}
-
-func Test_ServiceCustomMetricMust(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2114"
- }`}))
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- collector := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "my_gauge_2",
- Help: "My gauge value",
- })
-
- s.(*Service).MustRegister(collector)
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- collector.Set(100)
-
- out, _, err := get("http://localhost:2114/metrics")
- assert.NoError(t, err)
-
- assert.Contains(t, out, "my_gauge_2 100")
-}
-
-func Test_ConfiguredMetric(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2113",
- "collect":{
- "user_gauge":{
- "type": "gauge"
- }
- }
- }`}))
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- assert.True(t, s.(*Service).Enabled())
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("error during the Serve: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
- defer c.Stop()
-
- s.(*Service).Collector("user_gauge").(prometheus.Gauge).Set(100)
-
- assert.Nil(t, s.(*Service).Collector("invalid"))
-
- out, _, err := get("http://localhost:2113/metrics")
- assert.NoError(t, err)
-
- assert.Contains(t, out, "user_gauge 100")
-}
-
-func Test_ConfiguredDuplicateMetric(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2112",
- "collect":{
- "go_gc_duration_seconds":{
- "type": "gauge"
- }
- }
- }`}))
-
- s, _ := c.Get(ID)
- assert.NotNil(t, s)
-
- assert.True(t, s.(*Service).Enabled())
-
- assert.Error(t, c.Serve())
-}
-
-func Test_ConfiguredInvalidMetric(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{metricsCfg: `{
- "address": "localhost:2112",
- "collect":{
- "user_gauge":{
- "type": "invalid"
- }
- }
-
- }`}))
-
- assert.Error(t, c.Serve())
-}
diff --git a/service/reload/service.go b/service/reload/service.go
deleted file mode 100644
index c065d95d..00000000
--- a/service/reload/service.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package reload
-
-import (
- "errors"
- "os"
- "strings"
- "time"
-
- "github.com/sirupsen/logrus"
- "github.com/spiral/roadrunner"
- "github.com/spiral/roadrunner/service"
-)
-
-// ID contains default service name.
-const ID = "reload"
-
-type Service struct {
- cfg *Config
- log *logrus.Logger
- watcher *Watcher
- stopc chan struct{}
-}
-
-// Init reload service
-func (s *Service) Init(cfg *Config, log *logrus.Logger, c service.Container) (bool, error) {
- if cfg == nil || len(cfg.Services) == 0 {
- return false, nil
- }
-
- s.cfg = cfg
- s.log = log
- s.stopc = make(chan struct{})
-
- var configs []WatcherConfig
-
- // mount Services to designated services
- for serviceName := range cfg.Services {
- svc, _ := c.Get(serviceName)
- if ctrl, ok := svc.(roadrunner.Controllable); ok {
- tmp := cfg.Services[serviceName]
- tmp.service = &ctrl
- cfg.Services[serviceName] = tmp
- }
- }
-
- for serviceName, config := range s.cfg.Services {
- if cfg.Services[serviceName].service == nil {
- continue
- }
- ignored, err := ConvertIgnored(config.Ignore)
- if err != nil {
- return false, err
- }
- configs = append(configs, WatcherConfig{
- ServiceName: serviceName,
- Recursive: config.Recursive,
- Directories: config.Dirs,
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: ignored,
- FilePatterns: append(config.Patterns, cfg.Patterns...),
- })
- }
-
- var err error
- s.watcher, err = NewWatcher(configs)
- if err != nil {
- return false, err
- }
-
- return true, nil
-}
-
-func (s *Service) Serve() error {
- if s.cfg.Interval < time.Second {
- return errors.New("reload interval is too fast")
- }
-
- // make a map with unique services,
- // so even if we get 100 events from the http service
- // the map holds only 1 key and its config
- treshholdc := make(chan struct {
- serviceConfig ServiceConfig
- service string
- }, 100)
-
- // use the same interval
- timer := time.NewTimer(s.cfg.Interval)
-
- // drain the channel in case of leftover messages
- defer func() {
- go func() {
- for range treshholdc {
-
- }
- }()
- }()
-
- go func() {
- for e := range s.watcher.Event {
- treshholdc <- struct {
- serviceConfig ServiceConfig
- service string
- }{serviceConfig: s.cfg.Services[e.service], service: e.service}
- }
- }()
-
- // map with configs by services
- updated := make(map[string]ServiceConfig, 100)
-
- go func() {
- for {
- select {
- case config := <-treshholdc:
- // replace previous value in map by more recent without adding new one
- updated[config.service] = config.serviceConfig
- // stop timer
- timer.Stop()
- // restart
- // the logic is as follows:
- // if we are getting a lot of events, we shouldn't restart a particular service on each of them (the user may be doing a bulk move or typing very fast)
- // instead, we reset the timer and wait for the Interval time
- // if there are no more events, we restart the service only once
- timer.Reset(s.cfg.Interval)
- case <-timer.C:
- if len(updated) > 0 {
- for k, v := range updated {
- sv := *v.service
- err := sv.Server().Reset()
- if err != nil {
- s.log.Error(err)
- }
- s.log.Debugf("[%s] found %v file(s) changes, reloading", k, len(updated))
- }
- // zero map
- updated = make(map[string]ServiceConfig, 100)
- }
- case <-s.stopc:
- timer.Stop()
- return
- }
- }
- }()
-
- err := s.watcher.StartPolling(s.cfg.Interval)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (s *Service) Stop() {
- s.watcher.Stop()
- s.stopc <- struct{}{}
-}
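
The goroutine in the removed Serve implements a debounce: every file event resets a timer, and a reload runs only once the event stream has been quiet for a full Interval. A self-contained sketch of that timer-reset debounce (names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string)
	interval := 50 * time.Millisecond
	timer := time.NewTimer(interval)
	pending := map[string]struct{}{}

	go func() {
		for _, s := range []string{"http", "http", "http"} {
			events <- s
			time.Sleep(10 * time.Millisecond)
		}
	}()

	for {
		select {
		case svc := <-events:
			pending[svc] = struct{}{} // coalesce repeated events per service
			timer.Stop()              // same Stop/Reset sequence the removed code used
			timer.Reset(interval)
		case <-timer.C:
			if len(pending) > 0 {
				fmt.Println("reloading", len(pending), "service(s)") // fires only once
			}
			return
		}
	}
}
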
diff --git a/service/reload/watcher_test.go b/service/reload/watcher_test.go
deleted file mode 100644
index 2d8ad480..00000000
--- a/service/reload/watcher_test.go
+++ /dev/null
@@ -1,578 +0,0 @@
-package reload
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-var testServiceName = "test"
-
-// scenario
-// Create walker instance, init with default config, check that Watcher found all files from config
-func Test_Correct_Watcher_Init(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
- defer func() {
- err = freeResources(tempDir)
- if err != nil {
- t.Fatal(err)
- }
- }()
- if err != nil {
- t.Fatal(err)
- }
- err = ioutil.WriteFile(filepath.Join(tempDir, "file.txt"),
- []byte{}, 0755)
- if err != nil {
- t.Fatal(err)
- }
-
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: false,
- Directories: []string{tempDir},
- FilterHooks: nil,
- Files: make(map[string]os.FileInfo),
- Ignored: nil,
- FilePatterns: nil,
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- if err != nil {
- t.Fatal(err)
- }
-
- if len(w.GetAllFiles(testServiceName)) != 2 {
- t.Fatal("incorrect directories len")
- }
-}
-
-// scenario
-// create 3 files, create walker instance
-// Start poll events
-// change file and see, if event had come to handler
-func Test_Get_FileEvent(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
- defer func(name string) {
- err = freeResources(name)
- assert.NoError(t, err)
- }(tempDir)
- assert.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(tempDir, "file1.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(tempDir, "file2.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: false,
- Directories: []string{tempDir},
- FilterHooks: nil,
- Files: make(map[string]os.FileInfo),
- Ignored: nil,
- FilePatterns: []string{"aaa", "txt"},
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- assert.NoError(t, err)
-
- // should be 3 files and directory
- if len(w.GetAllFiles(testServiceName)) != 4 {
- t.Fatal("incorrect directories len")
- }
-
- go func() {
- stop := make(chan struct{}, 1)
- go func() {
- time.Sleep(time.Second * 2)
- err := ioutil.WriteFile(filepath.Join(tempDir, "file2.txt"),
- []byte{1, 1, 1}, 0755)
- assert.NoError(t, err)
- time.Sleep(time.Second)
- stop <- struct{}{}
- }()
-
- go func() {
- for {
- select {
- case e := <-w.Event:
- if e.Path != "file2.txt" {
- assert.Fail(t, "didn't handle event when write file2")
- }
- w.Stop()
- case <-stop:
- return
- }
- }
-
- }()
- }()
-
- err = w.StartPolling(time.Second)
- assert.NoError(t, err)
-}
-
-// scenario
-// create 3 files with different extensions, create walker instance
-// Start poll events
-// change file with txt extension, and see, if event had not come to handler because it was filtered
-func Test_FileExtensionFilter(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: false,
- Directories: []string{tempDir},
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: nil,
- FilePatterns: []string{"aaa", "bbb"},
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- assert.NoError(t, err)
-
- dirLen := len(w.GetAllFiles(testServiceName))
- // should be 2 files (one filtered) and directory
- if dirLen != 3 {
- t.Fatalf("incorrect directories len, len is: %d", dirLen)
- }
-
- go func() {
- stop := make(chan struct{}, 1)
-
- go func() {
- time.Sleep(time.Second)
- err := ioutil.WriteFile(filepath.Join(tempDir, "file3.txt"),
- []byte{1, 1, 1}, 0755)
- assert.NoError(t, err)
- stop <- struct{}{}
- }()
-
- go func() {
- time.Sleep(time.Second)
- select {
- case <-w.Event:
- assert.Fail(t, "handled event from filtered file")
- case <-stop:
- return
- }
- }()
- time.Sleep(time.Second)
- w.Stop()
- }()
-
- err = w.StartPolling(time.Second)
- assert.NoError(t, err)
- err = freeResources(tempDir)
- assert.NoError(t, err)
-}
-
-// nested
-// scenario
-// create dir and nested dir
-// make files with aaa, bbb and txt extensions, filter txt
-// change not filtered file, handle event
-func Test_Recursive_Support(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
- defer func() {
- err = freeResources(tempDir)
- assert.NoError(t, err)
- }()
-
- nestedDir, err := ioutil.TempDir(tempDir, "nested")
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: true,
- Directories: []string{tempDir},
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: nil,
- FilePatterns: []string{"aaa", "bbb"},
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- assert.NoError(t, err)
-
- dirLen := len(w.GetAllFiles(testServiceName))
- // should be 3 files (2 from root dir, and 1 from nested), filtered txt
- if dirLen != 3 {
- t.Fatalf("incorrect directories len, len is: %d", dirLen)
- }
-
- go func() {
- stop := make(chan struct{}, 1)
- // time.Sleep is used here because StartPolling is a blocking operation
- time.Sleep(time.Second * 5)
- // change file in nested directory
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
- []byte{1, 1, 1}, 0755)
- assert.NoError(t, err)
-
- go func() {
- time.Sleep(time.Second)
- for {
- select {
- case e := <-w.Event:
- if e.Info.Name() != "file4.aaa" {
- assert.Fail(t, "wrong handled event from watcher in nested dir")
- }
- case <-stop:
- w.Stop()
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- stop <- struct{}{}
- }()
-
- err = w.StartPolling(time.Second)
- assert.NoError(t, err)
-}
-
-func Test_Wrong_Dir(t *testing.T) {
- // no such file or directory
- wrongDir := "askdjfhaksdlfksdf"
-
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: true,
- Directories: []string{wrongDir},
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: nil,
- FilePatterns: []string{"aaa", "bbb"},
- }
-
- _, err := NewWatcher([]WatcherConfig{wc})
- assert.Error(t, err)
-}
-
-func Test_Filter_Directory(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
- defer func(name string) {
- err = freeResources(name)
- assert.NoError(t, err)
- }(tempDir)
-
- nestedDir, err := ioutil.TempDir(tempDir, "nested")
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- ignored, err := ConvertIgnored([]string{nestedDir})
- if err != nil {
- t.Fatal(err)
- }
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: true,
- Directories: []string{tempDir},
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: ignored,
- FilePatterns: []string{"aaa", "bbb", "txt"},
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- if err != nil {
- t.Fatal(err)
- }
-
- dirLen := len(w.GetAllFiles(testServiceName))
- // should be 2 files (2 from root dir), filtered other
- if dirLen != 2 {
- t.Fatalf("incorrect directories len, len is: %d", dirLen)
- }
-
- go func() {
- stop := make(chan struct{}, 1)
- go func() {
- time.Sleep(time.Second)
- err := ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
- []byte{1, 1, 1}, 0755)
- assert.NoError(t, err)
- }()
-
- go func() {
- select {
- case e := <-w.Event:
- fmt.Println("file: " + e.Info.Name())
- assert.Fail(t, "handled event from watcher in nested dir")
- case <-stop:
- w.Stop()
- return
- }
- }()
-
- // time.Sleep is used here because StartPolling is a blocking operation
- time.Sleep(time.Second * 5)
- stop <- struct{}{}
- }()
-
- err = w.StartPolling(time.Second)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// copy files from nested dir to not ignored
-// should fire an event
-func Test_Copy_Directory(t *testing.T) {
- tempDir, err := ioutil.TempDir(".", "")
-
- defer func() {
- err = freeResources(tempDir)
- assert.NoError(t, err)
- }()
-
- nestedDir, err := ioutil.TempDir(tempDir, "nested")
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file1.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(tempDir, "file2.bbb"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file3.txt"),
- []byte{}, 0755)
- assert.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(nestedDir, "file4.aaa"),
- []byte{}, 0755)
- assert.NoError(t, err)
-
- ignored, err := ConvertIgnored([]string{nestedDir})
- assert.NoError(t, err)
-
- wc := WatcherConfig{
- ServiceName: testServiceName,
- Recursive: true,
- Directories: []string{tempDir},
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return ErrorSkip
- },
- Files: make(map[string]os.FileInfo),
- Ignored: ignored,
- FilePatterns: []string{"aaa", "bbb", "txt"},
- }
-
- w, err := NewWatcher([]WatcherConfig{wc})
- assert.NoError(t, err)
-
- dirLen := len(w.GetAllFiles(testServiceName))
-	// should be 2 files (2 from the root dir), everything else filtered out
- if dirLen != 2 {
- t.Fatalf("incorrect directories len, len is: %d", dirLen)
- }
-
- go func() {
- go func() {
- time.Sleep(time.Second * 2)
- err := copyDir(nestedDir, filepath.Join(tempDir, "copyTo"))
- assert.NoError(t, err)
- }()
-
- go func() {
- for range w.Event {
-				// an event should arrive here, otherwise we won't stop
- w.Stop()
- }
- }()
- }()
-
- err = w.StartPolling(time.Second)
- assert.NoError(t, err)
-}
-
-func copyFile(src, dst string) (err error) {
- in, err := os.Open(src)
- if err != nil {
- return
- }
- defer func() {
- _ = in.Close()
- }()
-
- out, err := os.Create(dst)
- if err != nil {
- return
- }
- defer func() {
- if e := out.Close(); e != nil {
- err = e
- }
- }()
-
- _, err = io.Copy(out, in)
- if err != nil {
- return
- }
-
- err = out.Sync()
- if err != nil {
- return
- }
-
- si, err := os.Stat(src)
- if err != nil {
- return
- }
- err = os.Chmod(dst, si.Mode())
- if err != nil {
- return
- }
-
- return
-}
-
-func copyDir(src string, dst string) (err error) {
- src = filepath.Clean(src)
- dst = filepath.Clean(dst)
-
- si, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !si.IsDir() {
- return fmt.Errorf("source is not a directory")
- }
-
- _, err = os.Stat(dst)
- if err != nil && !os.IsNotExist(err) {
- return
- }
- if err == nil {
- return fmt.Errorf("destination already exists")
- }
-
- err = os.MkdirAll(dst, si.Mode())
- if err != nil {
- return
- }
-
- entries, err := ioutil.ReadDir(src)
- if err != nil {
- return
- }
-
- for _, entry := range entries {
- srcPath := filepath.Join(src, entry.Name())
- dstPath := filepath.Join(dst, entry.Name())
-
- if entry.IsDir() {
- err = copyDir(srcPath, dstPath)
- if err != nil {
- return
- }
- } else {
- // Skip symlinks.
- if entry.Mode()&os.ModeSymlink != 0 {
- continue
- }
-
- err = copyFile(srcPath, dstPath)
- if err != nil {
- return
- }
- }
- }
-
- return
-}
-
-func freeResources(path string) error {
- return os.RemoveAll(path)
-}
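For reference, a minimal sketch of driving the watcher API exercised by the deleted tests above. It assumes it lives in the same package as WatcherConfig, NewWatcher, StartPolling and ErrorSkip, with the standard fmt, os, strings and time imports; the service name, directories and patterns are illustrative only.

func watchExample() error {
	wc := WatcherConfig{
		ServiceName: "example",
		Recursive:   true,
		Directories: []string{"."},
		FilterHooks: func(filename string, patterns []string) error {
			for i := 0; i < len(patterns); i++ {
				if strings.Contains(filename, patterns[i]) {
					return nil
				}
			}
			return ErrorSkip
		},
		Files:        make(map[string]os.FileInfo),
		Ignored:      nil,
		FilePatterns: []string{"go", "php"},
	}

	w, err := NewWatcher([]WatcherConfig{wc})
	if err != nil {
		return err
	}

	go func() {
		// every change that passes FilterHooks and is not in Ignored lands here
		for e := range w.Event {
			fmt.Println("changed:", e.Info.Name())
		}
	}()

	// StartPolling blocks; call w.Stop() from another goroutine to exit.
	return w.StartPolling(time.Second)
}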
diff --git a/service/rpc/service.go b/service/rpc/service.go
deleted file mode 100644
index 1d6d7595..00000000
--- a/service/rpc/service.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package rpc
-
-import (
- "errors"
- "net/rpc"
- "sync"
-
- "github.com/spiral/goridge/v2"
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/env"
-)
-
-// ID contains default service name.
-const ID = "rpc"
-
-// Service is RPC service.
-type Service struct {
- cfg *Config
- stop chan interface{}
- rpc *rpc.Server
- mu sync.Mutex
- serving bool
-}
-
-// Init rpc service. Must return true if service is enabled.
-func (s *Service) Init(cfg *Config, c service.Container, env env.Environment) (bool, error) {
- if !cfg.Enable {
- return false, nil
- }
-
- s.cfg = cfg
- s.rpc = rpc.NewServer()
-
- if env != nil {
- env.SetEnv("RR_RPC", cfg.Listen)
- }
-
- if err := s.Register("system", &systemService{c}); err != nil {
- return false, err
- }
-
- return true, nil
-}
-
-// Serve runs the RPC server until Stop is called.
-func (s *Service) Serve() error {
- if s.rpc == nil {
- return errors.New("RPC service is not configured")
- }
-
- s.mu.Lock()
- s.serving = true
- s.stop = make(chan interface{})
- s.mu.Unlock()
-
- ln, err := s.cfg.Listener()
- if err != nil {
- return err
- }
- defer ln.Close()
-
- go func() {
- for {
- select {
- case <-s.stop:
- return
- default:
- conn, err := ln.Accept()
- if err != nil {
- continue
- }
-
- go s.rpc.ServeCodec(goridge.NewCodec(conn))
- }
- }
- }()
-
- <-s.stop
-
- s.mu.Lock()
- s.serving = false
- s.mu.Unlock()
-
- return nil
-}
-
-// Stop stops the service.
-func (s *Service) Stop() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.serving {
- close(s.stop)
- }
-}
-
-// Register publishes in the server the set of methods of the
-// receiver value that satisfy the following conditions:
-// - exported method of exported type
-// - two arguments, both of exported type
-// - the second argument is a pointer
-// - one return value, of type error
-// It returns an error if the receiver is not an exported type or has
-// no suitable methods. It also logs the error using package log.
-func (s *Service) Register(name string, svc interface{}) error {
- if s.rpc == nil {
- return errors.New("RPC service is not configured")
- }
-
- return s.rpc.RegisterName(name, svc)
-}
-
-// Client creates new RPC client.
-func (s *Service) Client() (*rpc.Client, error) {
- if s.cfg == nil {
- return nil, errors.New("RPC service is not configured")
- }
-
- conn, err := s.cfg.Dialer()
- if err != nil {
- return nil, err
- }
-
- return rpc.NewClientWithCodec(goridge.NewClientCodec(conn)), nil
-}
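A hedged sketch, modelled on the deleted tests below, of initializing the RPC service, registering a receiver and calling it through the client. The Echo receiver and the port are illustrative; service.NewContainer(nil) and the tcp:// listen address mirror the removed test file, and the usual time import is assumed.

type echoService struct{}

func (e *echoService) Echo(msg string, r *string) error { *r = msg; return nil }

func rpcExample() error {
	s := &Service{}
	if _, err := s.Init(&Config{Enable: true, Listen: "tcp://localhost:9018"}, service.NewContainer(nil), nil); err != nil {
		return err
	}
	defer s.Stop()

	if err := s.Register("test", &echoService{}); err != nil {
		return err
	}

	// Serve blocks until Stop is called, so run it in the background.
	go func() { _ = s.Serve() }()
	time.Sleep(time.Second)

	client, err := s.Client()
	if err != nil {
		return err
	}
	defer client.Close()

	var resp string
	return client.Call("test.Echo", "hello world", &resp)
}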
diff --git a/service/rpc/service_test.go b/service/rpc/service_test.go
deleted file mode 100644
index 385e818e..00000000
--- a/service/rpc/service_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package rpc
-
-import (
- "testing"
- "time"
-
- "github.com/spiral/roadrunner/service"
- "github.com/spiral/roadrunner/service/env"
- "github.com/stretchr/testify/assert"
-)
-
-type testService struct{}
-
-func (ts *testService) Echo(msg string, r *string) error { *r = msg; return nil }
-
-func Test_Disabled(t *testing.T) {
- s := &Service{}
- ok, err := s.Init(&Config{Enable: false}, service.NewContainer(nil), nil)
-
- assert.NoError(t, err)
- assert.False(t, ok)
-}
-
-func Test_RegisterNotConfigured(t *testing.T) {
- s := &Service{}
- assert.Error(t, s.Register("test", &testService{}))
-
- client, err := s.Client()
- assert.Nil(t, client)
- assert.Error(t, err)
- assert.Error(t, s.Serve())
-}
-
-func Test_Enabled(t *testing.T) {
- s := &Service{}
- ok, err := s.Init(&Config{Enable: true, Listen: "tcp://localhost:9008"}, service.NewContainer(nil), nil)
-
- assert.NoError(t, err)
- assert.True(t, ok)
-}
-
-func Test_StopNonServing(t *testing.T) {
- s := &Service{}
- ok, err := s.Init(&Config{Enable: true, Listen: "tcp://localhost:9008"}, service.NewContainer(nil), nil)
-
- assert.NoError(t, err)
- assert.True(t, ok)
- s.Stop()
-}
-
-func Test_Serve_Errors(t *testing.T) {
- s := &Service{}
- ok, err := s.Init(&Config{Enable: true, Listen: "malformed"}, service.NewContainer(nil), nil)
- assert.NoError(t, err)
- assert.True(t, ok)
-
- assert.Error(t, s.Serve())
-
- client, err := s.Client()
- assert.Nil(t, client)
- assert.Error(t, err)
-}
-
-func Test_Serve_Client(t *testing.T) {
- s := &Service{}
- ok, err := s.Init(&Config{Enable: true, Listen: "tcp://localhost:9018"}, service.NewContainer(nil), nil)
- assert.NoError(t, err)
- assert.True(t, ok)
-
- defer s.Stop()
-
- assert.NoError(t, s.Register("test", &testService{}))
-
- go func() { assert.NoError(t, s.Serve()) }()
- time.Sleep(time.Second)
-
- client, err := s.Client()
- assert.NotNil(t, client)
- assert.NoError(t, err)
-
- var resp string
- assert.NoError(t, client.Call("test.Echo", "hello world", &resp))
- assert.Equal(t, "hello world", resp)
- assert.NoError(t, client.Close())
-}
-
-func TestSetEnv(t *testing.T) {
- s := &Service{}
- e := env.NewService(map[string]string{})
- ok, err := s.Init(&Config{Enable: true, Listen: "tcp://localhost:9018"}, service.NewContainer(nil), e)
-
- assert.NoError(t, err)
- assert.True(t, ok)
-
- v, _ := e.GetEnv()
- assert.Equal(t, "tcp://localhost:9018", v["RR_RPC"])
-}
diff --git a/service/rpc/system.go b/service/rpc/system.go
deleted file mode 100644
index ffba3782..00000000
--- a/service/rpc/system.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package rpc
-
-import "github.com/spiral/roadrunner/service"
-
-// systemService service controls rr server.
-type systemService struct {
- c service.Container
-}
-
-// Stop stops the underlying service container when stop is true.
-func (s *systemService) Stop(stop bool, r *string) error {
- if stop {
- s.c.Stop()
- }
- *r = "OK"
-
- return nil
-}
diff --git a/service/static/config.go b/service/static/config.go
deleted file mode 100644
index db50c7dd..00000000
--- a/service/static/config.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package static
-
-import (
- "fmt"
- "os"
- "path"
- "strings"
-
- "github.com/spiral/roadrunner/service"
-)
-
-// Config describes file location and controls access to them.
-type Config struct {
- // Dir contains name of directory to control access to.
- Dir string
-
-	// Forbid specifies a list of file extensions which are forbidden for access.
-	// Example: .php, .exe, .bat, .htaccess, etc.
- Forbid []string
-
-	// Always specifies a list of extensions which must always be served by the static
-	// service, even if the file is not found.
- Always []string
-
- // Request headers to add to every static.
- Request map[string]string
-
- // Response headers to add to every static.
- Response map[string]string
-}
-
-// Hydrate must populate Config values using given Config source. Must return error if Config is not valid.
-func (c *Config) Hydrate(cfg service.Config) error {
- if err := cfg.Unmarshal(c); err != nil {
- return err
- }
-
- return c.Valid()
-}
-
-// Valid returns nil if config is valid.
-func (c *Config) Valid() error {
- st, err := os.Stat(c.Dir)
- if err != nil {
- if os.IsNotExist(err) {
-			return fmt.Errorf("root directory '%s' does not exist", c.Dir)
- }
-
- return err
- }
-
- if !st.IsDir() {
- return fmt.Errorf("invalid root directory '%s'", c.Dir)
- }
-
- return nil
-}
-
-// AlwaysForbid must return true if file extension is not allowed for the upload.
-func (c *Config) AlwaysForbid(filename string) bool {
- ext := strings.ToLower(path.Ext(filename))
-
- for _, v := range c.Forbid {
- if ext == v {
- return true
- }
- }
-
- return false
-}
-
-// AlwaysServe must indicate that file is expected to be served by static service.
-func (c *Config) AlwaysServe(filename string) bool {
- ext := strings.ToLower(path.Ext(filename))
-
- for _, v := range c.Always {
- if ext == v {
- return true
- }
- }
-
- return false
-}
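A small sketch of the deleted static.Config checks; the directory and the extension lists are illustrative values, not taken from any real configuration.

func staticConfigExample() error {
	c := &Config{
		Dir:    "./public",
		Forbid: []string{".php", ".htaccess"},
		Always: []string{".ico"},
	}

	// Valid fails when Dir is missing or is not a directory.
	if err := c.Valid(); err != nil {
		return err
	}

	_ = c.AlwaysForbid("index.php")  // true: forbidden extension
	_ = c.AlwaysServe("favicon.ico") // true: handled by the static service even if the file is absent
	_ = c.AlwaysForbid("index.html") // false
	return nil
}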
diff --git a/service/static/config_test.go b/service/static/config_test.go
deleted file mode 100644
index 2bc936bb..00000000
--- a/service/static/config_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package static
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
- "github.com/stretchr/testify/assert"
-)
-
-type mockCfg struct{ cfg string }
-
-func (cfg *mockCfg) Get(name string) service.Config { return nil }
-func (cfg *mockCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate(t *testing.T) {
- cfg := &mockCfg{`{"dir": "./", "request":{"foo": "bar"}, "response":{"xxx": "yyy"}}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error(t *testing.T) {
- cfg := &mockCfg{`{"enable": true,"dir": "/dir/"}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func TestConfig_Forbids(t *testing.T) {
- cfg := Config{Forbid: []string{".php"}}
-
- assert.True(t, cfg.AlwaysForbid("index.php"))
- assert.True(t, cfg.AlwaysForbid("index.PHP"))
- assert.True(t, cfg.AlwaysForbid("phpadmin/index.bak.php"))
- assert.False(t, cfg.AlwaysForbid("index.html"))
-}
-
-func TestConfig_Valid(t *testing.T) {
- assert.NoError(t, (&Config{Dir: "./"}).Valid())
- assert.Error(t, (&Config{Dir: "./config.go"}).Valid())
- assert.Error(t, (&Config{Dir: "./dir/"}).Valid())
-}
diff --git a/service/static/service.go b/service/static/service.go
deleted file mode 100644
index 49dbedab..00000000
--- a/service/static/service.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package static
-
-import (
- "net/http"
- "path"
-
- rrhttp "github.com/spiral/roadrunner/service/http"
-)
-
-// ID contains default service name.
-const ID = "static"
-
-// Service serves static files. Potentially convert into middleware?
-type Service struct {
-	// server configuration (location, forbidden files, etc.)
- cfg *Config
-
- // root is initiated http directory
- root http.Dir
-}
-
-// Init configures the service and returns true if the service is enabled. Must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (s *Service) Init(cfg *Config, r *rrhttp.Service) (bool, error) {
- if r == nil {
- return false, nil
- }
-
- s.cfg = cfg
- s.root = http.Dir(s.cfg.Dir)
- r.AddMiddleware(s.middleware)
-
- return true, nil
-}
-
-// middleware must return true if request/response pair is handled within the middleware.
-func (s *Service) middleware(f http.HandlerFunc) http.HandlerFunc {
- // Define the http.HandlerFunc
- return func(w http.ResponseWriter, r *http.Request) {
- if s.cfg.Request != nil {
- for k, v := range s.cfg.Request {
- r.Header.Add(k, v)
- }
- }
-
- if s.cfg.Response != nil {
- for k, v := range s.cfg.Response {
- w.Header().Set(k, v)
- }
- }
-
- if !s.handleStatic(w, r) {
- f(w, r)
- }
- }
-}
-
-func (s *Service) handleStatic(w http.ResponseWriter, r *http.Request) bool {
- fPath := path.Clean(r.URL.Path)
-
- if s.cfg.AlwaysForbid(fPath) {
- return false
- }
-
- f, err := s.root.Open(fPath)
- if err != nil {
- if s.cfg.AlwaysServe(fPath) {
- w.WriteHeader(404)
- return true
- }
-
- return false
- }
- defer f.Close()
-
- d, err := f.Stat()
- if err != nil {
- return false
- }
-
- // do not serve directories
- if d.IsDir() {
- return false
- }
-
- http.ServeContent(w, r, d.Name(), d.ModTime(), f)
- return true
-}
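A hedged sketch of attaching the deleted static service to the HTTP service: Init stores the config, wraps Dir in an http.Dir and registers the middleware. The rrhttp.Service value is assumed to come from the host application's service container, and the directory is illustrative.

func enableStatic(httpSvc *rrhttp.Service) (*Service, error) {
	s := &Service{}

	ok, err := s.Init(&Config{Dir: "./public", Always: []string{".ico"}}, httpSvc)
	if err != nil {
		return nil, err
	}
	if !ok {
		// Init returns false only when no HTTP service was provided.
		return nil, nil
	}

	// From here on, every request passes through s.middleware before the PHP handler.
	return s, nil
}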
diff --git a/service/static/service_test.go b/service/static/service_test.go
deleted file mode 100644
index bbab86c2..00000000
--- a/service/static/service_test.go
+++ /dev/null
@@ -1,530 +0,0 @@
-package static
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "testing"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/spiral/roadrunner/service"
- rrhttp "github.com/spiral/roadrunner/service/http"
- "github.com/stretchr/testify/assert"
-)
-
-type testCfg struct {
- httpCfg string
- static string
- target string
-}
-
-func (cfg *testCfg) Get(name string) service.Config {
- if name == rrhttp.ID {
- return &testCfg{target: cfg.httpCfg}
- }
-
- if name == ID {
- return &testCfg{target: cfg.static}
- }
- return nil
-}
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.target), out)
-}
-
-func Test_Files(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8029",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Second)
-
- b, _, _ := get("http://localhost:8029/sample.txt")
- assert.Equal(t, "sample", b)
- c.Stop()
-}
-
-func Test_Disabled(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[]}`,
- }))
-
- s, st := c.Get(ID)
- assert.NotNil(t, s)
- assert.Equal(t, service.StatusInactive, st)
-}
-
-func Test_Files_Disable(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":false, "dir":"../../tests", "forbid":[".php"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8030",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Second)
-
- b, _, err := get("http://localhost:8030/client.php?hello=world")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, "WORLD", b)
- c.Stop()
-}
-
-func Test_Files_Error(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.Error(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"dir/invalid", "forbid":[".php"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8031",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-}
-
-func Test_Files_Error2(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.Error(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"dir/invalid", "forbid":[".php"]`,
- httpCfg: `{
- "enable": true,
- "address": ":8032",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-}
-
-func Test_Files_Forbid(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[".php"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8033",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- b, _, err := get("http://localhost:8033/client.php?hello=world")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, "WORLD", b)
- c.Stop()
-}
-
-func Test_Files_Always(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[".php"], "always":[".ico"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8034",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- _, r, err := get("http://localhost:8034/favicon.ico")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, 404, r.StatusCode)
- c.Stop()
-}
-
-func Test_Files_NotFound(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[".php"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8035",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- b, _, _ := get("http://localhost:8035/client.XXX?hello=world")
- assert.Equal(t, "WORLD", b)
- c.Stop()
-}
-
-func Test_Files_Dir(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[".php"]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8036",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php echo pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- b, _, _ := get("http://localhost:8036/http?hello=world")
- assert.Equal(t, "WORLD", b)
- c.Stop()
-}
-
-func Test_Files_NotForbid(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[]}`,
- httpCfg: `{
- "enable": true,
- "address": ":8037",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- b, _, _ := get("http://localhost:8037/client.php")
- assert.Equal(t, all("../../tests/client.php"), b)
- assert.Equal(t, all("../../tests/client.php"), b)
- c.Stop()
-}
-
-func TestStatic_Headers(t *testing.T) {
- logger, _ := test.NewNullLogger()
- logger.SetLevel(logrus.DebugLevel)
-
- c := service.NewContainer(logger)
- c.Register(rrhttp.ID, &rrhttp.Service{})
- c.Register(ID, &Service{})
-
- assert.NoError(t, c.Init(&testCfg{
- static: `{"enable":true, "dir":"../../tests", "forbid":[], "request":{"input": "custom-header"}, "response":{"output": "output-header"}}`,
- httpCfg: `{
- "enable": true,
- "address": ":8037",
- "maxRequestSize": 1024,
- "uploads": {
- "dir": ` + tmpDir() + `,
- "forbid": []
- },
- "workers":{
- "command": "php ../../tests/http/client.php pid pipes",
- "relay": "pipes",
- "pool": {
- "numWorkers": 1,
- "allocateTimeout": 10000000,
- "destroyTimeout": 10000000
- }
- }
- }`}))
-
- go func() {
- err := c.Serve()
- if err != nil {
- t.Errorf("serve error: %v", err)
- }
- }()
-
- time.Sleep(time.Millisecond * 500)
-
- req, err := http.NewRequest("GET", "http://localhost:8037/client.php", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- if resp.Header.Get("Output") != "output-header" {
- t.Fatal("can't find output header in response")
- }
-
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
-
- assert.Equal(t, all("../../tests/client.php"), string(b))
- assert.Equal(t, all("../../tests/client.php"), string(b))
- c.Stop()
-}
-
-func get(url string) (string, *http.Response, error) {
- r, err := http.Get(url)
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
-
- return string(b), r, err
-}
-
-func tmpDir() string {
- p := os.TempDir()
- j := json.ConfigCompatibleWithStandardLibrary
- r, _ := j.Marshal(p)
-
- return string(r)
-}
-
-func all(fn string) string {
- f, _ := os.Open(fn)
-
- b := &bytes.Buffer{}
- _, err := io.Copy(b, f)
- if err != nil {
- return ""
- }
-
- err = f.Close()
- if err != nil {
- return ""
- }
-
- return b.String()
-}
diff --git a/socket_factory.go b/socket_factory.go
deleted file mode 100644
index f652e056..00000000
--- a/socket_factory.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "net"
- "os/exec"
- "sync"
- "time"
-
- "github.com/pkg/errors"
- "github.com/spiral/goridge/v2"
-)
-
-// SocketFactory connects to external workers using socket server.
-type SocketFactory struct {
- // listens for incoming connections from underlying processes
- ls net.Listener
-
- // relay connection timeout
- tout time.Duration
-
- // protects socket mapping
- mu sync.Mutex
-
- // sockets which are waiting for process association
- relays map[int]chan *goridge.SocketRelay
-}
-
-// NewSocketFactory returns SocketFactory attached to a given socket listener.
-// tout specifies for how long the factory should wait for an incoming relay connection.
-func NewSocketFactory(ls net.Listener, tout time.Duration) *SocketFactory {
- f := &SocketFactory{
- ls: ls,
- tout: tout,
- relays: make(map[int]chan *goridge.SocketRelay),
- }
-
- go f.listen()
-
- return f
-}
-
-// SpawnWorker creates a worker and connects it to the appropriate relay, or returns an error
-func (f *SocketFactory) SpawnWorker(cmd *exec.Cmd) (w *Worker, err error) {
- if w, err = newWorker(cmd); err != nil {
- return nil, err
- }
-
- if err := w.start(); err != nil {
- return nil, errors.Wrap(err, "process error")
- }
-
- rl, err := f.findRelay(w, f.tout)
- if err != nil {
- go func(w *Worker) {
- err := w.Kill()
- if err != nil {
- fmt.Println(fmt.Errorf("error killing the worker %v", err))
- }
- }(w)
-
- if wErr := w.Wait(); wErr != nil {
- if _, ok := wErr.(*exec.ExitError); ok {
- err = errors.Wrap(wErr, err.Error())
- } else {
- err = wErr
- }
- }
-
- return nil, errors.Wrap(err, "unable to connect to worker")
- }
-
- w.rl = rl
- w.state.set(StateReady)
-
- return w, nil
-}
-
-// Close socket factory and underlying socket connection.
-func (f *SocketFactory) Close() error {
- return f.ls.Close()
-}
-
-// listens for incoming socket connections
-func (f *SocketFactory) listen() {
- for {
- conn, err := f.ls.Accept()
- if err != nil {
- return
- }
-
- rl := goridge.NewSocketRelay(conn)
- if pid, err := fetchPID(rl); err == nil {
- f.relayChan(pid) <- rl
- }
- }
-}
-
-// waits for the worker to connect over a socket and returns the associated relay, or times out
-func (f *SocketFactory) findRelay(w *Worker, tout time.Duration) (*goridge.SocketRelay, error) {
- timer := time.NewTimer(tout)
- for {
- select {
- case rl := <-f.relayChan(*w.Pid):
- timer.Stop()
- f.cleanChan(*w.Pid)
- return rl, nil
-
- case <-timer.C:
- return nil, fmt.Errorf("relay timeout")
-
- case <-w.waitDone:
- timer.Stop()
- f.cleanChan(*w.Pid)
- return nil, fmt.Errorf("worker is gone")
- }
- }
-}
-
-// chan to store relay associated with specific Pid
-func (f *SocketFactory) relayChan(pid int) chan *goridge.SocketRelay {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- rl, ok := f.relays[pid]
- if !ok {
- f.relays[pid] = make(chan *goridge.SocketRelay)
- return f.relays[pid]
- }
-
- return rl
-}
-
-// deletes relay chan associated with specific Pid
-func (f *SocketFactory) cleanChan(pid int) {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- delete(f.relays, pid)
-}
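A minimal sketch of the deleted SocketFactory flow, assuming the usual net, os/exec and time imports and a PHP worker script ("worker.php" is a placeholder path) that connects back over the same TCP address.

func spawnOverSocket() (*Worker, error) {
	ls, err := net.Listen("tcp", "127.0.0.1:9007")
	if err != nil {
		return nil, err
	}

	// the factory keeps accepting relay connections until f.Close() is called
	f := NewSocketFactory(ls, time.Minute)

	cmd := exec.Command("php", "worker.php")

	// SpawnWorker blocks until the worker connects back or the relay times out.
	return f.SpawnWorker(cmd)
}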
diff --git a/src/Diactoros/ServerRequestFactory.php b/src/Diactoros/ServerRequestFactory.php
deleted file mode 100644
index 6a42f207..00000000
--- a/src/Diactoros/ServerRequestFactory.php
+++ /dev/null
@@ -1,28 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Diactoros;
-
-use Psr\Http\Message\ServerRequestFactoryInterface;
-use Psr\Http\Message\ServerRequestInterface;
-use Laminas\Diactoros\ServerRequest;
-
-final class ServerRequestFactory implements ServerRequestFactoryInterface
-{
- /**
- * @inheritdoc
- *
- * @param array<mixed> $serverParams Array of SAPI parameters with which to seed the generated request instance.
- */
- public function createServerRequest(string $method, $uri, array $serverParams = []): ServerRequestInterface
- {
- $uploadedFiles = [];
- return new ServerRequest($serverParams, $uploadedFiles, $uri, $method);
- }
-}
diff --git a/src/Diactoros/StreamFactory.php b/src/Diactoros/StreamFactory.php
deleted file mode 100644
index 68a77e92..00000000
--- a/src/Diactoros/StreamFactory.php
+++ /dev/null
@@ -1,57 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Diactoros;
-
-use RuntimeException;
-use Psr\Http\Message\StreamFactoryInterface;
-use Psr\Http\Message\StreamInterface;
-use Laminas\Diactoros\Stream;
-
-final class StreamFactory implements StreamFactoryInterface
-{
- /**
- * @inheritdoc
- * @throws RuntimeException
- */
- public function createStream(string $content = ''): StreamInterface
- {
- $resource = fopen('php://temp', 'rb+');
-
- if (! \is_resource($resource)) {
- throw new RuntimeException('Cannot create stream');
- }
-
- fwrite($resource, $content);
- rewind($resource);
- return $this->createStreamFromResource($resource);
- }
-
- /**
- * @inheritdoc
- */
- public function createStreamFromFile(string $file, string $mode = 'rb'): StreamInterface
- {
- $resource = fopen($file, $mode);
-
- if (! \is_resource($resource)) {
- throw new RuntimeException('Cannot create stream');
- }
-
- return $this->createStreamFromResource($resource);
- }
-
- /**
- * @inheritdoc
- */
- public function createStreamFromResource($resource): StreamInterface
- {
- return new Stream($resource);
- }
-}
diff --git a/src/Diactoros/UploadedFileFactory.php b/src/Diactoros/UploadedFileFactory.php
deleted file mode 100644
index daa475c1..00000000
--- a/src/Diactoros/UploadedFileFactory.php
+++ /dev/null
@@ -1,36 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Diactoros;
-
-use Psr\Http\Message\StreamInterface;
-use Psr\Http\Message\UploadedFileFactoryInterface;
-use Psr\Http\Message\UploadedFileInterface;
-use Laminas\Diactoros\UploadedFile;
-
-final class UploadedFileFactory implements UploadedFileFactoryInterface
-{
- /**
- * @inheritdoc
- */
- public function createUploadedFile(
- StreamInterface $stream,
- int $size = null,
- int $error = \UPLOAD_ERR_OK,
- string $clientFilename = null,
- string $clientMediaType = null
- ): UploadedFileInterface {
- if ($size === null) {
- $size = (int) $stream->getSize();
- }
-
- /** @var resource $stream */
- return new UploadedFile($stream, $size, $error, $clientFilename, $clientMediaType);
- }
-}
diff --git a/src/Exception/MetricException.php b/src/Exception/MetricException.php
deleted file mode 100644
index d5b738b8..00000000
--- a/src/Exception/MetricException.php
+++ /dev/null
@@ -1,17 +0,0 @@
-<?php
-
-/**
- * Spiral Framework.
- *
- * @license MIT
- * @author Anton Titov (Wolfy-J)
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Exception;
-
-use Spiral\Goridge\Exceptions\RPCException;
-
-class MetricException extends RPCException
-{
-}
diff --git a/src/Exception/RoadRunnerException.php b/src/Exception/RoadRunnerException.php
deleted file mode 100644
index f83c3dd4..00000000
--- a/src/Exception/RoadRunnerException.php
+++ /dev/null
@@ -1,14 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Exception;
-
-class RoadRunnerException extends \Spiral\RoadRunner\Exceptions\RoadRunnerException
-{
-}
diff --git a/src/Exceptions/RoadRunnerException.php b/src/Exceptions/RoadRunnerException.php
deleted file mode 100644
index 43967893..00000000
--- a/src/Exceptions/RoadRunnerException.php
+++ /dev/null
@@ -1,18 +0,0 @@
-<?php
-
-/**
- * Spiral Framework.
- *
- * @license MIT
- * @author Anton Titov (Wolfy-J)
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner\Exceptions;
-
-/**
- * @deprecated use \Spiral\RoadRunner\Exception\RoadRunnerException instead
- */
-class RoadRunnerException extends \RuntimeException
-{
-}
diff --git a/src/HttpClient.php b/src/HttpClient.php
deleted file mode 100644
index 9b9048ca..00000000
--- a/src/HttpClient.php
+++ /dev/null
@@ -1,74 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Alex Bond
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner;
-
-final class HttpClient
-{
- /** @var Worker */
- private $worker;
-
- /**
- * @param Worker $worker
- */
- public function __construct(Worker $worker)
- {
- $this->worker = $worker;
- }
-
- /**
- * @return Worker
- */
- public function getWorker(): Worker
- {
- return $this->worker;
- }
-
- /**
- * @return mixed[]|null Request information as ['ctx'=>[], 'body'=>string]
- * or null if termination request or invalid context.
- */
- public function acceptRequest(): ?array
- {
- $body = $this->getWorker()->receive($ctx);
- if (empty($body) && empty($ctx)) {
- // termination request
- return null;
- }
-
- $ctx = json_decode($ctx, true);
- if ($ctx === null) {
- // invalid context
- return null;
- }
-
- return ['ctx' => $ctx, 'body' => $body];
- }
-
- /**
- * Send response to the application server.
- *
- * @param int $status Http status code
- * @param string $body Body of response
- * @param string[][] $headers An associative array of the message's headers. Each
- * key MUST be a header name, and each value MUST be an array of strings
- * for that header.
- */
- public function respond(int $status, string $body, array $headers = []): void
- {
- $sendHeaders = empty($headers)
- ? new \stdClass() // this is required to represent empty header set as map and not as array
- : $headers;
-
- $this->getWorker()->send(
- $body,
- (string) json_encode(['status' => $status, 'headers' => $sendHeaders])
- );
- }
-}
diff --git a/src/Metrics.php b/src/Metrics.php
deleted file mode 100644
index d6b6e1da..00000000
--- a/src/Metrics.php
+++ /dev/null
@@ -1,80 +0,0 @@
-<?php
-
-/**
- * Spiral Framework.
- *
- * @license MIT
- * @author Anton Titov (Wolfy-J)
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner;
-
-use Spiral\Goridge\Exceptions\RPCException;
-use Spiral\Goridge\RPC;
-use Spiral\RoadRunner\Exception\MetricException;
-
-/**
- * Application metrics.
- */
-final class Metrics implements MetricsInterface
-{
- /** @var RPC */
- private $rpc;
-
- /**
- * @param RPC $rpc
- */
- public function __construct(RPC $rpc)
- {
- $this->rpc = $rpc;
- }
-
- /**
- * @inheritDoc
- */
- public function add(string $name, float $value, array $labels = []): void
- {
- try {
- $this->rpc->call('metrics.Add', compact('name', 'value', 'labels'));
- } catch (RPCException $e) {
- throw new MetricException($e->getMessage(), $e->getCode(), $e);
- }
- }
-
- /**
- * @inheritDoc
- */
- public function sub(string $name, float $value, array $labels = []): void
- {
- try {
- $this->rpc->call('metrics.Sub', compact('name', 'value', 'labels'));
- } catch (RPCException $e) {
- throw new MetricException($e->getMessage(), $e->getCode(), $e);
- }
- }
-
- /**
- * @inheritDoc
- */
- public function observe(string $name, float $value, array $labels = []): void
- {
- try {
- $this->rpc->call('metrics.Observe', compact('name', 'value', 'labels'));
- } catch (RPCException $e) {
- throw new MetricException($e->getMessage(), $e->getCode(), $e);
- }
- }
-
- /**
- * @inheritDoc
- */
- public function set(string $name, float $value, array $labels = []): void
- {
- try {
- $this->rpc->call('metrics.Set', compact('name', 'value', 'labels'));
- } catch (RPCException $e) {
- throw new MetricException($e->getMessage(), $e->getCode(), $e);
- }
- }
-}
diff --git a/src/MetricsInterface.php b/src/MetricsInterface.php
deleted file mode 100644
index ec2009b0..00000000
--- a/src/MetricsInterface.php
+++ /dev/null
@@ -1,64 +0,0 @@
-<?php
-
-/**
- * Spiral Framework.
- *
- * @license MIT
- * @author Anton Titov (Wolfy-J)
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner;
-
-use Spiral\RoadRunner\Exception\MetricException;
-
-interface MetricsInterface
-{
- /**
-     * Add collector value. Falls back to the appropriate method of the related collector.
- *
- * @param string $collector
- * @param float $value
- * @param mixed[] $labels
- *
- * @throws MetricException
- * @return void
- */
- public function add(string $collector, float $value, array $labels = []);
-
- /**
- * Subtract the collector value, only for gauge collector.
- *
- * @param string $collector
- * @param float $value
- * @param mixed[] $labels
- *
- * @throws MetricException
- * @return void
- */
- public function sub(string $collector, float $value, array $labels = []);
-
- /**
- * Observe collector value, only for histogram and summary collectors.
- *
- * @param string $collector
- * @param float $value
- * @param mixed[] $labels
- *
- * @throws MetricException
- * @return void
- */
- public function observe(string $collector, float $value, array $labels = []);
-
- /**
- * Set collector value, only for gauge collector.
- *
- * @param string $collector
- * @param float $value
- * @param mixed[] $labels
- *
- * @throws MetricException
- * @return void
- */
- public function set(string $collector, float $value, array $labels = []);
-}
diff --git a/src/PSR7Client.php b/src/PSR7Client.php
deleted file mode 100644
index 777dd891..00000000
--- a/src/PSR7Client.php
+++ /dev/null
@@ -1,217 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner;
-
-use Psr\Http\Message\ResponseInterface;
-use Psr\Http\Message\ServerRequestFactoryInterface;
-use Psr\Http\Message\ServerRequestInterface;
-use Psr\Http\Message\StreamFactoryInterface;
-use Psr\Http\Message\UploadedFileFactoryInterface;
-use Psr\Http\Message\UploadedFileInterface;
-
-/**
- * Manages PSR-7 request and response.
- */
-class PSR7Client
-{
- /** @var HttpClient */
- private $httpClient;
-
- /** @var ServerRequestFactoryInterface */
- private $requestFactory;
-
- /** @var StreamFactoryInterface */
- private $streamFactory;
-
- /** @var UploadedFileFactoryInterface */
- private $uploadsFactory;
-
- /** @var mixed[] */
- private $originalServer = [];
-
- /** @var string[] Valid values for HTTP protocol version */
- private static $allowedVersions = ['1.0', '1.1', '2',];
-
- /**
- * @param Worker $worker
- * @param ServerRequestFactoryInterface|null $requestFactory
- * @param StreamFactoryInterface|null $streamFactory
- * @param UploadedFileFactoryInterface|null $uploadsFactory
- */
- public function __construct(
- Worker $worker,
- ServerRequestFactoryInterface $requestFactory = null,
- StreamFactoryInterface $streamFactory = null,
- UploadedFileFactoryInterface $uploadsFactory = null
- ) {
- $this->httpClient = new HttpClient($worker);
- $this->requestFactory = $requestFactory ?? new Diactoros\ServerRequestFactory();
- $this->streamFactory = $streamFactory ?? new Diactoros\StreamFactory();
- $this->uploadsFactory = $uploadsFactory ?? new Diactoros\UploadedFileFactory();
- $this->originalServer = $_SERVER;
- }
-
- /**
- * @return Worker
- */
- public function getWorker(): Worker
- {
- return $this->httpClient->getWorker();
- }
-
- /**
- * @return ServerRequestInterface|null
- */
- public function acceptRequest(): ?ServerRequestInterface
- {
- $rawRequest = $this->httpClient->acceptRequest();
- if ($rawRequest === null) {
- return null;
- }
-
- $_SERVER = $this->configureServer($rawRequest['ctx']);
-
- $request = $this->requestFactory->createServerRequest(
- $rawRequest['ctx']['method'],
- $rawRequest['ctx']['uri'],
- $_SERVER
- );
-
- parse_str($rawRequest['ctx']['rawQuery'], $query);
-
- $request = $request
- ->withProtocolVersion(static::fetchProtocolVersion($rawRequest['ctx']['protocol']))
- ->withCookieParams($rawRequest['ctx']['cookies'])
- ->withQueryParams($query)
- ->withUploadedFiles($this->wrapUploads($rawRequest['ctx']['uploads']));
-
- foreach ($rawRequest['ctx']['attributes'] as $name => $value) {
- $request = $request->withAttribute($name, $value);
- }
-
- foreach ($rawRequest['ctx']['headers'] as $name => $value) {
- $request = $request->withHeader($name, $value);
- }
-
- if ($rawRequest['ctx']['parsed']) {
- return $request->withParsedBody(json_decode($rawRequest['body'], true));
- }
-
- if ($rawRequest['body'] !== null) {
- return $request->withBody($this->streamFactory->createStream($rawRequest['body']));
- }
-
- return $request;
- }
-
- /**
- * Send response to the application server.
- *
- * @param ResponseInterface $response
- */
- public function respond(ResponseInterface $response): void
- {
- $this->httpClient->respond(
- $response->getStatusCode(),
- $response->getBody()->__toString(),
- $response->getHeaders()
- );
- }
-
- /**
- * Returns altered copy of _SERVER variable. Sets ip-address,
- * request-time and other values.
- *
- * @param mixed[] $ctx
- * @return mixed[]
- */
- protected function configureServer(array $ctx): array
- {
- $server = $this->originalServer;
-
- $server['REQUEST_URI'] = $ctx['uri'];
- $server['REQUEST_TIME'] = time();
- $server['REQUEST_TIME_FLOAT'] = microtime(true);
- $server['REMOTE_ADDR'] = $ctx['attributes']['ipAddress'] ?? $ctx['remoteAddr'] ?? '127.0.0.1';
- $server['REQUEST_METHOD'] = $ctx['method'];
-
- $server['HTTP_USER_AGENT'] = '';
- foreach ($ctx['headers'] as $key => $value) {
- $key = strtoupper(str_replace('-', '_', $key));
- if (\in_array($key, ['CONTENT_TYPE', 'CONTENT_LENGTH'])) {
- $server[$key] = implode(', ', $value);
- } else {
- $server['HTTP_' . $key] = implode(', ', $value);
- }
- }
-
- return $server;
- }
-
- /**
- * Wraps all uploaded files with UploadedFile.
- *
- * @param array[] $files
- *
- * @return UploadedFileInterface[]|mixed[]
- */
- private function wrapUploads($files): array
- {
- if (empty($files)) {
- return [];
- }
-
- $result = [];
- foreach ($files as $index => $f) {
- if (!isset($f['name'])) {
- $result[$index] = $this->wrapUploads($f);
- continue;
- }
-
- if (UPLOAD_ERR_OK === $f['error']) {
- $stream = $this->streamFactory->createStreamFromFile($f['tmpName']);
- } else {
- $stream = $this->streamFactory->createStream();
- }
-
- $result[$index] = $this->uploadsFactory->createUploadedFile(
- $stream,
- $f['size'],
- $f['error'],
- $f['name'],
- $f['mime']
- );
- }
-
- return $result;
- }
-
- /**
- * Normalize HTTP protocol version to valid values
- *
- * @param string $version
- * @return string
- */
- private static function fetchProtocolVersion(string $version): string
- {
- $v = substr($version, 5);
-
- if ($v === '2.0') {
- return '2';
- }
-
- // Fallback for values outside of valid protocol versions
- if (!in_array($v, static::$allowedVersions, true)) {
- return '1.1';
- }
-
- return $v;
- }
-}
diff --git a/src/Worker.php b/src/Worker.php
deleted file mode 100644
index d509562e..00000000
--- a/src/Worker.php
+++ /dev/null
@@ -1,178 +0,0 @@
-<?php
-
-/**
- * High-performance PHP process supervisor and load balancer written in Go
- *
- * @author Wolfy-J
- */
-declare(strict_types=1);
-
-namespace Spiral\RoadRunner;
-
-use Spiral\Goridge\Exceptions\GoridgeException;
-use Spiral\Goridge\RelayInterface as Relay;
-use Spiral\Goridge\SendPackageRelayInterface;
-use Spiral\RoadRunner\Exception\RoadRunnerException;
-
-/**
- * Accepts connection from RoadRunner server over given Goridge relay.
- *
- * Example:
- *
- * $worker = new Worker(new Goridge\StreamRelay(STDIN, STDOUT));
- * while ($task = $worker->receive($context)) {
- * $worker->send("DONE", json_encode($context));
- * }
- */
-class Worker
-{
- // Send as response context to request worker termination
- public const STOP = '{"stop":true}';
-
- /** @var Relay */
- private $relay;
-
- /**
- * @param Relay $relay
- */
- public function __construct(Relay $relay)
- {
- $this->relay = $relay;
- }
-
- /**
-     * Receives a packet of information to process; returns null when the process must be stopped. Might
-     * return Error to wrap an error message from the server.
- *
- * @param mixed $header
- * @return \Error|null|string
- *
- * @throws GoridgeException
- */
- public function receive(&$header)
- {
- $body = $this->relay->receiveSync($flags);
-
- if ($flags & Relay::PAYLOAD_CONTROL) {
- if ($this->handleControl($body, $header, $flags)) {
- // wait for the next command
- return $this->receive($header);
- }
-
- // no context for the termination.
- $header = null;
-
- // Expect process termination
- return null;
- }
-
- if ($flags & Relay::PAYLOAD_ERROR) {
- return new \Error((string)$body);
- }
-
- return $body;
- }
-
- /**
- * Respond to the server with result of task execution and execution context.
- *
- * Example:
- * $worker->respond((string)$response->getBody(), json_encode($response->getHeaders()));
- *
- * @param string|null $payload
- * @param string|null $header
- */
- public function send(string $payload = null, string $header = null): void
- {
- if (!$this->relay instanceof SendPackageRelayInterface) {
- if ($header === null) {
- $this->relay->send('', Relay::PAYLOAD_CONTROL | Relay::PAYLOAD_NONE);
- } else {
- $this->relay->send($header, Relay::PAYLOAD_CONTROL | Relay::PAYLOAD_RAW);
- }
-
- $this->relay->send((string)$payload, Relay::PAYLOAD_RAW);
- } else {
- $this->relay->sendPackage(
- (string)$header,
- Relay::PAYLOAD_CONTROL | ($header === null ? Relay::PAYLOAD_NONE : Relay::PAYLOAD_RAW),
- (string)$payload,
- Relay::PAYLOAD_RAW
- );
- }
- }
-
- /**
- * Respond to the server with an error. Error must be treated as TaskError and might not cause
- * worker destruction.
- *
- * Example:
- *
- * $worker->error("invalid payload");
- *
- * @param string $message
- */
- public function error(string $message): void
- {
- $this->relay->send(
- $message,
- Relay::PAYLOAD_CONTROL | Relay::PAYLOAD_RAW | Relay::PAYLOAD_ERROR
- );
- }
-
- /**
- * Terminate the process. Server must automatically pass task to the next available process.
- * Worker will receive StopCommand context after calling this method.
- *
-     * Attention, you MUST use continue; after invoking this method to let rr properly
-     * stop the worker.
- *
- * @throws GoridgeException
- */
- public function stop(): void
- {
- $this->send(null, self::STOP);
- }
-
- /**
- * Handles incoming control command payload and executes it if required.
- *
- * @param string $body
- * @param mixed $header Exported context (if any).
- * @param int $flags
- * @return bool True when continue processing.
- *
- * @throws RoadRunnerException
- */
- private function handleControl(string $body = null, &$header = null, int $flags = 0): bool
- {
- $header = $body;
- if ($body === null || $flags & Relay::PAYLOAD_RAW) {
- // empty or raw prefix
- return true;
- }
-
- $p = json_decode($body, true);
- if ($p === false) {
- throw new RoadRunnerException('invalid task context, JSON payload is expected');
- }
-
- // PID negotiation (socket connections only)
- if (!empty($p['pid'])) {
- $this->relay->send(
- sprintf('{"pid":%s}', getmypid()),
- Relay::PAYLOAD_CONTROL
- );
- }
-
- // termination request
- if (!empty($p['stop'])) {
- return false;
- }
-
- // parsed header
- $header = $p;
-
- return true;
- }
-}
diff --git a/state.go b/state.go
deleted file mode 100644
index 98451f48..00000000
--- a/state.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "sync/atomic"
-)
-
-// State represents worker status and updated time.
-type State interface {
- fmt.Stringer
-
- // Value returns state value
- Value() int64
-
-	// NumExecs shows how many times the worker was invoked
- NumExecs() int64
-
- // IsActive returns true if worker not Inactive or Stopped
- IsActive() bool
-}
-
-const (
- // StateInactive - no associated process
- StateInactive int64 = iota
-
- // StateReady - ready for job.
- StateReady
-
- // StateWorking - working on given payload.
- StateWorking
-
- // StateInvalid - indicates that worker is being disabled and will be removed.
- StateInvalid
-
- // StateStopping - process is being softly stopped.
- StateStopping
-
- // StateStopped - process has been terminated.
- StateStopped
-
- // StateErrored - error state (can't be used).
- StateErrored
-)
-
-type state struct {
- value int64
- numExecs int64
-}
-
-func newState(value int64) *state {
- return &state{value: value}
-}
-
-// String returns current state as string.
-func (s *state) String() string {
- switch s.Value() {
- case StateInactive:
- return "inactive"
- case StateReady:
- return "ready"
- case StateWorking:
- return "working"
- case StateInvalid:
- return "invalid"
- case StateStopped:
- return "stopped"
- case StateErrored:
- return "errored"
- }
-
- return "undefined"
-}
-
-// NumExecs returns number of registered worker execs.
-func (s *state) NumExecs() int64 {
- return atomic.LoadInt64(&s.numExecs)
-}
-
-// Value returns the current state value
-func (s *state) Value() int64 {
- return atomic.LoadInt64(&s.value)
-}
-
-// IsActive returns true if worker not Inactive or Stopped
-func (s *state) IsActive() bool {
- state := s.Value()
- return state == StateWorking || state == StateReady
-}
-
-// change state value (status)
-func (s *state) set(value int64) {
- atomic.StoreInt64(&s.value, value)
-}
-
-// register new execution atomically
-func (s *state) registerExec() {
- atomic.AddInt64(&s.numExecs, 1)
-}
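A small sketch of the deleted state helper: transitions are plain atomic writes, and IsActive reports only the Ready and Working states. The call sequence is illustrative.

func stateExample() {
	st := newState(StateInactive)

	st.set(StateReady)
	_ = st.IsActive() // true

	st.set(StateWorking)
	st.registerExec()
	_ = st.NumExecs() // 1

	st.set(StateStopped)
	_ = st.IsActive() // false
	_ = st.String()   // "stopped"
}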
diff --git a/state_test.go b/state_test.go
deleted file mode 100644
index 10547a4b..00000000
--- a/state_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package roadrunner
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_NewState(t *testing.T) {
- st := newState(StateErrored)
-
- assert.Equal(t, "errored", st.String())
-
- assert.Equal(t, "inactive", newState(StateInactive).String())
- assert.Equal(t, "ready", newState(StateReady).String())
- assert.Equal(t, "working", newState(StateWorking).String())
- assert.Equal(t, "stopped", newState(StateStopped).String())
- assert.Equal(t, "undefined", newState(1000).String())
-}
-
-func Test_IsActive(t *testing.T) {
- assert.False(t, newState(StateInactive).IsActive())
- assert.True(t, newState(StateReady).IsActive())
- assert.True(t, newState(StateWorking).IsActive())
- assert.False(t, newState(StateStopped).IsActive())
- assert.False(t, newState(StateErrored).IsActive())
-}
diff --git a/static_pool.go b/static_pool.go
deleted file mode 100644
index efd9125a..00000000
--- a/static_pool.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package roadrunner
-
-import (
- "os/exec"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkg/errors"
-)
-
-const (
- // StopRequest can be sent by worker to indicate that restart is required.
- StopRequest = "{\"stop\":true}"
-)
-
-// StaticPool controls worker creation, destruction and task routing. Pool uses fixed amount of workers.
-type StaticPool struct {
- // pool behaviour
- cfg Config
-
- // worker command creator
- cmd func() *exec.Cmd
-
- // creates and connects to workers
- factory Factory
-
- // active task executions
- tmu sync.Mutex
- tasks sync.WaitGroup
-
- // workers circular allocation buf
- free chan *Worker
-
- // number of workers expected to be dead in a buf.
- numDead int64
-
- // protects state of worker list, does not affect allocation
- muw sync.RWMutex
-
- // all registered workers
- workers []*Worker
-
- // invalid declares set of workers to be removed from the pool.
- remove sync.Map
-
- // pool is being destroyed
- inDestroy int32
- destroy chan interface{}
-
- // lsn is optional callback to handle worker create/destruct/error events.
- mul sync.Mutex
- lsn func(event int, ctx interface{})
-}
-
-// NewPool creates a new worker pool and task multiplexer. StaticPool is initiated with cfg.NumWorkers workers.
-func NewPool(cmd func() *exec.Cmd, factory Factory, cfg Config) (*StaticPool, error) {
- if err := cfg.Valid(); err != nil {
- return nil, errors.Wrap(err, "config")
- }
-
- p := &StaticPool{
- cfg: cfg,
- cmd: cmd,
- factory: factory,
- workers: make([]*Worker, 0, cfg.NumWorkers),
- free: make(chan *Worker, cfg.NumWorkers),
- destroy: make(chan interface{}),
- }
-
-	// a constant number of workers simplifies the logic
- for i := int64(0); i < p.cfg.NumWorkers; i++ {
- // to test if worker ready
- w, err := p.createWorker()
- if err != nil {
- p.Destroy()
- return nil, err
- }
-
- p.free <- w
- }
-
- return p, nil
-}
-
-// Listen attaches pool event controller.
-func (p *StaticPool) Listen(l func(event int, ctx interface{})) {
- p.mul.Lock()
- defer p.mul.Unlock()
-
- p.lsn = l
-
- p.muw.Lock()
- for _, w := range p.workers {
- w.err.Listen(p.lsn)
- }
- p.muw.Unlock()
-}
-
-// Config returns associated pool configuration. Immutable.
-func (p *StaticPool) Config() Config {
- return p.cfg
-}
-
-// Workers returns worker list associated with the pool.
-func (p *StaticPool) Workers() (workers []*Worker) {
- p.muw.RLock()
- defer p.muw.RUnlock()
-
- workers = append(workers, p.workers...)
-
- return workers
-}
-
-// Remove forces pool to remove specific worker.
-func (p *StaticPool) Remove(w *Worker, err error) bool {
- if w.State().Value() != StateReady && w.State().Value() != StateWorking {
- // unable to remove inactive worker
- return false
- }
-
- if _, ok := p.remove.Load(w); ok {
- return false
- }
-
- p.remove.Store(w, err)
- return true
-}
-
-var ErrAllocateWorker = errors.New("unable to allocate worker")
-
-// Exec one task with given payload and context, returns result or error.
-func (p *StaticPool) Exec(rqs *Payload) (rsp *Payload, err error) {
- p.tmu.Lock()
- p.tasks.Add(1)
- p.tmu.Unlock()
-
- defer p.tasks.Done()
-
- w, err := p.allocateWorker()
- if err != nil {
- return nil, ErrAllocateWorker
- }
-
- rsp, err = w.Exec(rqs)
- if err != nil {
- // soft job errors are allowed
- if _, jobError := err.(JobError); jobError {
- p.release(w)
- return nil, err
- }
-
- p.discardWorker(w, err)
- return nil, err
- }
-
-	// worker wants to be terminated
- if rsp.Body == nil && rsp.Context != nil && string(rsp.Context) == StopRequest {
- p.discardWorker(w, err)
- return p.Exec(rqs)
- }
-
- p.release(w)
- return rsp, nil
-}
-
-// Destroy all underlying workers (but let them complete the task).
-func (p *StaticPool) Destroy() {
- atomic.AddInt32(&p.inDestroy, 1)
-
- p.tmu.Lock()
- p.tasks.Wait()
- close(p.destroy)
- p.tmu.Unlock()
-
- var wg sync.WaitGroup
- for _, w := range p.Workers() {
- wg.Add(1)
- w.markInvalid()
- go func(w *Worker) {
- defer wg.Done()
- p.destroyWorker(w, nil)
- }(w)
- }
-
- wg.Wait()
-}
-
-var ErrPoolStopped = errors.New("pool has been stopped")
-var ErrWorkerAllocateTimeout = errors.New("worker allocate timeout")
-var ErrAllWorkersAreDead = errors.New("all workers are dead")
-
-// finds free worker in a given time interval. Skips dead workers.
-func (p *StaticPool) allocateWorker() (w *Worker, err error) {
- // TODO loop counts upward, but its variable is bounded downward.
- for i := atomic.LoadInt64(&p.numDead); i >= 0; i++ {
- // this loop is required to skip issues with dead workers still being in a ring
- // (we know how many workers).
- select {
- case w = <-p.free:
- if w.State().Value() != StateReady {
- // found expected dead worker
- atomic.AddInt64(&p.numDead, ^int64(0))
- continue
- }
-
- if err, remove := p.remove.Load(w); remove {
- p.discardWorker(w, err)
-
- // get next worker
- i++
- continue
- }
-
- return w, nil
- case <-p.destroy:
- return nil, ErrPoolStopped
- default:
- // enable timeout handler
- }
-
- timeout := time.NewTimer(p.cfg.AllocateTimeout)
- select {
- case <-timeout.C:
- return nil, ErrWorkerAllocateTimeout
- case w = <-p.free:
- timeout.Stop()
-
- if w.State().Value() != StateReady {
- atomic.AddInt64(&p.numDead, ^int64(0))
- continue
- }
-
- if err, remove := p.remove.Load(w); remove {
- p.discardWorker(w, err)
-
- // get next worker
- i++
- continue
- }
-
- return w, nil
- case <-p.destroy:
- timeout.Stop()
-
- return nil, ErrPoolStopped
- }
- }
-
- return nil, ErrAllWorkersAreDead
-}
-
-// release releases or replaces the worker.
-func (p *StaticPool) release(w *Worker) {
- if p.cfg.MaxJobs != 0 && w.State().NumExecs() >= p.cfg.MaxJobs {
- p.discardWorker(w, p.cfg.MaxJobs)
- return
- }
-
- if err, remove := p.remove.Load(w); remove {
- p.discardWorker(w, err)
- return
- }
-
- p.free <- w
-}
-
-// creates a new worker using the associated factory and automatically
-// adds it to the worker list (in the background)
-func (p *StaticPool) createWorker() (*Worker, error) {
- w, err := p.factory.SpawnWorker(p.cmd())
- if err != nil {
- return nil, err
- }
-
- p.mul.Lock()
- if p.lsn != nil {
- w.err.Listen(p.lsn)
- }
- p.mul.Unlock()
-
- p.throw(EventWorkerConstruct, w)
-
- p.muw.Lock()
- p.workers = append(p.workers, w)
- p.muw.Unlock()
-
- go p.watchWorker(w)
- return w, nil
-}
-
-// gently remove the worker
-func (p *StaticPool) discardWorker(w *Worker, caused interface{}) {
- w.markInvalid()
- go p.destroyWorker(w, caused)
-}
-
-// destroyWorker destroys a worker and removes it from the pool.
-// TODO caused unused
-func (p *StaticPool) destroyWorker(w *Worker, caused interface{}) {
- go func() {
- err := w.Stop()
- if err != nil {
- p.throw(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
- }()
-
- select {
- case <-w.waitDone:
- // worker is dead
- p.throw(EventWorkerDestruct, w)
-
- case <-time.NewTimer(p.cfg.DestroyTimeout).C:
- // failed to stop process in given time
- if err := w.Kill(); err != nil {
- p.throw(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
-
- p.throw(EventWorkerKill, w)
- }
-}
-
-// watchWorker watches the worker state and replaces the worker if it fails.
-func (p *StaticPool) watchWorker(w *Worker) {
- err := w.Wait()
- p.throw(EventWorkerDead, w)
-
- // detaching
- p.muw.Lock()
- for i, wc := range p.workers {
- if wc == w {
- p.workers = append(p.workers[:i], p.workers[i+1:]...)
- p.remove.Delete(w)
- break
- }
- }
- p.muw.Unlock()
-
- // registering a dead worker
- atomic.AddInt64(&p.numDead, 1)
-
-	// the worker has died unexpectedly; the pool should safely attempt to replace it with a live one
- if err != nil {
- p.throw(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
-
- if !p.destroyed() {
- nw, err := p.createWorker()
- if err == nil {
- p.free <- nw
- return
- }
-
-		// a major error can cause all PHP scripts to die (for example, a dead DB)
- if len(p.Workers()) == 0 {
- p.throw(EventPoolError, err)
- } else {
- p.throw(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
- }
-}
-
-func (p *StaticPool) destroyed() bool {
- return atomic.LoadInt32(&p.inDestroy) != 0
-}
-
-// throw invokes event handler if any.
-func (p *StaticPool) throw(event int, ctx interface{}) {
- p.mul.Lock()
- if p.lsn != nil {
- p.lsn(event, ctx)
- }
- p.mul.Unlock()
-}
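The deleted pool hands workers out over a buffered free channel: allocateWorker first polls that channel, then arms a timer bounded by AllocateTimeout, and the destroy channel aborts either wait. A minimal, self-contained sketch of that select-with-timeout pattern (acquire, free, destroy and the error values are illustrative names, not the project's API):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errAllocateTimeout = errors.New("worker allocate timeout")
var errPoolStopped = errors.New("pool has been stopped")

// acquire polls the free channel first; if no worker is ready it waits,
// but never longer than the allocate timeout. A closed destroy channel
// aborts either wait.
func acquire(free chan int, destroy chan struct{}, timeout time.Duration) (int, error) {
	select {
	case w := <-free:
		return w, nil
	case <-destroy:
		return 0, errPoolStopped
	default:
		// no worker ready yet, fall through to the timed wait
	}

	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case w := <-free:
		return w, nil
	case <-t.C:
		return 0, errAllocateTimeout
	case <-destroy:
		return 0, errPoolStopped
	}
}

func main() {
	free := make(chan int, 1)
	destroy := make(chan struct{})
	free <- 1 // one idle worker

	w, err := acquire(free, destroy, time.Second)
	fmt.Println(w, err) // 1 <nil>

	_, err = acquire(free, destroy, 10*time.Millisecond)
	fmt.Println(err) // worker allocate timeout
}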
diff --git a/static_pool_test.go b/static_pool_test.go
deleted file mode 100644
index e2181292..00000000
--- a/static_pool_test.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package roadrunner
-
-import (
- "log"
- "os/exec"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-var cfg = Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
-}
-
-func Test_NewPool(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
-
- assert.Equal(t, cfg, p.Config())
-
- defer p.Destroy()
-
- assert.NotNil(t, p)
-}
-
-func Test_StaticPool_Invalid(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/invalid.php") },
- NewPipeFactory(),
- cfg,
- )
-
- assert.Nil(t, p)
- assert.Error(t, err)
-}
-
-func Test_ConfigError(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- Config{
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
-
- assert.Nil(t, p)
- assert.Error(t, err)
-}
-
-func Test_StaticPool_Echo(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
-
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_StaticPool_Echo_NilContext(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
-
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- res, err := p.Exec(&Payload{Body: []byte("hello"), Context: nil})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_StaticPool_Echo_Context(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "head", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
-
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- res, err := p.Exec(&Payload{Body: []byte("hello"), Context: []byte("world")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.Nil(t, res.Body)
- assert.NotNil(t, res.Context)
-
- assert.Equal(t, "world", string(res.Context))
-}
-
-func Test_StaticPool_JobError(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "error", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.Error(t, err)
- assert.Nil(t, res)
-
- assert.IsType(t, JobError{}, err)
- assert.Equal(t, "hello", err.Error())
-}
-
-func Test_StaticPool_Broken_Replace(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "broken", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
- assert.NotNil(t, p)
-
- done := make(chan interface{})
-
- p.Listen(func(e int, ctx interface{}) {
- if err, ok := ctx.(error); ok {
- if strings.Contains(err.Error(), "undefined_function()") {
- close(done)
- }
- }
- })
-
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.Error(t, err)
- assert.Nil(t, res)
-
- <-done
- p.Destroy()
-}
-
-func Test_StaticPool_Broken_FromOutside(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- assert.NoError(t, err)
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
- assert.Equal(t, runtime.NumCPU(), len(p.Workers()))
-
- destructed := make(chan interface{})
- p.Listen(func(e int, ctx interface{}) {
- if e == EventWorkerConstruct {
- destructed <- nil
- }
- })
-
-	// kill the first worker and expect the pool to replace it
- err = p.Workers()[0].cmd.Process.Kill()
- if err != nil {
- t.Errorf("error killing the process: error %v", err)
- }
- <-destructed
-
- for _, w := range p.Workers() {
- assert.Equal(t, StateReady, w.state.Value())
- }
-}
-
-func Test_StaticPool_AllocateTimeout(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "delay", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- AllocateTimeout: time.Nanosecond * 1,
- DestroyTimeout: time.Second * 2,
- },
- )
- if err != nil {
- t.Fatal(err)
- }
-
- done := make(chan interface{})
- go func() {
- if p != nil {
- _, err := p.Exec(&Payload{Body: []byte("100")})
- assert.NoError(t, err)
- close(done)
- } else {
- panic("Pool is nil")
- }
- }()
-
-	// ensure that the worker is already busy
- time.Sleep(time.Millisecond * 10)
-
- _, err = p.Exec(&Payload{Body: []byte("10")})
- if err == nil {
- t.Fatal("Test_StaticPool_AllocateTimeout exec should raise error")
- }
- assert.Contains(t, err.Error(), "unable to allocate worker")
-
- <-done
- p.Destroy()
-}
-
-func Test_StaticPool_Replace_Worker(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "pid", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- MaxJobs: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
- assert.NoError(t, err)
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- var lastPID string
- lastPID = strconv.Itoa(*p.Workers()[0].Pid)
-
- res, _ := p.Exec(&Payload{Body: []byte("hello")})
- assert.Equal(t, lastPID, string(res.Body))
-
- for i := 0; i < 10; i++ {
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.NotEqual(t, lastPID, string(res.Body))
- lastPID = string(res.Body)
- }
-}
-
-// identical to replace but controlled on worker side
-func Test_StaticPool_Stop_Worker(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "stop", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
- assert.NoError(t, err)
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- var lastPID string
- lastPID = strconv.Itoa(*p.Workers()[0].Pid)
-
- res, _ := p.Exec(&Payload{Body: []byte("hello")})
- assert.Equal(t, lastPID, string(res.Body))
-
- for i := 0; i < 10; i++ {
- res, err := p.Exec(&Payload{Body: []byte("hello")})
-
- assert.NoError(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.NotEqual(t, lastPID, string(res.Body))
- lastPID = string(res.Body)
- }
-}
-
-// ensures that Exec fails once the pool has been destroyed
-func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "delay", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
-
- assert.NotNil(t, p)
- assert.NoError(t, err)
-
- p.Destroy()
- _, err = p.Exec(&Payload{Body: []byte("100")})
- assert.Error(t, err)
-}
-
-// ensures the pool can be destroyed while a task is still in flight
-func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "delay", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
-
- assert.NotNil(t, p)
- assert.NoError(t, err)
-
- go func() {
- _, err := p.Exec(&Payload{Body: []byte("100")})
- if err != nil {
- t.Errorf("error executing payload: error %v", err)
- }
-
- }()
- time.Sleep(time.Millisecond * 10)
-
- p.Destroy()
- _, err = p.Exec(&Payload{Body: []byte("100")})
- assert.Error(t, err)
-}
-
-// ensures that Exec fails when all workers are in an errored state
-func Test_Static_Pool_Handle_Dead(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 5,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
- assert.NoError(t, err)
- defer p.Destroy()
-
- assert.NotNil(t, p)
-
- for _, w := range p.workers {
- w.state.value = StateErrored
- }
-
- _, err = p.Exec(&Payload{Body: []byte("hello")})
- assert.Error(t, err)
-}
-
-// ensures Destroy completes even when workers are slow to stop
-func Test_Static_Pool_Slow_Destroy(t *testing.T) {
- p, err := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/slow-destroy.php", "echo", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 5,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
-
- assert.NoError(t, err)
- assert.NotNil(t, p)
-
- p.Destroy()
-}
-
-func Benchmark_Pool_Allocate(b *testing.B) {
- p, _ := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- defer p.Destroy()
-
- for n := 0; n < b.N; n++ {
- w, err := p.allocateWorker()
- if err != nil {
- b.Fail()
- log.Println(err)
- }
-
- p.free <- w
- }
-}
-
-func Benchmark_Pool_Echo(b *testing.B) {
- p, _ := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- cfg,
- )
- defer p.Destroy()
-
- for n := 0; n < b.N; n++ {
- if _, err := p.Exec(&Payload{Body: []byte("hello")}); err != nil {
- b.Fail()
- }
- }
-}
-
-func Benchmark_Pool_Echo_Batched(b *testing.B) {
- p, _ := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second * 100,
- DestroyTimeout: time.Second,
- },
- )
- defer p.Destroy()
-
- var wg sync.WaitGroup
- for i := 0; i < b.N; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- if _, err := p.Exec(&Payload{Body: []byte("hello")}); err != nil {
- b.Fail()
- log.Println(err)
- }
- }()
- }
-
- wg.Wait()
-}
-
-func Benchmark_Pool_Echo_Replaced(b *testing.B) {
- p, _ := NewPool(
- func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
- NewPipeFactory(),
- Config{
- NumWorkers: 1,
- MaxJobs: 1,
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- )
- defer p.Destroy()
-
- for n := 0; n < b.N; n++ {
- if _, err := p.Exec(&Payload{Body: []byte("hello")}); err != nil {
- b.Fail()
- log.Println(err)
- }
- }
-}
diff --git a/systemd/rr.service b/systemd/rr.service
index 6e4d5914..6e4d5914 100644..100755
--- a/systemd/rr.service
+++ b/systemd/rr.service
diff --git a/tests/broken.php b/tests/broken.php
index 42b4e7c2..1f869b2d 100644
--- a/tests/broken.php
+++ b/tests/broken.php
@@ -8,7 +8,7 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
-while ($in = $rr->receive($ctx)) {
+while ($in = $rr->waitPayload()) {
echo undefined_function();
- $rr->send((string)$in);
+ $rr->send((string)$in->body, null);
}
diff --git a/tests/client.php b/tests/client.php
index 835b1c6c..c00cece1 100644
--- a/tests/client.php
+++ b/tests/client.php
@@ -3,7 +3,7 @@
use Spiral\Goridge;
ini_set('display_errors', 'stderr');
-require dirname(__DIR__) . "/vendor_php/autoload.php";
+require __DIR__ . "/vendor/autoload.php";
if (count($argv) < 3) {
die("need 2 arguments");
diff --git a/tests/composer.json b/tests/composer.json
new file mode 100644
index 00000000..d4f32be5
--- /dev/null
+++ b/tests/composer.json
@@ -0,0 +1,13 @@
+{
+ "minimum-stability": "beta",
+ "require": {
+ "nyholm/psr7": "^1.3",
+ "spiral/roadrunner": "^2.0",
+ "spiral/roadrunner-http": "^2.0"
+ },
+ "autoload": {
+ "psr-4": {
+ "Spiral\\RoadRunner\\": "src/"
+ }
+ }
+}
diff --git a/tests/delay.php b/tests/delay.php
index bf9ecc12..f0435b05 100644
--- a/tests/delay.php
+++ b/tests/delay.php
@@ -8,9 +8,9 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
-while ($in = $rr->receive($ctx)) {
+while ($in = $rr->waitPayload()) {
try {
- usleep($in * 1000);
+ usleep($in->body * 1000);
$rr->send('');
} catch (\Throwable $e) {
$rr->error((string)$e);
diff --git a/tests/docker-compose.yaml b/tests/docker-compose.yaml
new file mode 100644
index 00000000..fd1a48bf
--- /dev/null
+++ b/tests/docker-compose.yaml
@@ -0,0 +1,7 @@
+version: '3'
+
+services:
+ memcached:
+ image: memcached:latest
+ ports:
+ - "0.0.0.0:11211:11211" \ No newline at end of file
diff --git a/tests/echo.php b/tests/echo.php
index 1570e3df..83eec92e 100644
--- a/tests/echo.php
+++ b/tests/echo.php
@@ -8,9 +8,9 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
-while ($in = $rr->receive($ctx)) {
+while ($in = $rr->waitPayload()) {
try {
- $rr->send((string)$in);
+ $rr->send((string)$in->body);
} catch (\Throwable $e) {
$rr->error((string)$e);
}
diff --git a/tests/error.php b/tests/error.php
index 8e1c8d0d..c77e6817 100644
--- a/tests/error.php
+++ b/tests/error.php
@@ -8,6 +8,6 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
-while ($in = $rr->receive($ctx)) {
- $rr->error((string)$in);
+while ($in = $rr->waitPayload()) {
+ $rr->error((string)$in->body);
}
diff --git a/tests/head.php b/tests/head.php
index 88ebd3f2..3c57258f 100644
--- a/tests/head.php
+++ b/tests/head.php
@@ -8,9 +8,9 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
-while ($in = $rr->receive($ctx)) {
+while ($in = $rr->waitPayload()) {
try {
- $rr->send("", (string)$ctx);
+ $rr->send("", (string)$in->header);
} catch (\Throwable $e) {
$rr->error((string)$e);
}
diff --git a/tests/http/client.php b/tests/http/client.php
index 9f21b273..ad5cce24 100644
--- a/tests/http/client.php
+++ b/tests/http/client.php
@@ -4,7 +4,7 @@ use Spiral\Goridge;
use Spiral\RoadRunner;
ini_set('display_errors', 'stderr');
-require dirname(__DIR__) . "/../vendor_php/autoload.php";
+require dirname(__DIR__) . "/vendor/autoload.php";
if (count($argv) < 3) {
die("need 2 arguments");
@@ -33,12 +33,18 @@ switch ($goridge) {
die("invalid protocol selection");
}
-$psr7 = new RoadRunner\PSR7Client(new RoadRunner\Worker($relay));
+$psr7 = new RoadRunner\Http\PSR7Worker(
+ new RoadRunner\Worker($relay),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory()
+);
+
require_once sprintf("%s/%s.php", __DIR__, $test);
-while ($req = $psr7->acceptRequest()) {
+while ($req = $psr7->waitRequest()) {
try {
- $psr7->respond(handleRequest($req, new \Zend\Diactoros\Response()));
+ $psr7->respond(handleRequest($req, new \Nyholm\Psr7\Response()));
} catch (\Throwable $e) {
$psr7->getWorker()->error((string)$e);
}
diff --git a/tests/http/slow-client.php b/tests/http/slow-client.php
index 4d3963d7..731232f7 100644
--- a/tests/http/slow-client.php
+++ b/tests/http/slow-client.php
@@ -4,13 +4,13 @@ use Spiral\Goridge;
use Spiral\RoadRunner;
ini_set('display_errors', 'stderr');
-require dirname(__DIR__) . "/../vendor_php/autoload.php";
+require dirname(__DIR__) . "/vendor/autoload.php";
if (count($argv) < 3) {
die("need 2 arguments");
}
-list($test, $goridge, $bootDelay) = [$argv[1], $argv[2], $argv[3]];
+[$test, $goridge, $bootDelay] = [$argv[1], $argv[2], $argv[3]];
usleep($bootDelay * 1000);
switch ($goridge) {
@@ -34,13 +34,19 @@ switch ($goridge) {
die("invalid protocol selection");
}
-$psr7 = new RoadRunner\PSR7Client(new RoadRunner\Worker($relay));
+$psr7 = new RoadRunner\Http\PSR7Worker(
+ new RoadRunner\Worker($relay),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory()
+);
+
require_once sprintf("%s/%s.php", __DIR__, $test);
-while ($req = $psr7->acceptRequest()) {
+while ($req = $psr7->waitRequest()) {
try {
- $psr7->respond(handleRequest($req, new \Zend\Diactoros\Response()));
+ $psr7->respond(handleRequest($req, new \Nyholm\Psr7\Response()));
} catch (\Throwable $e) {
- $psr7->getWorker()->error((string)$e);
+ $psr7->getWorker()->error((string) $e);
}
}
diff --git a/tests/http/upload.php b/tests/http/upload.php
index bb4af766..57526246 100644
--- a/tests/http/upload.php
+++ b/tests/http/upload.php
@@ -24,7 +24,7 @@ function handleRequest(ServerRequestInterface $req, ResponseInterface $resp): Re
'size' => $v->getSize(),
'mime' => $v->getClientMediaType(),
'error' => $v->getError(),
- 'md5' => md5($v->getStream()->__toString()),
+ 'sha512' => hash('sha512', $v->getStream()->__toString()),
];
}
});
diff --git a/tests/memleak.php b/tests/memleak.php
new file mode 100644
index 00000000..169fe4f5
--- /dev/null
+++ b/tests/memleak.php
@@ -0,0 +1,15 @@
+<?php
+
+declare(strict_types=1);
+
+use Spiral\Goridge\StreamRelay;
+use Spiral\RoadRunner\Worker as RoadRunner;
+
+require __DIR__ . "/vendor/autoload.php";
+
+$rr = new RoadRunner(new StreamRelay(\STDIN, \STDOUT));
+$mem = '';
+while ($rr->waitPayload()) {
+ $mem .= str_repeat("a", 1024*1024);
+ $rr->send("");
+}
diff --git a/tests/mocks/mock_log.go b/tests/mocks/mock_log.go
new file mode 100644
index 00000000..e9631805
--- /dev/null
+++ b/tests/mocks/mock_log.go
@@ -0,0 +1,150 @@
+package mocks
+
+import (
+ "reflect"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// MockLogger is a mock of Logger interface.
+type MockLogger struct {
+ ctrl *gomock.Controller
+ recorder *MockLoggerMockRecorder
+}
+
+// MockLoggerMockRecorder is the mock recorder for MockLogger.
+type MockLoggerMockRecorder struct {
+ mock *MockLogger
+}
+
+// NewMockLogger creates a new mock instance.
+func NewMockLogger(ctrl *gomock.Controller) *MockLogger {
+ mock := &MockLogger{ctrl: ctrl}
+ mock.recorder = &MockLoggerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockLogger) EXPECT() *MockLoggerMockRecorder {
+ return m.recorder
+}
+
+func (m *MockLogger) Init() error {
+ mock := &MockLogger{ctrl: m.ctrl}
+ mock.recorder = &MockLoggerMockRecorder{mock}
+ return nil
+}
+
+// Debug mocks base method.
+func (m *MockLogger) Debug(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Debug", varargs...)
+}
+
+// Warn mocks base method.
+func (m *MockLogger) Warn(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Warn", varargs...)
+}
+
+// Info mocks base method.
+func (m *MockLogger) Info(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Info", varargs...)
+}
+
+// Error mocks base method.
+func (m *MockLogger) Error(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Error", varargs...)
+}
+
+// Warn indicates an expected call of Warn.
+func (mr *MockLoggerMockRecorder) Warn(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...)
+}
+
+// Debug indicates an expected call of Debug.
+func (mr *MockLoggerMockRecorder) Debug(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...)
+}
+
+// Error indicates an expected call of Error.
+func (mr *MockLoggerMockRecorder) Error(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...)
+}
+
+func (mr *MockLoggerMockRecorder) Init() error {
+ return nil
+}
+
+// Info indicates an expected call of Info.
+func (mr *MockLoggerMockRecorder) Info(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...)
+}
+
+// MockWithLogger is a mock of WithLogger interface.
+type MockWithLogger struct {
+ ctrl *gomock.Controller
+ recorder *MockWithLoggerMockRecorder
+}
+
+// MockWithLoggerMockRecorder is the mock recorder for MockWithLogger.
+type MockWithLoggerMockRecorder struct {
+ mock *MockWithLogger
+}
+
+// NewMockWithLogger creates a new mock instance.
+func NewMockWithLogger(ctrl *gomock.Controller) *MockWithLogger {
+ mock := &MockWithLogger{ctrl: ctrl}
+ mock.recorder = &MockWithLoggerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockWithLogger) EXPECT() *MockWithLoggerMockRecorder {
+ return m.recorder
+}
+
+// With mocks base method.
+func (m *MockWithLogger) With(keyvals ...interface{}) logger.Logger {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "With", varargs...)
+ ret0, _ := ret[0].(logger.Logger)
+ return ret0
+}
+
+// With indicates an expected call of With.
+func (mr *MockWithLoggerMockRecorder) With(keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockWithLogger)(nil).With), keyvals...)
+}
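For orientation, a minimal sketch of how this generated mock is driven in a test; the gzip plugin test below wires it up the same way (the test name and the concrete expectations here are illustrative):

package mocks_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/spiral/roadrunner/v2/tests/mocks"
)

// TestMockLoggerSketch shows the expect-then-call cycle the plugin tests rely on.
func TestMockLoggerSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	l := mocks.NewMockLogger(ctrl)

	// declare the calls the code under test is allowed to make
	l.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
	l.EXPECT().Warn(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()

	// a call made by the code under test is matched against the expectations above
	l.Info("worker constructed", "pid", 123)
}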
diff --git a/tests/pid.php b/tests/pid.php
index bf10a025..f8b2515d 100644
--- a/tests/pid.php
+++ b/tests/pid.php
@@ -8,7 +8,7 @@
$rr = new RoadRunner\Worker($relay);
- while ($in = $rr->receive($ctx)) {
+ while ($in = $rr->waitPayload()) {
try {
$rr->send((string)getmypid());
} catch (\Throwable $e) {
diff --git a/tests/plugins/checker/configs/.rr-checker-init.yaml b/tests/plugins/checker/configs/.rr-checker-init.yaml
new file mode 100755
index 00000000..1273529a
--- /dev/null
+++ b/tests/plugins/checker/configs/.rr-checker-init.yaml
@@ -0,0 +1,31 @@
+rpc:
+ listen: tcp://127.0.0.1:6005
+ disabled: false
+
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+status:
+ address: "127.0.0.1:34333"
+logs:
+ mode: development
+ level: debug
+http:
+ debug: true
+ address: 127.0.0.1:11933
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s \ No newline at end of file
diff --git a/tests/plugins/checker/plugin_test.go b/tests/plugins/checker/plugin_test.go
new file mode 100644
index 00000000..c346d91a
--- /dev/null
+++ b/tests/plugins/checker/plugin_test.go
@@ -0,0 +1,190 @@
+package checker
+
+import (
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/checker"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStatusHttp(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-checker-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &checker.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("CheckerGetStatus", checkHTTPStatus)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+const resp = `Service: http: Status: 200
+Service: rpc not found`
+
+func checkHTTPStatus(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://127.0.0.1:34333/v1/health?plugin=http&plugin=rpc", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, 200, r.StatusCode)
+ assert.Equal(t, resp, string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestStatusRPC(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-checker-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &checker.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("CheckerGetStatusRpc", checkRPCStatus)
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func checkRPCStatus(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6005")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+
+ st := &checker.Status{}
+
+ err = client.Call("status.Status", "http", &st)
+ assert.NoError(t, err)
+ assert.Equal(t, st.Code, 200)
+}
diff --git a/tests/plugins/config/.rr.yaml b/tests/plugins/config/.rr.yaml
new file mode 100755
index 00000000..bad2846a
--- /dev/null
+++ b/tests/plugins/config/.rr.yaml
@@ -0,0 +1,21 @@
+rpc:
+ listen: tcp://localhost:6060
+
+reload:
+ enabled: true
+ interval: 1s
+ patterns: [".php"]
+ services:
+ http:
+ recursive: true
+ ignore: ["vendor"]
+ patterns: [".php", ".go",".md",]
+ dirs: ["."]
+ jobs:
+ recursive: false
+ ignore: ["service/metrics"]
+ dirs: ["./jobs"]
+ rpc:
+ recursive: true
+ patterns: [".json"]
+ dirs: [""]
diff --git a/tests/plugins/config/config_test.go b/tests/plugins/config/config_test.go
new file mode 100755
index 00000000..6d95ba70
--- /dev/null
+++ b/tests/plugins/config/config_test.go
@@ -0,0 +1,64 @@
+package config
+
+import (
+ "os"
+ "os/signal"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestViperProvider_Init(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ vp := &config.Viper{}
+ vp.Path = ".rr.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errCh, err := container.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // stop by CTRL+C
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+
+ tt := time.NewTicker(time.Second * 2)
+ defer tt.Stop()
+
+ for {
+ select {
+ case e := <-errCh:
+ assert.NoError(t, e.Error)
+ assert.NoError(t, container.Stop())
+ return
+ case <-c:
+ er := container.Stop()
+ assert.NoError(t, er)
+ return
+ case <-tt.C:
+ assert.NoError(t, container.Stop())
+ return
+ }
+ }
+}
diff --git a/tests/plugins/config/plugin1.go b/tests/plugins/config/plugin1.go
new file mode 100755
index 00000000..a6c06aec
--- /dev/null
+++ b/tests/plugins/config/plugin1.go
@@ -0,0 +1,96 @@
+package config
+
+import (
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+type AllConfig struct {
+ RPC struct {
+ Listen string `yaml:"listen"`
+ } `yaml:"rpc"`
+ Reload struct {
+ Enabled bool `yaml:"enabled"`
+ Interval string `yaml:"interval"`
+ Patterns []string `yaml:"patterns"`
+ Services struct {
+ HTTP struct {
+ Recursive bool `yaml:"recursive"`
+ Ignore []string `yaml:"ignore"`
+ Patterns []string `yaml:"patterns"`
+ Dirs []string `yaml:"dirs"`
+ } `yaml:"http"`
+ Jobs struct {
+ Recursive bool `yaml:"recursive"`
+ Ignore []string `yaml:"ignore"`
+ Dirs []string `yaml:"dirs"`
+ } `yaml:"jobs"`
+ RPC struct {
+ Recursive bool `yaml:"recursive"`
+ Patterns []string `yaml:"patterns"`
+ Dirs []string `yaml:"dirs"`
+ } `yaml:"rpc"`
+ } `yaml:"services"`
+ } `yaml:"reload"`
+}
+
+// ReloadConfig is a Reload configuration point.
+type ReloadConfig struct {
+ Interval time.Duration
+ Patterns []string
+ Services map[string]ServiceConfig
+}
+
+type ServiceConfig struct {
+ Enabled bool
+ Recursive bool
+ Patterns []string
+ Dirs []string
+ Ignore []string
+}
+
+type Foo struct {
+ configProvider config.Configurer
+}
+
+// Depends on S2 and DB (S3 in the current case)
+func (f *Foo) Init(p config.Configurer) error {
+ f.configProvider = p
+ return nil
+}
+
+func (f *Foo) Serve() chan error {
+ const op = errors.Op("foo serve")
+ errCh := make(chan error, 1)
+
+ r := &ReloadConfig{}
+ err := f.configProvider.UnmarshalKey("reload", r)
+ if err != nil {
+ errCh <- err
+ }
+
+ if len(r.Patterns) == 0 {
+ errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
+ return errCh
+ }
+
+ var allCfg AllConfig
+ err = f.configProvider.Unmarshal(&allCfg)
+ if err != nil {
+ errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
+ return errCh
+ }
+
+ if allCfg.RPC.Listen != "tcp://localhost:6060" {
+ errCh <- errors.E(op, errors.Str("RPC.Listen should be parsed"))
+ return errCh
+ }
+
+ return errCh
+}
+
+func (f *Foo) Stop() error {
+ return nil
+}
diff --git a/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml b/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml
new file mode 100644
index 00000000..a2d12706
--- /dev/null
+++ b/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml
@@ -0,0 +1,25 @@
+server:
+ command: "php ../../psr-worker.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:18103
+ maxRequestSize: 1024
+ middleware: [ "gzip", "foo" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/gzip/configs/.rr-http-withGzip.yaml b/tests/plugins/gzip/configs/.rr-http-withGzip.yaml
new file mode 100644
index 00000000..aff3efdb
--- /dev/null
+++ b/tests/plugins/gzip/configs/.rr-http-withGzip.yaml
@@ -0,0 +1,25 @@
+server:
+ command: "php ../../psr-worker.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:18953
+ maxRequestSize: 1024
+ middleware: [ "gzip" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/gzip/plugin_test.go b/tests/plugins/gzip/plugin_test.go
new file mode 100644
index 00000000..b09d430e
--- /dev/null
+++ b/tests/plugins/gzip/plugin_test.go
@@ -0,0 +1,176 @@
+package gzip
+
+import (
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/gzip"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/tests/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGzipPlugin(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-withGzip.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("GzipCheckHeader", headerCheck)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func headerCheck(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:18953", nil)
+ assert.NoError(t, err)
+ client := &http.Client{
+ Transport: &http.Transport{
+ DisableCompression: false,
+ },
+ }
+
+ r, err := client.Do(req)
+ assert.NoError(t, err)
+ assert.True(t, r.Uncompressed)
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestMiddlewareNotExist(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-middlewareNotExist.yaml",
+ Prefix: "rr",
+ }
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Warn("requested middleware does not exist", "requested", "foo").AnyTimes()
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error events
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
diff --git a/tests/plugins/headers/configs/.rr-cors-headers.yaml b/tests/plugins/headers/configs/.rr-cors-headers.yaml
new file mode 100644
index 00000000..9d4e8b36
--- /dev/null
+++ b/tests/plugins/headers/configs/.rr-cors-headers.yaml
@@ -0,0 +1,39 @@
+server:
+ command: "php ../../http/client.php headers pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:22855
+ maxRequestSize: 1024
+ middleware: [ "headers" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ # Additional HTTP headers and CORS control.
+ headers:
+ cors:
+ allowedOrigin: "*"
+ allowedHeaders: "*"
+ allowedMethods: "GET,POST,PUT,DELETE"
+ allowCredentials: true
+ exposedHeaders: "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma"
+ maxAge: 600
+ request:
+ "input": "custom-header"
+ response:
+ "output": "output-header"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/headers/configs/.rr-headers-init.yaml b/tests/plugins/headers/configs/.rr-headers-init.yaml
new file mode 100644
index 00000000..8d63a187
--- /dev/null
+++ b/tests/plugins/headers/configs/.rr-headers-init.yaml
@@ -0,0 +1,39 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:33453
+ maxRequestSize: 1024
+ middleware: [ "headers" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ # Additional HTTP headers and CORS control.
+ headers:
+ cors:
+ allowedOrigin: "*"
+ allowedHeaders: "*"
+ allowedMethods: "GET,POST,PUT,DELETE"
+ allowCredentials: true
+ exposedHeaders: "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma"
+ maxAge: 600
+ request:
+ "Example-Request-Header": "Value"
+ response:
+ "X-Powered-By": "RoadRunner"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/headers/configs/.rr-req-headers.yaml b/tests/plugins/headers/configs/.rr-req-headers.yaml
new file mode 100644
index 00000000..f8ab9bec
--- /dev/null
+++ b/tests/plugins/headers/configs/.rr-req-headers.yaml
@@ -0,0 +1,32 @@
+server:
+ command: "php ../../http/client.php header pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:22655
+ maxRequestSize: 1024
+ middleware: [ "headers" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ # Additional HTTP headers and CORS control.
+ headers:
+ request:
+ "input": "custom-header"
+ response:
+ "output": "output-header"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/headers/configs/.rr-res-headers.yaml b/tests/plugins/headers/configs/.rr-res-headers.yaml
new file mode 100644
index 00000000..36ab4eb3
--- /dev/null
+++ b/tests/plugins/headers/configs/.rr-res-headers.yaml
@@ -0,0 +1,32 @@
+server:
+ command: "php ../../http/client.php header pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:22455
+ maxRequestSize: 1024
+ middleware: [ "headers" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ # Additional HTTP headers and CORS control.
+ headers:
+ request:
+ "input": "custom-header"
+ response:
+ "output": "output-header"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/headers/headers_plugin_test.go b/tests/plugins/headers/headers_plugin_test.go
new file mode 100644
index 00000000..a2ad3357
--- /dev/null
+++ b/tests/plugins/headers/headers_plugin_test.go
@@ -0,0 +1,367 @@
+package headers
+
+import (
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/headers"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHeadersInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-headers-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &headers.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func TestRequestHeaders(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-req-headers.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &headers.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("RequestHeaders", reqHeaders)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func reqHeaders(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:22655?hello=value", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 200, r.StatusCode)
+ assert.Equal(t, "CUSTOM-HEADER", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestResponseHeaders(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-res-headers.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &headers.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("ResponseHeaders", resHeaders)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func resHeaders(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:22455?hello=value", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ assert.Equal(t, "output-header", r.Header.Get("output"))
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, 200, r.StatusCode)
+ assert.Equal(t, "CUSTOM-HEADER", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestCORSHeaders(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-cors-headers.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &headers.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("CORSHeaders", corsHeaders)
+ t.Run("CORSHeadersPass", corsHeadersPass)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func corsHeadersPass(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:22855", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
+ assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
+ assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
+ assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
+
+ _, err = ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, 200, r.StatusCode)
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func corsHeaders(t *testing.T) {
+ req, err := http.NewRequest("OPTIONS", "http://localhost:22855", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
+ assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
+ assert.Equal(t, "GET,POST,PUT,DELETE", r.Header.Get("Access-Control-Allow-Methods"))
+ assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
+ assert.Equal(t, "600", r.Header.Get("Access-Control-Max-Age"))
+ assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
+
+ _, err = ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, 200, r.StatusCode)
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
diff --git a/service/http/attributes/attributes_test.go b/tests/plugins/http/attributes_test.go
index d914f6fa..69200a30 100644
--- a/service/http/attributes/attributes_test.go
+++ b/tests/plugins/http/attributes_test.go
@@ -1,80 +1,78 @@
-package attributes
+package http
import (
"net/http"
"testing"
+ "github.com/spiral/roadrunner/v2/plugins/http/attributes"
"github.com/stretchr/testify/assert"
)
func TestAllAttributes(t *testing.T) {
r := &http.Request{}
- r = Init(r)
+ r = attributes.Init(r)
- err := Set(r, "key", "value")
+ err := attributes.Set(r, "key", "value")
if err != nil {
t.Errorf("error during the Set: error %v", err)
}
- assert.Equal(t, All(r), map[string]interface{}{
+ assert.Equal(t, attributes.All(r), map[string]interface{}{
"key": "value",
})
}
func TestAllAttributesNone(t *testing.T) {
r := &http.Request{}
- r = Init(r)
+ r = attributes.Init(r)
- assert.Equal(t, All(r), map[string]interface{}{})
+ assert.Equal(t, attributes.All(r), map[string]interface{}{})
}
func TestAllAttributesNone2(t *testing.T) {
r := &http.Request{}
- assert.Equal(t, All(r), map[string]interface{}{})
+ assert.Equal(t, attributes.All(r), map[string]interface{}{})
}
func TestGetAttribute(t *testing.T) {
r := &http.Request{}
- r = Init(r)
+ r = attributes.Init(r)
- err := Set(r, "key", "value")
+ err := attributes.Set(r, "key", "value")
if err != nil {
t.Errorf("error during the Set: error %v", err)
}
- assert.Equal(t, Get(r, "key"), "value")
+ assert.Equal(t, attributes.Get(r, "key"), "value")
}
func TestGetAttributeNone(t *testing.T) {
r := &http.Request{}
- r = Init(r)
+ r = attributes.Init(r)
- assert.Equal(t, Get(r, "key"), nil)
+ assert.Equal(t, attributes.Get(r, "key"), nil)
}
func TestGetAttributeNone2(t *testing.T) {
r := &http.Request{}
- assert.Equal(t, Get(r, "key"), nil)
+ assert.Equal(t, attributes.Get(r, "key"), nil)
}
func TestSetAttribute(t *testing.T) {
r := &http.Request{}
- r = Init(r)
+ r = attributes.Init(r)
- err := Set(r, "key", "value")
+ err := attributes.Set(r, "key", "value")
if err != nil {
t.Errorf("error during the Set: error %v", err)
}
- assert.Equal(t, Get(r, "key"), "value")
+ assert.Equal(t, attributes.Get(r, "key"), "value")
}
func TestSetAttributeNone(t *testing.T) {
r := &http.Request{}
-
- err := Set(r, "key", "value")
- if err != nil {
- t.Errorf("error during the Set: error %v", err)
- }
- assert.Equal(t, Get(r, "key"), nil)
+ err := attributes.Set(r, "key", "value")
+ assert.Error(t, err)
+ assert.Equal(t, attributes.Get(r, "key"), nil)
}
diff --git a/tests/plugins/http/configs/.rr-broken-pipes.yaml b/tests/plugins/http/configs/.rr-broken-pipes.yaml
new file mode 100644
index 00000000..8006cb5f
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-broken-pipes.yaml
@@ -0,0 +1,31 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php broken pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:12384
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
+
diff --git a/tests/plugins/http/configs/.rr-env.yaml b/tests/plugins/http/configs/.rr-env.yaml
new file mode 100644
index 00000000..1cce5dab
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-env.yaml
@@ -0,0 +1,33 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php env pipes"
+ user: ""
+ group: ""
+ env:
+ "env_key": "ENV_VALUE"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:12084
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ env:
+ "RR_HTTP": "true"
+ "env_key": "ENV_VALUE"
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml b/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml
new file mode 100644
index 00000000..725ae724
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml
@@ -0,0 +1,38 @@
+server:
+ command: "php ../../http/client.php request-uri pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8082
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8890
+ redirect: false
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://127.0.0.1:6921
+ http2:
+ enabled: false
+ h2c: false
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-fcgi.yaml b/tests/plugins/http/configs/.rr-fcgi.yaml
new file mode 100644
index 00000000..ba119a88
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-fcgi.yaml
@@ -0,0 +1,38 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8081
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8889
+ redirect: false
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://0.0.0.0:6920
+ http2:
+ enabled: false
+ h2c: false
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-h2c.yaml b/tests/plugins/http/configs/.rr-h2c.yaml
new file mode 100644
index 00000000..287b7929
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-h2c.yaml
@@ -0,0 +1,29 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8083
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+ http2:
+ enabled: true
+ h2c: true
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-http.yaml b/tests/plugins/http/configs/.rr-http.yaml
new file mode 100644
index 00000000..93f131f8
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-http.yaml
@@ -0,0 +1,31 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:18903
+ maxRequestSize: 1024
+ middleware: [ "pluginMiddleware", "pluginMiddleware2" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
+
diff --git a/tests/plugins/http/configs/.rr-init.yaml b/tests/plugins/http/configs/.rr-init.yaml
new file mode 100644
index 00000000..79303eab
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-init.yaml
@@ -0,0 +1,43 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:15395
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8892
+ redirect: false
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://0.0.0.0:7921
+ http2:
+ enabled: false
+ h2c: false
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/http/configs/.rr-resetter.yaml b/tests/plugins/http/configs/.rr-resetter.yaml
new file mode 100644
index 00000000..e2edafc6
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-resetter.yaml
@@ -0,0 +1,30 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:10084
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+
diff --git a/tests/plugins/http/configs/.rr-ssl-push.yaml b/tests/plugins/http/configs/.rr-ssl-push.yaml
new file mode 100644
index 00000000..81699a21
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-ssl-push.yaml
@@ -0,0 +1,31 @@
+server:
+ command: "php ../../http/client.php push pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8086
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8894
+ redirect: true
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-ssl-redirect.yaml b/tests/plugins/http/configs/.rr-ssl-redirect.yaml
new file mode 100644
index 00000000..fe6c5a86
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-ssl-redirect.yaml
@@ -0,0 +1,31 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8087
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8895
+ redirect: true
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-ssl.yaml b/tests/plugins/http/configs/.rr-ssl.yaml
new file mode 100644
index 00000000..3255383a
--- /dev/null
+++ b/tests/plugins/http/configs/.rr-ssl.yaml
@@ -0,0 +1,38 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: :8085
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 1
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: 8893
+ redirect: false
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://0.0.0.0:16920
+ http2:
+ enabled: false
+ h2c: false
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
\ No newline at end of file
diff --git a/service/http/fixtures/server.crt b/tests/plugins/http/fixtures/server.crt
index 24d67fd7..24d67fd7 100644
--- a/service/http/fixtures/server.crt
+++ b/tests/plugins/http/fixtures/server.crt
diff --git a/service/http/fixtures/server.key b/tests/plugins/http/fixtures/server.key
index 7501dd46..7501dd46 100644
--- a/service/http/fixtures/server.key
+++ b/tests/plugins/http/fixtures/server.key
diff --git a/service/http/handler_test.go b/tests/plugins/http/handler_test.go
index 7a50bf97..18558296 100644
--- a/service/http/handler_test.go
+++ b/tests/plugins/http/handler_test.go
@@ -5,87 +5,40 @@ import (
"context"
"io/ioutil"
"mime/multipart"
- "net/http"
- "net/http/httptest"
"net/url"
- "os"
+ "os/exec"
"runtime"
"strings"
- "testing"
- "time"
- "github.com/spiral/roadrunner"
+ "github.com/spiral/roadrunner/v2/pkg/pipe"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
"github.com/stretchr/testify/assert"
-)
-
-// get request and return body
-func get(url string) (string, *http.Response, error) {
- r, err := http.Get(url)
- if err != nil {
- return "", nil, err
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
- return string(b), r, err
-}
-// get request and return body
-func getHeader(url string, h map[string]string) (string, *http.Response, error) {
- req, err := http.NewRequest("GET", url, bytes.NewBuffer(nil))
- if err != nil {
- return "", nil, err
- }
-
- for k, v := range h {
- req.Header.Set(k, v)
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
+ "net/http"
+ "os"
+ "testing"
+ "time"
+)
- err = r.Body.Close()
+func TestHandler_Echo(t *testing.T) {
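+ // start a one-worker pool over the pipes relay running the PHP echo fixture (tests/http/client.php)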
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- return "", nil, err
+ t.Fatal(err)
}
- return string(b), r, err
-}
-func TestHandler_Echo(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -94,130 +47,52 @@ func TestHandler_Echo(t *testing.T) {
t.Errorf("error during the shutdown: error %v", err)
}
}()
-
- go func() {
- err := hs.ListenAndServe()
+ go func(server *http.Server) {
+ err := server.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
t.Errorf("error listening the interface: error %v", err)
}
- }()
+ }(hs)
time.Sleep(time.Millisecond * 10)
body, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
assert.Equal(t, 201, r.StatusCode)
assert.Equal(t, "WORLD", body)
}
func Test_HandlerErrors(t *testing.T) {
- h := &Handler{
- internalErrorCode: 500,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- wr := httptest.NewRecorder()
- rq := httptest.NewRequest("POST", "/", bytes.NewBuffer([]byte("data")))
-
- h.ServeHTTP(wr, rq)
- assert.Equal(t, 500, wr.Code)
-}
-
-func Test_HandlerErrorsPoolErrorCode(t *testing.T) {
- h := &Handler{
- internalErrorCode: 777,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- wr := httptest.NewRecorder()
- rq := httptest.NewRequest("POST", "/", bytes.NewBuffer([]byte("data")))
-
- h.ServeHTTP(wr, rq)
- assert.Equal(t, 777, wr.Code)
-}
-
-func Test_Handler_JSON_error(t *testing.T) {
- h := &Handler{
- appErrorCode: 500,
- internalErrorCode: 500,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- wr := httptest.NewRecorder()
- rq := httptest.NewRequest("POST", "/", bytes.NewBuffer([]byte("{sd")))
- rq.Header.Add("Content-Type", "application/json")
- rq.Header.Add("Content-Size", "3")
-
- h.ServeHTTP(wr, rq)
- assert.Equal(t, 500, wr.Code)
+ _, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, nil)
+ assert.Error(t, err)
}
func TestHandler_Headers(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php header pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "header", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8078", Handler: h}
defer func() {
@@ -246,7 +121,6 @@ func TestHandler_Headers(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -260,27 +134,26 @@ func TestHandler_Headers(t *testing.T) {
}
func TestHandler_Empty_User_Agent(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php user-agent pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8088", Handler: h}
defer func() {
@@ -309,7 +182,6 @@ func TestHandler_Empty_User_Agent(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -322,27 +194,26 @@ func TestHandler_Empty_User_Agent(t *testing.T) {
}
func TestHandler_User_Agent(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php user-agent pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8088", Handler: h}
defer func() {
@@ -371,7 +242,6 @@ func TestHandler_User_Agent(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -384,27 +254,26 @@ func TestHandler_User_Agent(t *testing.T) {
}
func TestHandler_Cookies(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php cookie pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "cookie", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8079", Handler: h}
defer func() {
@@ -433,7 +302,6 @@ func TestHandler_Cookies(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -451,27 +319,26 @@ func TestHandler_Cookies(t *testing.T) {
}
func TestHandler_JsonPayload_POST(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php payload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8090", Handler: h}
defer func() {
@@ -504,7 +371,6 @@ func TestHandler_JsonPayload_POST(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -517,27 +383,26 @@ func TestHandler_JsonPayload_POST(t *testing.T) {
}
func TestHandler_JsonPayload_PUT(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php payload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8081", Handler: h}
defer func() {
@@ -578,27 +443,26 @@ func TestHandler_JsonPayload_PUT(t *testing.T) {
}
func TestHandler_JsonPayload_PATCH(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php payload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8082", Handler: h}
defer func() {
@@ -627,7 +491,6 @@ func TestHandler_JsonPayload_PATCH(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -640,27 +503,26 @@ func TestHandler_JsonPayload_PATCH(t *testing.T) {
}
func TestHandler_FormData_POST(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8083", Handler: h}
defer func() {
@@ -700,7 +562,6 @@ func TestHandler_FormData_POST(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -710,31 +571,31 @@ func TestHandler_FormData_POST(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
+ // Sorted
+ assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
}
func TestHandler_FormData_POST_Overwrite(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8083", Handler: h}
defer func() {
@@ -755,6 +616,7 @@ func TestHandler_FormData_POST_Overwrite(t *testing.T) {
form := url.Values{}
form.Add("key", "value")
+ form.Add("key", "value2")
form.Add("name[]", "name1")
form.Add("name[]", "name2")
form.Add("name[]", "name3")
@@ -774,7 +636,6 @@ func TestHandler_FormData_POST_Overwrite(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -784,31 +645,30 @@ func TestHandler_FormData_POST_Overwrite(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
+ assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value2","name":["name1","name2","name3"]}`, string(b))
}
func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8083", Handler: h}
defer func() {
@@ -848,7 +708,6 @@ func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -862,29 +721,28 @@ func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) {
}
func TestHandler_FormData_PUT(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
-
- hs := &http.Server{Addr: ":18084", Handler: h}
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
+
+ hs := &http.Server{Addr: ":17834", Handler: h}
defer func() {
err := hs.Shutdown(context.Background())
if err != nil {
@@ -922,7 +780,6 @@ func TestHandler_FormData_PUT(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -936,27 +793,26 @@ func TestHandler_FormData_PUT(t *testing.T) {
}
func TestHandler_FormData_PATCH(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8085", Handler: h}
defer func() {
@@ -996,7 +852,6 @@ func TestHandler_FormData_PATCH(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -1006,31 +861,30 @@ func TestHandler_FormData_PATCH(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
+ assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
}
func TestHandler_Multipart_POST(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8019", Handler: h}
defer func() {
@@ -1050,7 +904,7 @@ func TestHandler_Multipart_POST(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- err := w.WriteField("key", "value")
+ err = w.WriteField("key", "value")
if err != nil {
t.Errorf("error writing the field: error %v", err)
}
@@ -1112,7 +966,6 @@ func TestHandler_Multipart_POST(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -1122,31 +975,30 @@ func TestHandler_Multipart_POST(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
+ assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
}
func TestHandler_Multipart_PUT(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8020", Handler: h}
defer func() {
@@ -1166,7 +1018,7 @@ func TestHandler_Multipart_PUT(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- err := w.WriteField("key", "value")
+ err = w.WriteField("key", "value")
if err != nil {
t.Errorf("error writing the field: error %v", err)
}
@@ -1228,7 +1080,6 @@ func TestHandler_Multipart_PUT(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -1242,27 +1093,26 @@ func TestHandler_Multipart_PUT(t *testing.T) {
}
func TestHandler_Multipart_PATCH(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php data pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8021", Handler: h}
defer func() {
@@ -1283,7 +1133,7 @@ func TestHandler_Multipart_PATCH(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- err := w.WriteField("key", "value")
+ err = w.WriteField("key", "value")
if err != nil {
t.Errorf("error writing the field: error %v", err)
}
@@ -1346,7 +1196,6 @@ func TestHandler_Multipart_PATCH(t *testing.T) {
err := r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -1360,29 +1209,26 @@ func TestHandler_Multipart_PATCH(t *testing.T) {
}
func TestHandler_Error(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- internalErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php error pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1402,33 +1248,33 @@ func TestHandler_Error(t *testing.T) {
_, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
assert.Equal(t, 500, r.StatusCode)
}
func TestHandler_Error2(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- internalErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php error2 pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error2", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1448,33 +1294,33 @@ func TestHandler_Error2(t *testing.T) {
_, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
assert.Equal(t, 500, r.StatusCode)
}
func TestHandler_Error3(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- internalErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php pid pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "pid", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1485,7 +1331,7 @@ func TestHandler_Error3(t *testing.T) {
}()
go func() {
- err := hs.ListenAndServe()
+ err = hs.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
t.Errorf("error listening the interface: error %v", err)
}
@@ -1503,10 +1349,9 @@ func TestHandler_Error3(t *testing.T) {
r, err := http.DefaultClient.Do(req)
assert.NoError(t, err)
defer func() {
- err := r.Body.Close()
+ err = r.Body.Close()
if err != nil {
t.Errorf("error during the closing Body: error %v", err)
-
}
}()
@@ -1515,29 +1360,26 @@ func TestHandler_Error3(t *testing.T) {
}
func TestHandler_ResponseDuration(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- internalErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1556,18 +1398,21 @@ func TestHandler_ResponseDuration(t *testing.T) {
time.Sleep(time.Millisecond * 10)
gotresp := make(chan interface{})
- h.Listen(func(event int, ctx interface{}) {
- if event == EventResponse {
- c := ctx.(*ResponseEvent)
-
- if c.Elapsed() > 0 {
+ h.AddListener(func(event interface{}) {
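+ // v2 handlers emit typed events; the type switch replaces the old EventResponse code check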
+ switch t := event.(type) {
+ case httpPlugin.ResponseEvent:
+ if t.Elapsed() > 0 {
close(gotresp)
}
+ default:
}
})
body, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
<-gotresp
@@ -1576,27 +1421,26 @@ func TestHandler_ResponseDuration(t *testing.T) {
}
func TestHandler_ResponseDurationDelayed(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echoDelay pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echoDelay", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1615,19 +1459,21 @@ func TestHandler_ResponseDurationDelayed(t *testing.T) {
time.Sleep(time.Millisecond * 10)
gotresp := make(chan interface{})
- h.Listen(func(event int, ctx interface{}) {
- if event == EventResponse {
- c := ctx.(*ResponseEvent)
-
- if c.Elapsed() > time.Second {
+ h.AddListener(func(event interface{}) {
+ switch tp := event.(type) {
+ case httpPlugin.ResponseEvent:
+ if tp.Elapsed() > time.Second {
close(gotresp)
}
+ default:
}
})
body, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
-
+ defer func() {
+ _ = r.Body.Close()
+ }()
<-gotresp
assert.Equal(t, 201, r.StatusCode)
@@ -1635,28 +1481,26 @@ func TestHandler_ResponseDurationDelayed(t *testing.T) {
}
func TestHandler_ErrorDuration(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php error pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1667,7 +1511,7 @@ func TestHandler_ErrorDuration(t *testing.T) {
}()
go func() {
- err := hs.ListenAndServe()
+ err = hs.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
t.Errorf("error listening the interface: error %v", err)
}
@@ -1675,18 +1519,21 @@ func TestHandler_ErrorDuration(t *testing.T) {
time.Sleep(time.Millisecond * 10)
goterr := make(chan interface{})
- h.Listen(func(event int, ctx interface{}) {
- if event == EventError {
- c := ctx.(*ErrorEvent)
-
- if c.Elapsed() > 0 {
+ h.AddListener(func(event interface{}) {
+ switch tp := event.(type) {
+ case httpPlugin.ErrorEvent:
+ if tp.Elapsed() > 0 {
close(goterr)
}
+ default:
}
})
_, r, err := get("http://localhost:8177/?hello=world")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
<-goterr
@@ -1694,42 +1541,40 @@ func TestHandler_ErrorDuration(t *testing.T) {
}
func TestHandler_IP(t *testing.T) {
- h := &Handler{
- appErrorCode: http.StatusInternalServerError,
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- TrustedSubnets: []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php ip pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- err := h.cfg.parseCIDRs()
+ trusted := []string{
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ }
+
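+ // trusted subnets are parsed up front with ParseCIDRs and handed to the handler, replacing the removed cfg.parseCIDRs()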
+ cidrs, err := httpPlugin.ParseCIDRs(trusted)
+ assert.NoError(t, err)
+ assert.NotNil(t, cidrs)
+
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- t.Errorf("error parsing CIDRs: error %v", err)
+ t.Fatal(err)
}
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, cidrs, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
defer func() {
@@ -1749,48 +1594,50 @@ func TestHandler_IP(t *testing.T) {
body, r, err := get("http://127.0.0.1:8177/")
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
assert.Equal(t, 200, r.StatusCode)
assert.Equal(t, "127.0.0.1", body)
}
func TestHandler_XRealIP(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- TrustedSubnets: []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php ip pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- err := h.cfg.parseCIDRs()
+ trusted := []string{
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ }
+
+ cidrs, err := httpPlugin.ParseCIDRs(trusted)
+ assert.NoError(t, err)
+ assert.NotNil(t, cidrs)
+
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- t.Errorf("error parsing CIDRs: error %v", err)
+ t.Fatal(err)
}
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, cidrs, pool)
+ assert.NoError(t, err)
- hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
+ hs := &http.Server{Addr: "127.0.0.1:8179", Handler: h}
defer func() {
err := hs.Shutdown(context.Background())
if err != nil {
@@ -1806,52 +1653,55 @@ func TestHandler_XRealIP(t *testing.T) {
}()
time.Sleep(time.Millisecond * 10)
- body, r, err := getHeader("http://127.0.0.1:8177/", map[string]string{
+ body, r, err := getHeader("http://127.0.0.1:8179/", map[string]string{
"X-Real-Ip": "200.0.0.1",
})
assert.NoError(t, err)
+ defer func() {
+ _ = r.Body.Close()
+ }()
assert.Equal(t, 200, r.StatusCode)
assert.Equal(t, "200.0.0.1", body)
}
func TestHandler_XForwardedFor(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- TrustedSubnets: []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "100.0.0.0/16",
- "200.0.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php ip pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- err := h.cfg.parseCIDRs()
+ trusted := []string{
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "100.0.0.0/16",
+ "200.0.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ }
+
+ cidrs, err := httpPlugin.ParseCIDRs(trusted)
+ assert.NoError(t, err)
+ assert.NotNil(t, cidrs)
+
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- t.Errorf("error parsing CIDRs: error %v", err)
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, cidrs, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
defer func() {
@@ -1876,45 +1726,47 @@ func TestHandler_XForwardedFor(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
assert.Equal(t, "101.0.0.1", body)
+ _ = r.Body.Close()
body, r, err = getHeader("http://127.0.0.1:8177/", map[string]string{
"X-Forwarded-For": "100.0.0.1, 200.0.0.1, 101.0.0.1, invalid",
})
assert.NoError(t, err)
+ _ = r.Body.Close()
assert.Equal(t, 200, r.StatusCode)
assert.Equal(t, "101.0.0.1", body)
}
func TestHandler_XForwardedFor_NotTrustedRemoteIp(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- TrustedSubnets: []string{
- "10.0.0.0/8",
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php ip pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- err := h.cfg.parseCIDRs()
+ trusted := []string{
+ "10.0.0.0/8",
+ }
+
+ cidrs, err := httpPlugin.ParseCIDRs(trusted)
+ assert.NoError(t, err)
+ assert.NotNil(t, cidrs)
+
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- t.Errorf("error parsing CIDRs: error %v", err)
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, cidrs, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
defer func() {
@@ -1937,35 +1789,32 @@ func TestHandler_XForwardedFor_NotTrustedRemoteIp(t *testing.T) {
})
assert.NoError(t, err)
+ _ = r.Body.Close()
assert.Equal(t, 200, r.StatusCode)
assert.Equal(t, "127.0.0.1", body)
}
func BenchmarkHandler_Listen_Echo(b *testing.B) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php echo pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
- }
-
- err := h.rr.Start()
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: int64(runtime.NumCPU()),
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
if err != nil {
- b.Errorf("error starting the worker pool: error %v", err)
+ b.Fatal(err)
}
- defer h.rr.Stop()
+ defer func() {
+ pool.Destroy(context.Background())
+ }()
+
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(b, err)
hs := &http.Server{Addr: ":8177", Handler: h}
defer func() {
@@ -1976,13 +1825,15 @@ func BenchmarkHandler_Listen_Echo(b *testing.B) {
}()
go func() {
- err := hs.ListenAndServe()
+ err = hs.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
b.Errorf("error listening the interface: error %v", err)
}
}()
time.Sleep(time.Millisecond * 10)
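+ // exclude pool and server start-up from the timing and report allocations per op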
+ b.ResetTimer()
+ b.ReportAllocs()
bb := "WORLD"
for n := 0; n < b.N; n++ {
r, err := http.Get("http://localhost:8177/?hello=world")
diff --git a/tests/plugins/http/http_plugin_test.go b/tests/plugins/http/http_plugin_test.go
new file mode 100644
index 00000000..88857df5
--- /dev/null
+++ b/tests/plugins/http/http_plugin_test.go
@@ -0,0 +1,1247 @@
+package http
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/interfaces/events"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/informer"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/resetter"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/tests/mocks"
+ "github.com/spiral/roadrunner/v2/tools"
+ "github.com/yookoala/gofast"
+
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/stretchr/testify/assert"
+)
+
+var sslClient = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true, //nolint:gosec
+ },
+ },
+}
+
+func TestHTTPInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ rIn := makeConfig("6001", "15395", "7921", "8892", "false", "false", "php ../../http/client.php echo pipes")
+ cfg := &config.Viper{
+ ReadInCfg: rIn,
+ Type: "yaml",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
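+	// Drain serve events until a signal or the stop channel fires; any serve error fails the test.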
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func TestHTTPInformerReset(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-resetter.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &informer.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("HTTPInformerTest", informerTest)
+ t.Run("HTTPEchoTestBefore", echoHTTP)
+ t.Run("HTTPResetTest", resetTest)
+ t.Run("HTTPEchoTestAfter", echoHTTP)
+
+ stopCh <- struct{}{}
+
+ wg.Wait()
+}
+
+func echoHTTP(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:10084?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func resetTest(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+	// resetter.Reset restarts the worker pool of the named plugin over RPC.
+
+ var ret bool
+ err = client.Call("resetter.Reset", "http", &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ var services []string
+ err = client.Call("resetter.List", nil, &services)
+ assert.NoError(t, err)
+ if services[0] != "http" {
+ t.Fatal("no enough services")
+ }
+}
+
+func informerTest(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ // WorkerList contains list of workers.
+ list := struct {
+ // Workers is list of workers.
+ Workers []tools.ProcessState `json:"workers"`
+ }{}
+
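+	// informer.Workers returns the process state of the workers owned by the http plugin (two per the test config).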
+ err = client.Call("informer.Workers", "http", &list)
+ assert.NoError(t, err)
+ assert.Len(t, list.Workers, 2)
+}
+
+func TestSSL(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-ssl.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("SSLEcho", sslEcho)
+ t.Run("SSLNoRedirect", sslNoRedirect)
+ t.Run("fCGIecho", fcgiEcho)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func sslNoRedirect(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:8085?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := sslClient.Do(req)
+ assert.NoError(t, err)
+
+ assert.Nil(t, r.TLS)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.NoError(t, err)
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err2 := r.Body.Close()
+ if err2 != nil {
+ t.Errorf("fail to close the Body: error %v", err2)
+ }
+}
+
+func sslEcho(t *testing.T) {
+ req, err := http.NewRequest("GET", "https://localhost:8893?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := sslClient.Do(req)
+ assert.NoError(t, err)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.NoError(t, err)
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err2 := r.Body.Close()
+ if err2 != nil {
+ t.Errorf("fail to close the Body: error %v", err2)
+ }
+}
+
+func fcgiEcho(t *testing.T) {
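+	// gofast acts as a FastCGI client and proxies the recorded request to the plugin's FastCGI listener.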
+ fcgiConnFactory := gofast.SimpleConnFactory("tcp", "0.0.0.0:16920")
+
+ fcgiHandler := gofast.NewHandler(
+ gofast.BasicParamsMap(gofast.BasicSession),
+ gofast.SimpleClientFactory(fcgiConnFactory, 0),
+ )
+
+ w := httptest.NewRecorder()
+ req := httptest.NewRequest("GET", "http://site.local/?hello=world", nil)
+ fcgiHandler.ServeHTTP(w, req)
+
+ body, err := ioutil.ReadAll(w.Result().Body) //nolint:bodyclose
+
+ defer func() {
+ _ = w.Result().Body.Close()
+ w.Body.Reset()
+ }()
+
+ assert.NoError(t, err)
+ assert.Equal(t, 201, w.Result().StatusCode) //nolint:bodyclose
+ assert.Equal(t, "WORLD", string(body))
+}
+
+func TestSSLRedirect(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-ssl-redirect.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("SSLRedirect", sslRedirect)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func sslRedirect(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:8087?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := sslClient.Do(req)
+ assert.NoError(t, err)
+ assert.NotNil(t, r.TLS)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.NoError(t, err)
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err2 := r.Body.Close()
+ if err2 != nil {
+ t.Errorf("fail to close the Body: error %v", err2)
+ }
+}
+
+func TestSSLPushPipes(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-ssl-push.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("SSLPush", sslPush)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func sslPush(t *testing.T) {
+ req, err := http.NewRequest("GET", "https://localhost:8894?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := sslClient.Do(req)
+ assert.NoError(t, err)
+
+ assert.NotNil(t, r.TLS)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, "", r.Header.Get("Http2-Push"))
+
+ assert.NoError(t, err)
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err2 := r.Body.Close()
+ if err2 != nil {
+ t.Errorf("fail to close the Body: error %v", err2)
+ }
+}
+
+func TestFastCGI_RequestUri(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-fcgi-reqUri.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("FastCGIServiceRequestUri", fcgiReqURI)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func fcgiReqURI(t *testing.T) {
+ time.Sleep(time.Second * 2)
+ fcgiConnFactory := gofast.SimpleConnFactory("tcp", "127.0.0.1:6921")
+
+ fcgiHandler := gofast.NewHandler(
+ gofast.BasicParamsMap(gofast.BasicSession),
+ gofast.SimpleClientFactory(fcgiConnFactory, 0),
+ )
+
+ w := httptest.NewRecorder()
+ req := httptest.NewRequest("GET", "http://site.local/hello-world", nil)
+ fcgiHandler.ServeHTTP(w, req)
+
+ body, err := ioutil.ReadAll(w.Result().Body) //nolint:bodyclose
+ assert.NoError(t, err)
+ assert.Equal(t, 200, w.Result().StatusCode) //nolint:bodyclose
+ assert.Equal(t, "http://site.local/hello-world", string(body))
+}
+
+func TestH2CUpgrade(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-h2c.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("H2cUpgrade", h2cUpgrade)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func h2cUpgrade(t *testing.T) {
+ req, err := http.NewRequest("PRI", "http://localhost:8083?hello=world", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
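+	// Send the h2c upgrade headers; the server is expected to reply with 101 Switching Protocols.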
+ req.Header.Add("Upgrade", "h2c")
+ req.Header.Add("Connection", "HTTP2-Settings")
+ req.Header.Add("HTTP2-Settings", "")
+
+ r, err2 := http.DefaultClient.Do(req)
+ if err2 != nil {
+		t.Fatal(err2)
+ }
+
+ assert.Equal(t, "101 Switching Protocols", r.Status)
+
+ err3 := r.Body.Close()
+ if err3 != nil {
+		t.Fatal(err3)
+ }
+}
+
+func TestH2C(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-h2c.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("H2c", h2c)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func h2c(t *testing.T) {
+ req, err := http.NewRequest("PRI", "http://localhost:8083?hello=world", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ req.Header.Add("Connection", "HTTP2-Settings")
+ req.Header.Add("HTTP2-Settings", "")
+
+ r, err2 := http.DefaultClient.Do(req)
+ if err2 != nil {
+		t.Fatal(err2)
+ }
+
+ assert.Equal(t, "201 Created", r.Status)
+
+ err3 := r.Body.Close()
+ if err3 != nil {
+		t.Fatal(err3)
+ }
+}
+
+func TestHttpMiddleware(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &PluginMiddleware{},
+ &PluginMiddleware2{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("MiddlewareTest", middleware)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func middleware(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:18903?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+
+ req, err = http.NewRequest("GET", "http://localhost:18903/halt", nil)
+ assert.NoError(t, err)
+
+ r, err = http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+ b, err = ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 500, r.StatusCode)
+ assert.Equal(t, "halted", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestHttpEchoErr(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ rIn := `
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../http/client.php echoerr pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:34999
+ maxRequestSize: 1024
+ middleware: [ "pluginMiddleware", "pluginMiddleware2" ]
+ uploads:
+ forbid: [ "" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+`
+
+ cfg := &config.Viper{
+ Path: "",
+ Prefix: "",
+ Type: "yaml",
+ ReadInCfg: []byte(rIn),
+ }
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Info("worker destructed", "pid", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").MinTimes(1)
+ mockLogger.EXPECT().Info("WORLD", "pid", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("worker event received", "event", events.EventWorkerLog, "worker state", gomock.Any()).MinTimes(1)
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &PluginMiddleware{},
+ &PluginMiddleware2{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("HttpEchoError", echoError)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func echoError(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:34999?hello=world", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 201, r.StatusCode)
+ assert.Equal(t, "WORLD", string(b))
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestHttpEnvVariables(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-env.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &PluginMiddleware{},
+ &PluginMiddleware2{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("EnvVariablesTest", envVarsTest)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func envVarsTest(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:12084", nil)
+ assert.NoError(t, err)
+
+ r, err := http.DefaultClient.Do(req)
+ assert.NoError(t, err)
+
+ b, err := ioutil.ReadAll(r.Body)
+ assert.NoError(t, err)
+
+ assert.Equal(t, 200, r.StatusCode)
+ assert.Equal(t, "ENV_VALUE", string(b))
+
+ err = r.Body.Close()
+ assert.NoError(t, err)
+}
+
+func TestHttpBrokenPipes(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-broken-pipes.yaml",
+ Prefix: "rr",
+ Type: "yaml",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &PluginMiddleware{},
+ &PluginMiddleware2{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.Error(t, err)
+
+ _, err = cont.Serve()
+ assert.Error(t, err)
+}
+
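+// get fetches the URL and returns the body and response; the response body is closed before returning.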
+func get(url string) (string, *http.Response, error) {
+ r, err := http.Get(url) //nolint:gosec
+ if err != nil {
+ return "", nil, err
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return "", nil, err
+ }
+ defer func() {
+ _ = r.Body.Close()
+ }()
+ return string(b), r, err
+}
+
+// getHeader performs a GET request with the provided headers and returns the response body.
+func getHeader(url string, h map[string]string) (string, *http.Response, error) {
+ req, err := http.NewRequest("GET", url, bytes.NewBuffer(nil))
+ if err != nil {
+ return "", nil, err
+ }
+
+ for k, v := range h {
+ req.Header.Set(k, v)
+ }
+
+ r, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", nil, err
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return "", nil, err
+ }
+
+ err = r.Body.Close()
+ if err != nil {
+ return "", nil, err
+ }
+ return string(b), r, err
+}
+
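+// makeConfig renders an in-memory YAML config with the given RPC, HTTP, FCGI and SSL ports, redirect and HTTP/2 flags, and worker command.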
+func makeConfig(rpcPort, httpPort, fcgiPort, sslPort, redirect, http2Enabled, command string) []byte {
+ return []byte(fmt.Sprintf(`
+rpc:
+ listen: tcp://127.0.0.1:%s
+ disabled: false
+
+server:
+ command: "%s"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ address: 127.0.0.1:%s
+ maxRequestSize: 1024
+ middleware: [ "" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+ ssl:
+ port: %s
+ redirect: %s
+ cert: fixtures/server.crt
+ key: fixtures/server.key
+ # rootCa: root.crt
+ fcgi:
+ address: tcp://0.0.0.0:%s
+ http2:
+ enabled: %s
+ h2c: false
+ maxConcurrentStreams: 128
+logs:
+ mode: development
+ level: error
+`, rpcPort, command, httpPort, sslPort, redirect, fcgiPort, http2Enabled))
+}
diff --git a/service/http/parse_test.go b/tests/plugins/http/parse_test.go
index f95a3f9d..5cc1ce32 100644
--- a/service/http/parse_test.go
+++ b/tests/plugins/http/parse_test.go
@@ -1,6 +1,10 @@
package http
-import "testing"
+import (
+ "testing"
+
+ "github.com/spiral/roadrunner/v2/plugins/http"
+)
var samples = []struct {
in string
@@ -16,20 +20,18 @@ var samples = []struct {
}
func Test_FetchIndexes(t *testing.T) {
- for _, tt := range samples {
- t.Run(tt.in, func(t *testing.T) {
- r := fetchIndexes(tt.in)
- if !same(r, tt.out) {
- t.Errorf("got %q, want %q", r, tt.out)
- }
- })
+ for i := 0; i < len(samples); i++ {
+ r := http.FetchIndexes(samples[i].in)
+ if !same(r, samples[i].out) {
+ t.Errorf("got %q, want %q", r, samples[i].out)
+ }
}
}
func BenchmarkConfig_FetchIndexes(b *testing.B) {
for _, tt := range samples {
for n := 0; n < b.N; n++ {
- r := fetchIndexes(tt.in)
+ r := http.FetchIndexes(tt.in)
if !same(r, tt.out) {
b.Fail()
}
diff --git a/tests/plugins/http/plugin1.go b/tests/plugins/http/plugin1.go
new file mode 100644
index 00000000..0ec31211
--- /dev/null
+++ b/tests/plugins/http/plugin1.go
@@ -0,0 +1,27 @@
+package http
+
+import (
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+type Plugin1 struct {
+ config config.Configurer
+}
+
+func (p1 *Plugin1) Init(cfg config.Configurer) error {
+ p1.config = cfg
+ return nil
+}
+
+func (p1 *Plugin1) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (p1 *Plugin1) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin1) Name() string {
+ return "http_test.plugin1"
+}
diff --git a/tests/plugins/http/plugin_middleware.go b/tests/plugins/http/plugin_middleware.go
new file mode 100644
index 00000000..00640b69
--- /dev/null
+++ b/tests/plugins/http/plugin_middleware.go
@@ -0,0 +1,69 @@
+package http
+
+import (
+ "net/http"
+
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+// PluginMiddleware test
+type PluginMiddleware struct {
+ config config.Configurer
+}
+
+// Init test
+func (p *PluginMiddleware) Init(cfg config.Configurer) error {
+ p.config = cfg
+ return nil
+}
+
+// Middleware test
+func (p *PluginMiddleware) Middleware(next http.Handler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/halt" {
+ w.WriteHeader(500)
+ _, err := w.Write([]byte("halted"))
+ if err != nil {
+ panic("error writing the data to the http reply")
+ }
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ }
+}
+
+// Name test
+func (p *PluginMiddleware) Name() string {
+ return "pluginMiddleware"
+}
+
+// PluginMiddleware2 test
+type PluginMiddleware2 struct {
+ config config.Configurer
+}
+
+// Init test
+func (p *PluginMiddleware2) Init(cfg config.Configurer) error {
+ p.config = cfg
+ return nil
+}
+
+// Middleware test
+func (p *PluginMiddleware2) Middleware(next http.Handler) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/boom" {
+ w.WriteHeader(555)
+ _, err := w.Write([]byte("boom"))
+ if err != nil {
+ panic("error writing the data to the http reply")
+ }
+ } else {
+ next.ServeHTTP(w, r)
+ }
+ }
+}
+
+// Name test
+func (p *PluginMiddleware2) Name() string {
+ return "pluginMiddleware2"
+}
diff --git a/service/http/response_test.go b/tests/plugins/http/response_test.go
index 1f394276..9bd2626d 100644
--- a/service/http/response_test.go
+++ b/tests/plugins/http/response_test.go
@@ -6,7 +6,8 @@ import (
"net/http"
"testing"
- "github.com/spiral/roadrunner"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
"github.com/stretchr/testify/assert"
)
@@ -44,13 +45,13 @@ func (tw *testWriter) Push(target string, opts *http.PushOptions) error {
}
func TestNewResponse_Error(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{Context: []byte(`invalid payload`)})
+ r, err := httpPlugin.NewResponse(payload.Payload{Context: []byte(`invalid payload`)})
assert.Error(t, err)
assert.Nil(t, r)
}
func TestNewResponse_Write(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
Body: []byte(`sample body`),
})
@@ -67,7 +68,7 @@ func TestNewResponse_Write(t *testing.T) {
}
func TestNewResponse_Stream(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
})
@@ -76,8 +77,8 @@ func TestNewResponse_Stream(t *testing.T) {
t.Fatal("response is nil")
}
- r.body = &bytes.Buffer{}
- r.body.(*bytes.Buffer).WriteString("hello world")
+ r.Body = &bytes.Buffer{}
+ r.Body.(*bytes.Buffer).WriteString("hello world")
assert.NoError(t, err)
assert.NotNil(t, r)
@@ -91,7 +92,7 @@ func TestNewResponse_Stream(t *testing.T) {
}
func TestNewResponse_StreamError(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
})
@@ -100,8 +101,8 @@ func TestNewResponse_StreamError(t *testing.T) {
t.Fatal("response is nil")
}
- r.body = &bytes.Buffer{}
- r.body.(*bytes.Buffer).WriteString("hello world")
+ r.Body = &bytes.Buffer{}
+ r.Body.(*bytes.Buffer).WriteString("hello world")
assert.NoError(t, err)
assert.NotNil(t, r)
@@ -111,7 +112,7 @@ func TestNewResponse_StreamError(t *testing.T) {
}
func TestWrite_HandlesPush(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(`{"headers":{"Http2-Push":["/test.js"],"content-type":["text/html"]},"status": 200}`),
})
@@ -126,7 +127,7 @@ func TestWrite_HandlesPush(t *testing.T) {
}
func TestWrite_HandlesTrailers(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(`{"headers":{"Trailer":["foo, bar", "baz"],"foo":["test"],"bar":["demo"]},"status": 200}`),
})
@@ -136,16 +137,16 @@ func TestWrite_HandlesTrailers(t *testing.T) {
w := &testWriter{h: http.Header(make(map[string][]string))}
assert.NoError(t, r.Write(w))
- assert.Nil(t, w.h[trailerHeaderKey])
- assert.Nil(t, w.h["foo"]) //nolint:golint,staticcheck
- assert.Nil(t, w.h["baz"]) //nolint:golint,staticcheck
+ assert.Nil(t, w.h[httpPlugin.TrailerHeaderKey])
+ assert.Nil(t, w.h["foo"]) //nolint:staticcheck
+ assert.Nil(t, w.h["baz"]) //nolint:staticcheck
assert.Equal(t, "test", w.h.Get("Trailer:foo"))
assert.Equal(t, "demo", w.h.Get("Trailer:bar"))
}
func TestWrite_HandlesHandlesWhitespacesInTrailer(t *testing.T) {
- r, err := NewResponse(&roadrunner.Payload{
+ r, err := httpPlugin.NewResponse(payload.Payload{
Context: []byte(
`{"headers":{"Trailer":["foo\t,bar , baz"],"foo":["a"],"bar":["b"],"baz":["c"]},"status": 200}`),
})
diff --git a/service/http/uploads_config_test.go b/tests/plugins/http/uploads_config_test.go
index ac8bfa1d..e76078ee 100644
--- a/service/http/uploads_config_test.go
+++ b/tests/plugins/http/uploads_config_test.go
@@ -4,11 +4,12 @@ import (
"os"
"testing"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
"github.com/stretchr/testify/assert"
)
func TestFsConfig_Forbids(t *testing.T) {
- cfg := UploadsConfig{Forbid: []string{".php"}}
+ cfg := httpPlugin.UploadsConfig{Forbid: []string{".php"}}
assert.True(t, cfg.Forbids("index.php"))
assert.True(t, cfg.Forbids("index.PHP"))
@@ -17,9 +18,9 @@ func TestFsConfig_Forbids(t *testing.T) {
}
func TestFsConfig_TmpFallback(t *testing.T) {
- cfg := UploadsConfig{Dir: "test"}
+ cfg := httpPlugin.UploadsConfig{Dir: "test"}
assert.Equal(t, "test", cfg.TmpDir())
- cfg = UploadsConfig{Dir: ""}
+ cfg = httpPlugin.UploadsConfig{Dir: ""}
assert.Equal(t, os.TempDir(), cfg.TmpDir())
}
diff --git a/service/http/uploads_test.go b/tests/plugins/http/uploads_test.go
index bab20d49..7bb25cbf 100644
--- a/service/http/uploads_test.go
+++ b/tests/plugins/http/uploads_test.go
@@ -3,7 +3,7 @@ package http
import (
"bytes"
"context"
- "crypto/md5"
+ "crypto/sha512"
"encoding/hex"
"fmt"
"io"
@@ -11,35 +11,39 @@ import (
"mime/multipart"
"net/http"
"os"
+ "os/exec"
"testing"
"time"
- "github.com/spiral/roadrunner"
+ j "github.com/json-iterator/go"
+ "github.com/spiral/roadrunner/v2/pkg/pipe"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
"github.com/stretchr/testify/assert"
)
+var json = j.ConfigCompatibleWithStandardLibrary
+
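+// testFile is the fixture uploaded by the handler tests below.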
+const testFile = "uploads_test.go"
+
func TestHandler_Upload_File(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php upload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8021", Handler: h}
defer func() {
@@ -60,7 +64,7 @@ func TestHandler_Upload_File(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- f := mustOpen("uploads_test.go")
+ f := mustOpen(testFile)
defer func() {
err := f.Close()
if err != nil {
@@ -100,33 +104,29 @@ func TestHandler_Upload_File(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- fs := fileString("uploads_test.go", 0, "application/octet-stream")
+ fs := fileString(testFile, 0, "application/octet-stream")
assert.Equal(t, `{"upload":`+fs+`}`, string(b))
}
func TestHandler_Upload_NestedFile(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php upload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8021", Handler: h}
defer func() {
@@ -147,7 +147,7 @@ func TestHandler_Upload_NestedFile(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- f := mustOpen("uploads_test.go")
+ f := mustOpen(testFile)
defer func() {
err := f.Close()
if err != nil {
@@ -187,33 +187,29 @@ func TestHandler_Upload_NestedFile(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- fs := fileString("uploads_test.go", 0, "application/octet-stream")
+ fs := fileString(testFile, 0, "application/octet-stream")
assert.Equal(t, `{"upload":{"x":{"y":{"z":[`+fs+`]}}}}`, string(b))
}
func TestHandler_Upload_File_NoTmpDir(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: "-----",
- Forbid: []string{},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php upload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: "-------",
+ Forbid: []string{},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8021", Handler: h}
defer func() {
@@ -234,7 +230,7 @@ func TestHandler_Upload_File_NoTmpDir(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- f := mustOpen("uploads_test.go")
+ f := mustOpen(testFile)
defer func() {
err := f.Close()
if err != nil {
@@ -274,33 +270,29 @@ func TestHandler_Upload_File_NoTmpDir(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- fs := fileString("uploads_test.go", 5, "application/octet-stream")
+ fs := fileString(testFile, 6, "application/octet-stream")
assert.Equal(t, `{"upload":`+fs+`}`, string(b))
}
func TestHandler_Upload_File_Forbids(t *testing.T) {
- h := &Handler{
- cfg: &Config{
- MaxRequestSize: 1024,
- Uploads: &UploadsConfig{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- },
- },
- rr: roadrunner.NewServer(&roadrunner.ServerConfig{
- Command: "php ../../tests/http/client.php upload pipes",
- Relay: "pipes",
- Pool: &roadrunner.Config{
- NumWorkers: 1,
- AllocateTimeout: 10000000,
- DestroyTimeout: 10000000,
- },
- }),
+ pool, err := poolImpl.Initialize(context.Background(),
+ func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
+ pipe.NewPipeFactory(),
+ poolImpl.Config{
+ NumWorkers: 1,
+ AllocateTimeout: time.Second * 1000,
+ DestroyTimeout: time.Second * 1000,
+ })
+ if err != nil {
+ t.Fatal(err)
}
- assert.NoError(t, h.rr.Start())
- defer h.rr.Stop()
+ h, err := httpPlugin.NewHandler(1024, httpPlugin.UploadsConfig{
+ Dir: os.TempDir(),
+ Forbid: []string{".go"},
+ }, nil, pool)
+ assert.NoError(t, err)
hs := &http.Server{Addr: ":8021", Handler: h}
defer func() {
@@ -321,7 +313,7 @@ func TestHandler_Upload_File_Forbids(t *testing.T) {
var mb bytes.Buffer
w := multipart.NewWriter(&mb)
- f := mustOpen("uploads_test.go")
+ f := mustOpen(testFile)
defer func() {
err := f.Close()
if err != nil {
@@ -361,13 +353,13 @@ func TestHandler_Upload_File_Forbids(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 200, r.StatusCode)
- fs := fileString("uploads_test.go", 7, "application/octet-stream")
+ fs := fileString(testFile, 8, "application/octet-stream")
assert.Equal(t, `{"upload":`+fs+`}`, string(b))
}
func Test_FileExists(t *testing.T) {
- assert.True(t, exists("uploads_test.go"))
+ assert.True(t, exists(testFile))
assert.False(t, exists("uploads_test."))
}
@@ -380,11 +372,11 @@ func mustOpen(f string) *os.File {
}
type fInfo struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- Mime string `json:"mime"`
- Error int `json:"error"`
- MD5 string `json:"md5,omitempty"`
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mime string `json:"mime"`
+ Error int `json:"error"`
+ Sha512 string `json:"sha512,omitempty"`
}
func fileString(f string, errNo int, mime string) string {
@@ -405,22 +397,22 @@ func fileString(f string, errNo int, mime string) string {
}
}()
- h := md5.New()
+ h := sha512.New()
_, err = io.Copy(h, ff)
if err != nil {
fmt.Println(fmt.Errorf("error copying the file, error: %v", err))
}
v := &fInfo{
- Name: s.Name(),
- Size: s.Size(),
- Error: errNo,
- Mime: mime,
- MD5: hex.EncodeToString(h.Sum(nil)),
+ Name: s.Name(),
+ Size: s.Size(),
+ Error: errNo,
+ Mime: mime,
+ Sha512: hex.EncodeToString(h.Sum(nil)),
}
if errNo != 0 {
- v.MD5 = ""
+ v.Sha512 = ""
v.Size = 0
}
@@ -429,5 +421,12 @@ func fileString(f string, errNo int, mime string) string {
fmt.Println(fmt.Errorf("error marshalling fInfo, error: %v", err))
}
return string(r)
+}
+// exists reports whether the file at the given path exists.
+func exists(path string) bool {
+ if _, err := os.Stat(path); os.IsNotExist(err) {
+ return false
+ }
+ return true
}
diff --git a/tests/plugins/informer/.rr-informer.yaml b/tests/plugins/informer/.rr-informer.yaml
new file mode 100644
index 00000000..e50ca9c9
--- /dev/null
+++ b/tests/plugins/informer/.rr-informer.yaml
@@ -0,0 +1,16 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/informer/informer_test.go b/tests/plugins/informer/informer_test.go
new file mode 100644
index 00000000..d9fc2143
--- /dev/null
+++ b/tests/plugins/informer/informer_test.go
@@ -0,0 +1,108 @@
+package informer
+
+import (
+ "net"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/informer"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/tools"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestInformerInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &config.Viper{
+ Path: ".rr-informer.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &server.Plugin{},
+ &logger.ZapLogger{},
+ &informer.Plugin{},
+ &rpcPlugin.Plugin{},
+ &Plugin1{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ stopCh := make(chan struct{}, 1)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("InformerRpcTest", informerRPCTest)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func informerRPCTest(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ // WorkerList contains list of workers.
+ list := struct {
+ // Workers is list of workers.
+ Workers []tools.ProcessState `json:"workers"`
+ }{}
+
+ err = client.Call("informer.Workers", "informer.plugin1", &list)
+ assert.NoError(t, err)
+ assert.Len(t, list.Workers, 10)
+}
diff --git a/tests/plugins/informer/test_plugin.go b/tests/plugins/informer/test_plugin.go
new file mode 100644
index 00000000..ba281d02
--- /dev/null
+++ b/tests/plugins/informer/test_plugin.go
@@ -0,0 +1,59 @@
+package informer
+
+import (
+ "context"
+ "time"
+
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
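+// testPoolConfig is the pool configuration used when the informer asks this plugin for its workers.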
+var testPoolConfig = poolImpl.Config{
+ NumWorkers: 10,
+ MaxJobs: 100,
+ AllocateTimeout: time.Second * 10,
+ DestroyTimeout: time.Second * 10,
+ Supervisor: &poolImpl.SupervisorConfig{
+ WatchTick: 60,
+ TTL: 1000,
+ IdleTTL: 10,
+ ExecTTL: 10,
+ MaxWorkerMemory: 1000,
+ },
+}
+
+// Plugin1 is a test plugin that exposes a worker pool to the informer.
+type Plugin1 struct {
+ config config.Configurer
+ server server.Server
+}
+
+func (p1 *Plugin1) Init(cfg config.Configurer, server server.Server) error {
+ p1.config = cfg
+ p1.server = server
+ return nil
+}
+
+func (p1 *Plugin1) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (p1 *Plugin1) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin1) Name() string {
+ return "informer.plugin1"
+}
+
+func (p1 *Plugin1) Workers() []worker.BaseProcess {
+ pool, err := p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ return pool.Workers()
+}
diff --git a/tests/plugins/kv/boltdb/configs/.rr-init.yaml b/tests/plugins/kv/boltdb/configs/.rr-init.yaml
new file mode 100644
index 00000000..4629a24b
--- /dev/null
+++ b/tests/plugins/kv/boltdb/configs/.rr-init.yaml
@@ -0,0 +1,46 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../../psr-worker-bench.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+logs:
+ mode: development
+ level: debug
+
+http:
+ address: 127.0.0.1:44933
+ maxRequestSize: 1024
+ middleware: ["gzip", "headers"]
+ uploads:
+ forbid: [".php", ".exe", ".bat"]
+ trustedSubnets:
+ [
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ ]
+ pool:
+ numWorkers: 6
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+# boltdb simple driver
+boltdb:
+ dir: "."
+ file: "rr"
+ bucket: "test"
+ permissions: 777
+ interval: 1 # seconds
diff --git a/tests/plugins/kv/boltdb/plugin_test.go b/tests/plugins/kv/boltdb/plugin_test.go
new file mode 100644
index 00000000..5548402d
--- /dev/null
+++ b/tests/plugins/kv/boltdb/plugin_test.go
@@ -0,0 +1,195 @@
+package boltdb_tests //nolint:golint,stylecheck
+
+import (
+ "net"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/kv/boltdb"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBoltDb(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.DebugLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &boltdb.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("testBoltDbRPCMethods", testRPCMethods)
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ _ = os.Remove("rr")
+}
+
+func testRPCMethods(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+
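+	// TTL values are RFC3339 timestamps; expired keys drop out of Has and MGet results.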
+ var setRes bool
+ items := make([]kv.Item, 0, 5)
+ items = append(items, kv.Item{
+ Key: "a",
+ Value: "aa",
+ })
+ items = append(items, kv.Item{
+ Key: "b",
+ Value: "bb",
+ })
+ // add 5 second ttl
+ tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+ items = append(items, kv.Item{
+ Key: "c",
+ Value: "cc",
+ TTL: tt,
+ })
+
+ items = append(items, kv.Item{
+ Key: "d",
+ Value: "dd",
+ })
+
+ items = append(items, kv.Item{
+ Key: "e",
+ Value: "ee",
+ })
+
+	// Register 5 keys with values
+ err = client.Call("boltdb.Set", items, &setRes)
+ assert.NoError(t, err)
+ assert.True(t, setRes)
+
+ ret := make(map[string]bool)
+ keys := []string{"a", "b", "c"}
+ err = client.Call("boltdb.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 3) // should be 3
+
+ // key "c" should be deleted
+ time.Sleep(time.Second * 7)
+
+ ret = make(map[string]bool)
+ err = client.Call("boltdb.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 2) // should be 2
+
+ mGet := make(map[string]interface{})
+ keys = []string{"a", "b", "c"}
+ err = client.Call("boltdb.MGet", keys, &mGet)
+ assert.NoError(t, err)
+ assert.Len(t, mGet, 2) // c is expired
+ assert.Equal(t, string("aa"), mGet["a"].(string))
+ assert.Equal(t, string("bb"), mGet["b"].(string))
+
+ mExpKeys := make([]kv.Item, 0, 2)
+ tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
+ mExpKeys = append(mExpKeys, kv.Item{Key: "a", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "b", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "d", TTL: tt2})
+
+ // MEXPIRE
+ var mExpRes bool
+ err = client.Call("boltdb.MExpire", mExpKeys, &mExpRes)
+ assert.NoError(t, err)
+ assert.True(t, mExpRes)
+
+ // TTL
+ keys = []string{"a", "b", "d"}
+ ttlRes := make(map[string]interface{})
+ err = client.Call("boltdb.TTL", keys, &ttlRes)
+ assert.NoError(t, err)
+ assert.Len(t, ttlRes, 3)
+
+ // HAS AFTER TTL
+ time.Sleep(time.Second * 15)
+ ret = make(map[string]bool)
+ keys = []string{"a", "b", "d"}
+ err = client.Call("boltdb.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+
+ // DELETE
+ keys = []string{"e"}
+ var delRet bool
+ err = client.Call("boltdb.Delete", keys, &delRet)
+ assert.NoError(t, err)
+ assert.True(t, delRet)
+
+ // HAS AFTER DELETE
+ ret = make(map[string]bool)
+ keys = []string{"e"}
+ err = client.Call("boltdb.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+}
diff --git a/tests/plugins/kv/memcached/configs/.rr-init.yaml b/tests/plugins/kv/memcached/configs/.rr-init.yaml
new file mode 100644
index 00000000..759fc3ba
--- /dev/null
+++ b/tests/plugins/kv/memcached/configs/.rr-init.yaml
@@ -0,0 +1,43 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../../psr-worker-bench.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+logs:
+ mode: development
+ level: debug
+
+http:
+ address: 127.0.0.1:44933
+ maxRequestSize: 1024
+ middleware: ["gzip", "headers"]
+ uploads:
+ forbid: [".php", ".exe", ".bat"]
+ trustedSubnets:
+ [
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ ]
+ pool:
+ numWorkers: 6
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+# memcached driver
+memcached:
+ addr:
+ - "localhost:11211" \ No newline at end of file
diff --git a/tests/plugins/kv/memcached/plugin_test.go b/tests/plugins/kv/memcached/plugin_test.go
new file mode 100644
index 00000000..d4cb58bb
--- /dev/null
+++ b/tests/plugins/kv/memcached/plugin_test.go
@@ -0,0 +1,195 @@
+package memcached_test
+
+import (
+ "net"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/kv/memcached"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMemcache(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.DebugLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &memcached.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("testMemcachedRPCMethods", testRPCMethods)
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ _ = os.Remove("rr")
+}
+
+func testRPCMethods(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+
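+	// Unlike the boltdb driver, memcached returns values as []byte in MGet results.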
+ var setRes bool
+ items := make([]kv.Item, 0, 5)
+ items = append(items, kv.Item{
+ Key: "a",
+ Value: "aa",
+ })
+ items = append(items, kv.Item{
+ Key: "b",
+ Value: "bb",
+ })
+ // add 5 second ttl
+ tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+ items = append(items, kv.Item{
+ Key: "c",
+ Value: "cc",
+ TTL: tt,
+ })
+
+ items = append(items, kv.Item{
+ Key: "d",
+ Value: "dd",
+ })
+
+ items = append(items, kv.Item{
+ Key: "e",
+ Value: "ee",
+ })
+
+	// Register 5 keys with values
+ err = client.Call("memcached.Set", items, &setRes)
+ assert.NoError(t, err)
+ assert.True(t, setRes)
+
+ ret := make(map[string]bool)
+ keys := []string{"a", "b", "c"}
+ err = client.Call("memcached.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 3) // should be 3
+
+ // key "c" should be deleted
+ time.Sleep(time.Second * 7)
+
+ ret = make(map[string]bool)
+ err = client.Call("memcached.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 2) // should be 2
+
+ mGet := make(map[string]interface{})
+ keys = []string{"a", "b", "c"}
+ err = client.Call("memcached.MGet", keys, &mGet)
+ assert.NoError(t, err)
+ assert.Len(t, mGet, 2) // c is expired
+ assert.Equal(t, string("aa"), string(mGet["a"].([]byte)))
+ assert.Equal(t, string("bb"), string(mGet["b"].([]byte)))
+
+ mExpKeys := make([]kv.Item, 0, 2)
+ tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
+ mExpKeys = append(mExpKeys, kv.Item{Key: "a", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "b", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "d", TTL: tt2})
+
+ // MEXPIRE
+ var mExpRes bool
+ err = client.Call("memcached.MExpire", mExpKeys, &mExpRes)
+ assert.NoError(t, err)
+ assert.True(t, mExpRes)
+
+ // TTL call is not supported for the memcached driver
+ keys = []string{"a", "b", "d"}
+ ttlRes := make(map[string]interface{})
+ err = client.Call("memcached.TTL", keys, &ttlRes)
+ assert.Error(t, err)
+ assert.Len(t, ttlRes, 0)
+
+ // HAS AFTER TTL
+ time.Sleep(time.Second * 15)
+ ret = make(map[string]bool)
+ keys = []string{"a", "b", "d"}
+ err = client.Call("memcached.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+
+ // DELETE
+ keys = []string{"e"}
+ var delRet bool
+ err = client.Call("memcached.Delete", keys, &delRet)
+ assert.NoError(t, err)
+ assert.True(t, delRet)
+
+ // HAS AFTER DELETE
+ ret = make(map[string]bool)
+ keys = []string{"e"}
+ err = client.Call("memcached.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+}
diff --git a/tests/plugins/kv/memory/configs/.rr-init.yaml b/tests/plugins/kv/memory/configs/.rr-init.yaml
new file mode 100644
index 00000000..dedc6cd2
--- /dev/null
+++ b/tests/plugins/kv/memory/configs/.rr-init.yaml
@@ -0,0 +1,45 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+server:
+ command: "php ../../../psr-worker-bench.php"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+logs:
+ mode: development
+ level: debug
+
+http:
+ address: 127.0.0.1:44933
+ maxRequestSize: 1024
+ middleware: ["gzip", "headers"]
+ uploads:
+ forbid: [".php", ".exe", ".bat"]
+ trustedSubnets:
+ [
+ "10.0.0.0/8",
+ "127.0.0.0/8",
+ "172.16.0.0/12",
+ "192.168.0.0/16",
+ "::1/128",
+ "fc00::/7",
+ "fe80::/10",
+ ]
+ pool:
+ numWorkers: 6
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+
+# in-memory KV driver
+memory:
+  enabled: true
+  # keys TTL check interval
+  interval: 1
diff --git a/tests/plugins/kv/memory/plugin_test.go b/tests/plugins/kv/memory/plugin_test.go
new file mode 100644
index 00000000..ee01fabb
--- /dev/null
+++ b/tests/plugins/kv/memory/plugin_test.go
@@ -0,0 +1,195 @@
+package memory_test
+
+import (
+ "net"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/kv"
+ "github.com/spiral/roadrunner/v2/plugins/kv/memory"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestInMemory(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.DebugLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-init.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &memory.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
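+	// The goroutine below drains container events: it stops the container on
+	// the first error, on an OS signal, or when the test pushes to stopCh.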
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second * 1)
+ t.Run("testInMemoryRPCMethods", testRPCMethods)
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ _ = os.Remove("rr")
+}
+
+func testRPCMethods(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+
+ var setRes bool
+ items := make([]kv.Item, 0, 5)
+ items = append(items, kv.Item{
+ Key: "a",
+ Value: "aa",
+ })
+ items = append(items, kv.Item{
+ Key: "b",
+ Value: "bb",
+ })
+	// give key "c" a 5 second TTL (passed as an RFC3339 timestamp)
+ tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
+ items = append(items, kv.Item{
+ Key: "c",
+ Value: "cc",
+ TTL: tt,
+ })
+
+ items = append(items, kv.Item{
+ Key: "d",
+ Value: "dd",
+ })
+
+ items = append(items, kv.Item{
+ Key: "e",
+ Value: "ee",
+ })
+
+	// Register all 5 keys with their values
+ err = client.Call("memory.Set", items, &setRes)
+ assert.NoError(t, err)
+ assert.True(t, setRes)
+
+ ret := make(map[string]bool)
+ keys := []string{"a", "b", "c"}
+ err = client.Call("memory.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 3) // should be 3
+
+	// key "c" should expire after its 5 second TTL
+ time.Sleep(time.Second * 7)
+
+ ret = make(map[string]bool)
+ err = client.Call("memory.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 2) // should be 2
+
+ mGet := make(map[string]interface{})
+ keys = []string{"a", "b", "c"}
+ err = client.Call("memory.MGet", keys, &mGet)
+ assert.NoError(t, err)
+ assert.Len(t, mGet, 2) // c is expired
+	assert.Equal(t, "aa", mGet["a"].(string))
+	assert.Equal(t, "bb", mGet["b"].(string))
+
+	mExpKeys := make([]kv.Item, 0, 3)
+ tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
+ mExpKeys = append(mExpKeys, kv.Item{Key: "a", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "b", TTL: tt2})
+ mExpKeys = append(mExpKeys, kv.Item{Key: "d", TTL: tt2})
+
+ // MEXPIRE
+ var mExpRes bool
+ err = client.Call("memory.MExpire", mExpKeys, &mExpRes)
+ assert.NoError(t, err)
+ assert.True(t, mExpRes)
+
+ // TTL
+ keys = []string{"a", "b", "d"}
+ ttlRes := make(map[string]interface{})
+ err = client.Call("memory.TTL", keys, &ttlRes)
+ assert.NoError(t, err)
+ assert.Len(t, ttlRes, 3)
+
+ // HAS AFTER TTL
+ time.Sleep(time.Second * 15)
+ ret = make(map[string]bool)
+ keys = []string{"a", "b", "d"}
+ err = client.Call("memory.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+
+ // DELETE
+ keys = []string{"e"}
+ var delRet bool
+ err = client.Call("memory.Delete", keys, &delRet)
+ assert.NoError(t, err)
+ assert.True(t, delRet)
+
+ // HAS AFTER DELETE
+ ret = make(map[string]bool)
+ keys = []string{"e"}
+ err = client.Call("memory.Has", keys, &ret)
+ assert.NoError(t, err)
+ assert.Len(t, ret, 0)
+}
diff --git a/tests/plugins/logger/.rr.yaml b/tests/plugins/logger/.rr.yaml
new file mode 100644
index 00000000..cb555ec3
--- /dev/null
+++ b/tests/plugins/logger/.rr.yaml
@@ -0,0 +1,3 @@
+logs:
+ mode: development
+ level: debug \ No newline at end of file
diff --git a/tests/plugins/logger/logger_test.go b/tests/plugins/logger/logger_test.go
new file mode 100644
index 00000000..cc788be3
--- /dev/null
+++ b/tests/plugins/logger/logger_test.go
@@ -0,0 +1,79 @@
+package logger
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "testing"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLogger(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = ".rr.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errCh, err := container.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // stop by CTRL+C
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+
+ stopCh := make(chan struct{}, 1)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-errCh:
+ assert.NoError(t, e.Error)
+ assert.NoError(t, container.Stop())
+ return
+ case <-c:
+ err = container.Stop()
+ assert.NoError(t, err)
+ return
+ case <-stopCh:
+ assert.NoError(t, container.Stop())
+ return
+ }
+ }
+ }()
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
diff --git a/tests/plugins/logger/plugin.go b/tests/plugins/logger/plugin.go
new file mode 100644
index 00000000..9ddf9ec9
--- /dev/null
+++ b/tests/plugins/logger/plugin.go
@@ -0,0 +1,40 @@
+package logger
+
+import (
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+type Plugin struct {
+ config config.Configurer
+ log logger.Logger
+}
+
+func (p1 *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
+ p1.config = cfg
+ p1.log = log
+ return nil
+}
+
+func (p1 *Plugin) Serve() chan error {
+ errCh := make(chan error, 1)
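+	// Log one message per level, first with an attached error value and then
+	// with plain key/values, so each log level is exercised both ways.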
+ p1.log.Error("error", "test", errors.E(errors.Str("test")))
+ p1.log.Info("error", "test", errors.E(errors.Str("test")))
+ p1.log.Debug("error", "test", errors.E(errors.Str("test")))
+ p1.log.Warn("error", "test", errors.E(errors.Str("test")))
+
+ p1.log.Error("error", "test")
+ p1.log.Info("error", "test")
+ p1.log.Debug("error", "test")
+ p1.log.Warn("error", "test")
+ return errCh
+}
+
+func (p1 *Plugin) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin) Name() string {
+ return "logger_plugin"
+}
diff --git a/tests/plugins/metrics/.rr-test.yaml b/tests/plugins/metrics/.rr-test.yaml
new file mode 100644
index 00000000..37c50395
--- /dev/null
+++ b/tests/plugins/metrics/.rr-test.yaml
@@ -0,0 +1,16 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+
+metrics:
+ # prometheus client address (path /metrics added automatically)
+ address: localhost:2112
+ collect:
+ app_metric:
+ type: histogram
+ help: "Custom application metric"
+ labels: [ "type" ]
+ buckets: [ 0.1, 0.2, 0.3, 1.0 ]
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/metrics/docker-compose.yml b/tests/plugins/metrics/docker-compose.yml
new file mode 100644
index 00000000..610633b4
--- /dev/null
+++ b/tests/plugins/metrics/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3.7'
+
+services:
+ prometheus:
+ image: prom/prometheus
+ ports:
+ - 9090:9090
diff --git a/tests/plugins/metrics/metrics_test.go b/tests/plugins/metrics/metrics_test.go
new file mode 100644
index 00000000..c94d51bc
--- /dev/null
+++ b/tests/plugins/metrics/metrics_test.go
@@ -0,0 +1,739 @@
+package metrics
+
+import (
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/metrics"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/stretchr/testify/assert"
+)
+
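+// dialAddr matches rpc.listen in .rr-test.yaml; getAddr is the Prometheus
+// endpoint exposed by the metrics plugin (the /metrics path is appended to
+// metrics.address automatically, per the config comment).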
+const dialAddr = "127.0.0.1:6001"
+const dialNetwork = "tcp"
+const getAddr = "http://localhost:2112/metrics"
+
+// get performs an HTTP GET against the metrics endpoint and returns the response body
+func get() (string, error) {
+ r, err := http.Get(getAddr)
+ if err != nil {
+ return "", err
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return "", err
+ }
+
+ err = r.Body.Close()
+ if err != nil {
+ return "", err
+ }
+	// convert the raw body to a string for the Contains assertions
+ return string(b), err
+}
+
+func TestMetricsInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &config.Viper{}
+ cfg.Prefix = "rr"
+ cfg.Path = ".rr-test.yaml"
+
+ err = cont.RegisterAll(
+ cfg,
+ &metrics.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &Plugin1{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ tt := time.NewTimer(time.Second * 5)
+
+ out, err := get()
+ assert.NoError(t, err)
+
+ assert.Contains(t, out, "go_gc_duration_seconds")
+
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-tt.C:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+}
+
+func TestMetricsGaugeCollector(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &config.Viper{}
+ cfg.Prefix = "rr"
+ cfg.Path = ".rr-test.yaml"
+
+ err = cont.RegisterAll(
+ cfg,
+ &metrics.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ &Plugin1{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ time.Sleep(time.Second)
+ tt := time.NewTimer(time.Second * 5)
+
+ out, err := get()
+ assert.NoError(t, err)
+ assert.Contains(t, out, "my_gauge 100")
+ assert.Contains(t, out, "my_gauge2 100")
+
+ out, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, out, "go_gc_duration_seconds")
+
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-tt.C:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+}
+
+func TestMetricsDifferentRPCCalls(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &config.Viper{}
+ cfg.Prefix = "rr"
+ cfg.Path = ".rr-test.yaml"
+
+ err = cont.RegisterAll(
+ cfg,
+ &metrics.Plugin{},
+ &rpcPlugin.Plugin{},
+ &logger.ZapLogger{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
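+	// The goroutine below acts as a watchdog: it stops the container on error,
+	// on an OS signal, or when the 3 minute timer fires, while the subtests run
+	// sequentially against the live RPC endpoint.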
+ go func() {
+ tt := time.NewTimer(time.Minute * 3)
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-tt.C:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("DeclareMetric", declareMetricsTest)
+ genericOut, err := get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "test_metrics_named_collector")
+
+ t.Run("AddMetric", addMetricsTest)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "test_metrics_named_collector 10000")
+
+ t.Run("SetMetric", setMetric)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "user_gauge_collector 100")
+
+ t.Run("VectorMetric", vectorMetric)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "gauge_2_collector{section=\"first\",type=\"core\"} 100")
+
+ t.Run("MissingSection", missingSection)
+ t.Run("SetWithoutLabels", setWithoutLabels)
+ t.Run("SetOnHistogram", setOnHistogram)
+ t.Run("MetricSub", subMetric)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "sub_gauge_subMetric 1")
+
+ t.Run("SubVector", subVector)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "sub_gauge_subVector{section=\"first\",type=\"core\"} 1")
+
+ t.Run("RegisterHistogram", registerHistogram)
+
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, `TYPE histogram_registerHistogram`)
+
+ // check buckets
+ assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.1"} 0`)
+ assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.2"} 0`)
+ assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.5"} 0`)
+ assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="+Inf"} 0`)
+ assert.Contains(t, genericOut, `histogram_registerHistogram_sum 0`)
+ assert.Contains(t, genericOut, `histogram_registerHistogram_count 0`)
+
+ t.Run("CounterMetric", counterMetric)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "HELP default_default_counter_CounterMetric test_counter")
+ assert.Contains(t, genericOut, `default_default_counter_CounterMetric{section="section2",type="type2"}`)
+
+ t.Run("ObserveMetric", observeMetric)
+ genericOut, err = get()
+ assert.NoError(t, err)
+ assert.Contains(t, genericOut, "observe_observeMetric")
+
+ t.Run("ObserveMetricNotEnoughLabels", observeMetricNotEnoughLabels)
+
+ close(sig)
+}
+
+func observeMetricNotEnoughLabels(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "observe_observeMetricNotEnoughLabels",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Help: "test_observe",
+ Type: metrics.Histogram,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ assert.Error(t, client.Call("metrics.Observe", metrics.Metric{
+ Name: "observe_observeMetric",
+ Value: 100.0,
+ Labels: []string{"test"},
+ }, &ret))
+ assert.False(t, ret)
+}
+
+func observeMetric(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "observe_observeMetric",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Help: "test_observe",
+ Type: metrics.Histogram,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ assert.NoError(t, client.Call("metrics.Observe", metrics.Metric{
+ Name: "observe_observeMetric",
+ Value: 100.0,
+ Labels: []string{"test", "test2"},
+ }, &ret))
+ assert.True(t, ret)
+}
+
+func counterMetric(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "counter_CounterMetric",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Help: "test_counter",
+ Type: metrics.Counter,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ assert.NoError(t, client.Call("metrics.Add", metrics.Metric{
+ Name: "counter_CounterMetric",
+ Value: 100.0,
+ Labels: []string{"type2", "section2"},
+ }, &ret))
+ assert.True(t, ret)
+}
+
+func registerHistogram(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "histogram_registerHistogram",
+ Collector: metrics.Collector{
+ Help: "test_histogram",
+ Type: metrics.Histogram,
+ Buckets: []float64{0.1, 0.2, 0.5},
+ },
+ }
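+	// Prometheus histograms always expose an implicit +Inf bucket on top of the
+	// declared ones; TestMetricsDifferentRPCCalls asserts on it explicitly.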
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ m := metrics.Metric{
+ Name: "histogram_registerHistogram",
+ Value: 10000,
+ Labels: nil,
+ }
+
+ err = client.Call("metrics.Add", m, &ret)
+ assert.Error(t, err)
+ assert.False(t, ret)
+}
+
+func subVector(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "sub_gauge_subVector",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ m := metrics.Metric{
+ Name: "sub_gauge_subVector",
+ Value: 100000,
+ Labels: []string{"core", "first"},
+ }
+
+ err = client.Call("metrics.Add", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ m = metrics.Metric{
+ Name: "sub_gauge_subVector",
+ Value: 99999,
+ Labels: []string{"core", "first"},
+ }
+
+ err = client.Call("metrics.Sub", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
+
+func subMetric(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "sub_gauge_subMetric",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ m := metrics.Metric{
+ Name: "sub_gauge_subMetric",
+ Value: 100000,
+ }
+
+ err = client.Call("metrics.Add", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ m = metrics.Metric{
+ Name: "sub_gauge_subMetric",
+ Value: 99999,
+ }
+
+ err = client.Call("metrics.Sub", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
+
+func setOnHistogram(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "histogram_setOnHistogram",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Histogram,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ m := metrics.Metric{
+ Name: "gauge_setOnHistogram",
+ Value: 100.0,
+ }
+
+	err = client.Call("metrics.Set", m, &ret) // should fail: "gauge_setOnHistogram" was never declared, and Set is not valid for a histogram
+ assert.Error(t, err)
+ assert.False(t, ret)
+}
+
+func setWithoutLabels(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "gauge_setWithoutLabels",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ m := metrics.Metric{
+ Name: "gauge_setWithoutLabels",
+ Value: 100.0,
+ }
+
+	err = client.Call("metrics.Set", m, &ret) // expected 2 label values, but none were provided
+ assert.Error(t, err)
+ assert.False(t, ret)
+}
+
+func missingSection(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "gauge_missing_section_collector",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ m := metrics.Metric{
+ Name: "gauge_missing_section_collector",
+ Value: 100.0,
+ Labels: []string{"missing"},
+ }
+
+ err = client.Call("metrics.Set", m, &ret) // expected 2 label values but got 1 in []string{"missing"}
+ assert.Error(t, err)
+ assert.False(t, ret)
+}
+
+func vectorMetric(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "gauge_2_collector",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ Labels: []string{"type", "section"},
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+
+ ret = false
+
+ m := metrics.Metric{
+ Name: "gauge_2_collector",
+ Value: 100.0,
+ Labels: []string{"core", "first"},
+ }
+
+ err = client.Call("metrics.Set", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
+
+func setMetric(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "user_gauge_collector",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Gauge,
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ m := metrics.Metric{
+ Name: "user_gauge_collector",
+ Value: 100.0,
+ }
+
+ err = client.Call("metrics.Set", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
+
+func addMetricsTest(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ m := metrics.Metric{
+ Name: "test_metrics_named_collector",
+ Value: 10000,
+ Labels: nil,
+ }
+
+ err = client.Call("metrics.Add", m, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
+
+func declareMetricsTest(t *testing.T) {
+ conn, err := net.Dial(dialNetwork, dialAddr)
+ assert.NoError(t, err)
+ defer func() {
+ _ = conn.Close()
+ }()
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret bool
+
+ nc := metrics.NamedCollector{
+ Name: "test_metrics_named_collector",
+ Collector: metrics.Collector{
+ Namespace: "default",
+ Subsystem: "default",
+ Type: metrics.Counter,
+ Help: "NO HELP!",
+ Labels: nil,
+ Buckets: nil,
+ },
+ }
+
+ err = client.Call("metrics.Declare", nc, &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+}
diff --git a/tests/plugins/metrics/plugin1.go b/tests/plugins/metrics/plugin1.go
new file mode 100644
index 00000000..ae024a8a
--- /dev/null
+++ b/tests/plugins/metrics/plugin1.go
@@ -0,0 +1,46 @@
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+// Plugin1 exposes two pre-set gauge collectors for the metrics plugin tests.
+type Plugin1 struct {
+ config config.Configurer
+}
+
+func (p1 *Plugin1) Init(cfg config.Configurer) error {
+ p1.config = cfg
+ return nil
+}
+
+func (p1 *Plugin1) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (p1 *Plugin1) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin1) Name() string {
+ return "metrics_test.plugin1"
+}
+
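+// MetricsCollector returns two pre-set gauges; the metrics plugin is expected
+// to register them, which is what TestMetricsGaugeCollector asserts on.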
+func (p1 *Plugin1) MetricsCollector() []prometheus.Collector {
+ collector := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "my_gauge",
+ Help: "My gauge value",
+ })
+
+ collector.Set(100)
+
+ collector2 := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "my_gauge2",
+ Help: "My gauge2 value",
+ })
+
+ collector2.Set(100)
+ return []prometheus.Collector{collector, collector2}
+}
diff --git a/tests/plugins/mocks/mock_log.go b/tests/plugins/mocks/mock_log.go
new file mode 100644
index 00000000..e9631805
--- /dev/null
+++ b/tests/plugins/mocks/mock_log.go
@@ -0,0 +1,150 @@
+package mocks
+
+import (
+ "reflect"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+)
+
+// MockLogger is a mock of Logger interface.
+type MockLogger struct {
+ ctrl *gomock.Controller
+ recorder *MockLoggerMockRecorder
+}
+
+// MockLoggerMockRecorder is the mock recorder for MockLogger.
+type MockLoggerMockRecorder struct {
+ mock *MockLogger
+}
+
+// NewMockLogger creates a new mock instance.
+func NewMockLogger(ctrl *gomock.Controller) *MockLogger {
+ mock := &MockLogger{ctrl: ctrl}
+ mock.recorder = &MockLoggerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockLogger) EXPECT() *MockLoggerMockRecorder {
+ return m.recorder
+}
+
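+// Init satisfies the endure plugin contract so the mock can be registered
+// directly in a container (as the redis and reload plugin tests do).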
+func (m *MockLogger) Init() error {
+ mock := &MockLogger{ctrl: m.ctrl}
+ mock.recorder = &MockLoggerMockRecorder{mock}
+ return nil
+}
+
+// Debug mocks base method.
+func (m *MockLogger) Debug(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Debug", varargs...)
+}
+
+// Warn mocks base method.
+func (m *MockLogger) Warn(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Warn", varargs...)
+}
+
+// Info mocks base method.
+func (m *MockLogger) Info(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Info", varargs...)
+}
+
+// Error mocks base method.
+func (m *MockLogger) Error(msg string, keyvals ...interface{}) {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{msg}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ m.ctrl.Call(m, "Error", varargs...)
+}
+
+// Warn indicates an expected call of Warn.
+func (mr *MockLoggerMockRecorder) Warn(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...)
+}
+
+// Debug indicates an expected call of Debug.
+func (mr *MockLoggerMockRecorder) Debug(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...)
+}
+
+// Error indicates an expected call of Error.
+func (mr *MockLoggerMockRecorder) Error(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...)
+}
+
+func (mr *MockLoggerMockRecorder) Init() error {
+ return nil
+}
+
+// Info indicates an expected call of Info.
+func (mr *MockLoggerMockRecorder) Info(msg interface{}, keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]interface{}{msg}, keyvals...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...)
+}
+
+// MockWithLogger is a mock of WithLogger interface.
+type MockWithLogger struct {
+ ctrl *gomock.Controller
+ recorder *MockWithLoggerMockRecorder
+}
+
+// MockWithLoggerMockRecorder is the mock recorder for MockWithLogger.
+type MockWithLoggerMockRecorder struct {
+ mock *MockWithLogger
+}
+
+// NewMockWithLogger creates a new mock instance.
+func NewMockWithLogger(ctrl *gomock.Controller) *MockWithLogger {
+ mock := &MockWithLogger{ctrl: ctrl}
+ mock.recorder = &MockWithLoggerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockWithLogger) EXPECT() *MockWithLoggerMockRecorder {
+ return m.recorder
+}
+
+// With mocks base method.
+func (m *MockWithLogger) With(keyvals ...interface{}) logger.Logger {
+ m.ctrl.T.Helper()
+ varargs := []interface{}{}
+ for _, a := range keyvals {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "With", varargs...)
+ ret0, _ := ret[0].(logger.Logger)
+ return ret0
+}
+
+// With indicates an expected call of With.
+func (mr *MockWithLoggerMockRecorder) With(keyvals ...interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockWithLogger)(nil).With), keyvals...)
+}
diff --git a/tests/plugins/redis/plugin1.go b/tests/plugins/redis/plugin1.go
new file mode 100644
index 00000000..e50213e5
--- /dev/null
+++ b/tests/plugins/redis/plugin1.go
@@ -0,0 +1,43 @@
+package redis
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/spiral/errors"
+ redisPlugin "github.com/spiral/roadrunner/v2/plugins/redis"
+)
+
+type Plugin1 struct {
+ redisClient redis.UniversalClient
+}
+
+func (p *Plugin1) Init(redis redisPlugin.Redis) error {
+ p.redisClient = redis.GetClient()
+ return nil
+}
+
+func (p *Plugin1) Serve() chan error {
+ const op = errors.Op("plugin1 serve")
+ errCh := make(chan error, 1)
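+	// Round-trip a key through the client to verify that the redis plugin wired
+	// up a working connection (miniredis in the tests).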
+ p.redisClient.Set(context.Background(), "foo", "bar", time.Minute)
+
+ stringCmd := p.redisClient.Get(context.Background(), "foo")
+ data, err := stringCmd.Result()
+ if err != nil {
+ errCh <- errors.E(op, err)
+ return errCh
+ }
+
+ if data != "bar" {
+ errCh <- errors.E(op, errors.Str("no such key"))
+ return errCh
+ }
+
+ return errCh
+}
+
+func (p *Plugin1) Stop() error {
+ return nil
+}
diff --git a/tests/plugins/redis/redis_plugin_test.go b/tests/plugins/redis/redis_plugin_test.go
new file mode 100644
index 00000000..eba05752
--- /dev/null
+++ b/tests/plugins/redis/redis_plugin_test.go
@@ -0,0 +1,120 @@
+package redis
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+
+ "github.com/alicebob/miniredis/v2"
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/redis"
+ "github.com/spiral/roadrunner/v2/tests/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
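+// redisConfig renders a YAML snippet that points the redis plugin at the
+// miniredis instance started by the test; it is loaded via config.Viper's
+// ReadInCfg field.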
+func redisConfig(port string) string {
+ cfg := `
+redis:
+ addrs:
+ - 'localhost:%s'
+ master_name: ''
+ username: ''
+ password: ''
+ db: 0
+ sentinel_password: ''
+ route_by_latency: false
+ route_randomly: false
+ dial_timeout: 0
+ max_retries: 1
+ min_retry_backoff: 0
+ max_retry_backoff: 0
+ pool_size: 0
+ min_idle_conns: 0
+ max_conn_age: 0
+ read_timeout: 0
+ write_timeout: 0
+ pool_timeout: 0
+ idle_timeout: 0
+ idle_check_freq: 0
+ read_only: false
+`
+ return fmt.Sprintf(cfg, port)
+}
+
+func TestRedisInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := miniredis.Run()
+ assert.NoError(t, err)
+
+ c := redisConfig(s.Port())
+
+ cfg := &config.Viper{}
+ cfg.Type = "yaml"
+ cfg.ReadInCfg = []byte(c)
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &redis.Plugin{},
+ &Plugin1{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
diff --git a/service/reload/config_test.go b/tests/plugins/reload/config_test.go
index b0620aa1..72c11070 100644
--- a/service/reload/config_test.go
+++ b/tests/plugins/reload/config_test.go
@@ -4,20 +4,20 @@ import (
"testing"
"time"
+ "github.com/spiral/roadrunner/v2/plugins/reload"
"github.com/stretchr/testify/assert"
)
func Test_Config_Valid(t *testing.T) {
- services := make(map[string]ServiceConfig)
- services["test"] = ServiceConfig{
+ services := make(map[string]reload.ServiceConfig)
+ services["test"] = reload.ServiceConfig{
Recursive: false,
Patterns: nil,
Dirs: nil,
Ignore: nil,
- service: nil,
}
- cfg := &Config{
+ cfg := &reload.Config{
Interval: time.Second,
Patterns: nil,
Services: services,
@@ -26,8 +26,8 @@ func Test_Config_Valid(t *testing.T) {
}
func Test_Fake_ServiceConfig(t *testing.T) {
- services := make(map[string]ServiceConfig)
- cfg := &Config{
+ services := make(map[string]reload.ServiceConfig)
+ cfg := &reload.Config{
Interval: time.Microsecond,
Patterns: nil,
Services: services,
@@ -36,17 +36,16 @@ func Test_Fake_ServiceConfig(t *testing.T) {
}
func Test_Interval(t *testing.T) {
- services := make(map[string]ServiceConfig)
- services["test"] = ServiceConfig{
+ services := make(map[string]reload.ServiceConfig)
+ services["test"] = reload.ServiceConfig{
Enabled: false,
Recursive: false,
Patterns: nil,
Dirs: nil,
Ignore: nil,
- service: nil,
}
- cfg := &Config{
+ cfg := &reload.Config{
Interval: time.Millisecond, // should crash here
Patterns: nil,
Services: services,
@@ -55,7 +54,7 @@ func Test_Interval(t *testing.T) {
}
func Test_NoServiceConfig(t *testing.T) {
- cfg := &Config{
+ cfg := &reload.Config{
Interval: time.Second,
Patterns: nil,
Services: nil,
diff --git a/tests/plugins/reload/configs/.rr-reload-2.yaml b/tests/plugins/reload/configs/.rr-reload-2.yaml
new file mode 100644
index 00000000..ab32b2d1
--- /dev/null
+++ b/tests/plugins/reload/configs/.rr-reload-2.yaml
@@ -0,0 +1,44 @@
+server:
+ command: php ../../psr-worker-bench.php
+ user: ''
+ group: ''
+ env:
+ RR_HTTP: 'true'
+ relay: pipes
+ relayTimeout: 20s
+http:
+ debug: true
+ address: '127.0.0.1:27388'
+ maxRequestSize: 1024
+ middleware:
+ - ''
+ uploads:
+ forbid:
+ - .php
+ - .exe
+ - .bat
+ trustedSubnets:
+ - 10.0.0.0/8
+ - 127.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ - '::1/128'
+ - 'fc00::/7'
+ - 'fe80::/10'
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+reload:
+ interval: 2s
+ patterns:
+ - .txt
+ services:
+ http:
+ dirs:
+ - './unit_tests'
+ recursive: true
diff --git a/tests/plugins/reload/configs/.rr-reload-3.yaml b/tests/plugins/reload/configs/.rr-reload-3.yaml
new file mode 100644
index 00000000..881d9b88
--- /dev/null
+++ b/tests/plugins/reload/configs/.rr-reload-3.yaml
@@ -0,0 +1,46 @@
+server:
+ command: php ../../psr-worker-bench.php
+ user: ''
+ group: ''
+ env:
+ RR_HTTP: 'true'
+ relay: pipes
+ relayTimeout: 20s
+http:
+ debug: true
+ address: '127.0.0.1:37388'
+ maxRequestSize: 1024
+ middleware:
+ - ''
+ uploads:
+ forbid:
+ - .php
+ - .exe
+ - .bat
+ trustedSubnets:
+ - 10.0.0.0/8
+ - 127.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ - '::1/128'
+ - 'fc00::/7'
+ - 'fe80::/10'
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+reload:
+ interval: 2s
+ patterns:
+ - .txt
+ services:
+ http:
+ dirs:
+ - './unit_tests'
+ - './unit_tests_copied'
+ - './dir1'
+ recursive: true
diff --git a/tests/plugins/reload/configs/.rr-reload-4.yaml b/tests/plugins/reload/configs/.rr-reload-4.yaml
new file mode 100644
index 00000000..d47df558
--- /dev/null
+++ b/tests/plugins/reload/configs/.rr-reload-4.yaml
@@ -0,0 +1,46 @@
+server:
+ command: php ../../psr-worker-bench.php
+ user: ''
+ group: ''
+ env:
+ RR_HTTP: 'true'
+ relay: pipes
+ relayTimeout: 20s
+http:
+ debug: true
+ address: '127.0.0.1:22766'
+ maxRequestSize: 1024
+ middleware:
+ - ''
+ uploads:
+ forbid:
+ - .php
+ - .exe
+ - .bat
+ trustedSubnets:
+ - 10.0.0.0/8
+ - 127.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ - '::1/128'
+ - 'fc00::/7'
+ - 'fe80::/10'
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+reload:
+ interval: 2s
+ patterns:
+ - .aaa
+ services:
+ http:
+ dirs:
+ - './unit_tests'
+ - './unit_tests_copied'
+ - './dir1'
+ recursive: false
diff --git a/tests/plugins/reload/configs/.rr-reload.yaml b/tests/plugins/reload/configs/.rr-reload.yaml
new file mode 100644
index 00000000..794c41f2
--- /dev/null
+++ b/tests/plugins/reload/configs/.rr-reload.yaml
@@ -0,0 +1,44 @@
+server:
+ command: php ../../psr-worker-bench.php
+ user: ''
+ group: ''
+ env:
+ RR_HTTP: 'true'
+ relay: pipes
+ relayTimeout: 20s
+http:
+ debug: true
+ address: '127.0.0.1:22388'
+ maxRequestSize: 1024
+ middleware:
+ - ''
+ uploads:
+ forbid:
+ - .php
+ - .exe
+ - .bat
+ trustedSubnets:
+ - 10.0.0.0/8
+ - 127.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+ - '::1/128'
+ - 'fc00::/7'
+ - 'fe80::/10'
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error
+reload:
+ interval: 1s
+ patterns:
+ - .txt
+ services:
+ http:
+ dirs:
+ - './unit_tests'
+ recursive: true
diff --git a/tests/plugins/reload/reload_plugin_test.go b/tests/plugins/reload/reload_plugin_test.go
new file mode 100644
index 00000000..c83d4787
--- /dev/null
+++ b/tests/plugins/reload/reload_plugin_test.go
@@ -0,0 +1,827 @@
+package reload
+
+import (
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "strconv"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/reload"
+ "github.com/spiral/roadrunner/v2/plugins/resetter"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/tests/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+const testDir string = "unit_tests"
+const testCopyToDir string = "unit_tests_copied"
+const dir1 string = "dir1"
+const hugeNumberOfFiles uint = 500
+
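+// Each test below creates its working directories next to the test binary and
+// cleans them up through the freeResources helper.
+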
+func TestReloadInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-reload.yaml",
+ Prefix: "rr",
+ }
+
+	// remove any leftovers from previous runs
+ assert.NoError(t, freeResources(testDir))
+ err = os.Mkdir(testDir, 0755)
+ assert.NoError(t, err)
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
+ mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).Times(2)
+ mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).Times(2)
+ mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").Times(1)
+ mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").Times(1)
+ mockLogger.EXPECT().Info("HTTP listeners successfully re-added").Times(1)
+ mockLogger.EXPECT().Info("HTTP plugin successfully restarted").Times(1)
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all expectation for worker log messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &reload.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.NoError(t, err)
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("ReloadTestInit", reloadTestInit)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+ assert.NoError(t, freeResources(testDir))
+}
+
+func reloadTestInit(t *testing.T) {
+ err := ioutil.WriteFile(filepath.Join(testDir, "file.txt"), //nolint:gosec
+ []byte{}, 0755)
+ assert.NoError(t, err)
+}
+
+func TestReloadHugeNumberOfFiles(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-reload.yaml",
+ Prefix: "rr",
+ }
+
+	// remove any leftovers from previous runs
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+
+ assert.NoError(t, os.Mkdir(testDir, 0755))
+ assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
+ mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP listeners successfully re-added").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all expectation for worker log messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &reload.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.NoError(t, err)
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("ReloadTestHugeNumberOfFiles", reloadHugeNumberOfFiles)
+ t.Run("ReloadRandomlyChangeFile", randomlyChangeFile)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+}
+
+func randomlyChangeFile(t *testing.T) {
+ // we know, that directory contains 500 files (0-499)
+ // let's try to randomly change it
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(500) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
+ err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".txt"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
+ assert.NoError(t, err)
+ }
+}
+
+func reloadHugeNumberOfFiles(t *testing.T) {
+ for i := uint(0); i < hugeNumberOfFiles; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
+ }
+}
+
+// Only creating files with the .txt extension should produce watcher events
+func TestReloadFilterFileExt(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-reload-2.yaml",
+ Prefix: "rr",
+ }
+
+	// remove any leftovers from previous runs
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, os.Mkdir(testDir, 0755))
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
+ mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(100)
+ mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").Times(1)
+ mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").Times(1)
+ mockLogger.EXPECT().Info("HTTP listeners successfully re-added").Times(1)
+ mockLogger.EXPECT().Info("HTTP plugin successfully restarted").Times(1)
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all expectation for worker log messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &reload.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.NoError(t, err)
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("ReloadMakeFiles", reloadMakeFiles)
+ t.Run("ReloadFilteredExt", reloadFilteredExt)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ assert.NoError(t, freeResources(testDir))
+}
+
+func reloadMakeFiles(t *testing.T) {
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
+ }
+}
+
+func reloadFilteredExt(t *testing.T) {
+ // change files with abc extension
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(1000) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
+ err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".abc"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
+ assert.NoError(t, err)
+ }
+
+ // change files with def extension
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(1000) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(hugeNumberOfFiles)) // nolint:gosec
+ err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".def"), []byte("Hello, Gophers!"), 0755) // nolint:gosec
+ assert.NoError(t, err)
+ }
+}
+
+// Only creating files with the .txt extension should produce watcher events
+func TestReloadCopy500(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-reload-3.yaml",
+ Prefix: "rr",
+ }
+
+	// remove any leftovers from previous runs
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+ assert.NoError(t, freeResources(dir1))
+
+ assert.NoError(t, os.Mkdir(testDir, 0755))
+ assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
+ assert.NoError(t, os.Mkdir(dir1, 0755))
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+ //
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
+ mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
+ mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
+ mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
+ mockLogger.EXPECT().Debug("file was removed from watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
+ mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
+ mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP listeners successfully re-added").MinTimes(1)
+ mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
+	mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all expectation for worker log messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &reload.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.NoError(t, err)
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+	// Scenario
+	// 1
+	// Create 300 files with txt, abc, def extensions
+	// Copy the files to the unit_tests_copied dir
+	// 2
+	// Delete both dirs, recreate
+	// Create 300 files with txt, abc, def extensions
+	// Move the files to the unit_tests_copied dir
+	// 3
+	// Recursive dirs support
+
+ t.Run("ReloadMake300Files", reloadMake300Files)
+ t.Run("ReloadCopyFiles", reloadCopyFiles)
+ t.Run("ReloadRecursiveDirsSupport", copyFilesRecursive)
+ t.Run("RandomChangesInRecursiveDirs", randomChangesInRecursiveDirs)
+ t.Run("RemoveFilesSupport", removeFilesSupport)
+ t.Run("ReloadMoveSupport", reloadMoveSupport)
+
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+ assert.NoError(t, freeResources(dir1))
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func reloadMoveSupport(t *testing.T) {
+ t.Run("MoveSupportCopy", copyFilesRecursive)
+ // move some files
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(500) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(100)) // nolint:gosec
+ rDir := rand.Int63n(9) // nolint:gosec
+ rExt := rand.Int63n(3) // nolint:gosec
+
+ ext := []string{
+ ".txt",
+ ".abc",
+ ".def",
+ }
+
+ // change files with def extension
+ dirs := []string{
+ "dir1",
+ "dir1/dir2",
+ "dir1/dir2/dir3",
+ "dir1/dir2/dir3/dir4",
+ "dir1/dir2/dir3/dir4/dir5",
+ "dir1/dir2/dir3/dir4/dir5/dir6",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
+ }
+
+ // move file
+ err := os.Rename(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]), filepath.Join(dirs[rDir+1], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
+ assert.NoError(t, err)
+ }
+}
+
+func removeFilesSupport(t *testing.T) {
+ // remove some files
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(500) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(100)) // nolint:gosec
+ rDir := rand.Int63n(10) // nolint:gosec
+ rExt := rand.Int63n(3) // nolint:gosec
+
+ ext := []string{
+ ".txt",
+ ".abc",
+ ".def",
+ }
+
+ // change files with def extension
+ dirs := []string{
+ "dir1",
+ "dir1/dir2",
+ "dir1/dir2/dir3",
+ "dir1/dir2/dir3/dir4",
+ "dir1/dir2/dir3/dir4/dir5",
+ "dir1/dir2/dir3/dir4/dir5/dir6",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
+ }
+ // the file may already have been deleted, so ignore the error
+ _ = os.Remove(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
+ }
+}
+
+func randomChangesInRecursiveDirs(t *testing.T) {
+ // nested directories in which files are created or updated
+ dirs := []string{
+ "dir1",
+ "dir1/dir2",
+ "dir1/dir2/dir3",
+ "dir1/dir2/dir3/dir4",
+ "dir1/dir2/dir3/dir4/dir5",
+ "dir1/dir2/dir3/dir4/dir5/dir6",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
+ "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
+ }
+
+ ext := []string{
+ ".txt",
+ ".abc",
+ ".def",
+ }
+
+ filenames := []string{
+ "file_", // should be update
+ "foo_", // should be created
+ "bar_", // should be created
+ }
+ for i := 0; i < 10; i++ {
+ // rand sleep
+ rSleep := rand.Int63n(500) // nolint:gosec
+ time.Sleep(time.Millisecond * time.Duration(rSleep))
+ rNum := rand.Int63n(int64(100)) // nolint:gosec
+ rDir := rand.Int63n(10) // nolint:gosec
+ rExt := rand.Int63n(3) // nolint:gosec
+ rName := rand.Int63n(3) // nolint:gosec
+
+ err := ioutil.WriteFile(filepath.Join(dirs[rDir], filenames[rName]+strconv.Itoa(int(rNum))+ext[rExt]), []byte("Hello, Gophers!"), 0755) // nolint:gosec
+ assert.NoError(t, err)
+ }
+}
+
+func copyFilesRecursive(t *testing.T) {
+ err := copyDir(testDir, "dir1")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9")
+ assert.NoError(t, err)
+ err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10")
+ assert.NoError(t, err)
+}
+
+func reloadCopyFiles(t *testing.T) {
+ err := copyDir(testDir, testCopyToDir)
+ assert.NoError(t, err)
+
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+
+ assert.NoError(t, os.Mkdir(testDir, 0755))
+ assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
+
+ // recreate files
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
+ }
+
+ err = copyDir(testDir, testCopyToDir)
+ assert.NoError(t, err)
+}
+
+func reloadMake300Files(t *testing.T) {
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
+ }
+ for i := uint(0); i < 100; i++ {
+ assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
+ }
+}
+
+func TestReloadNoRecursion(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-reload-4.yaml",
+ Prefix: "rr",
+ }
+
+ // clean up any leftovers from previous runs
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+ assert.NoError(t, freeResources(dir1))
+
+ assert.NoError(t, os.Mkdir(testDir, 0755))
+ assert.NoError(t, os.Mkdir(dir1, 0755))
+ assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ // the HTTP server should not be restarted; all events for files with non-watched extensions should be skipped
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").Times(1)
+ mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
+ mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all for the remaining worker log and error messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &reload.Plugin{},
+ &resetter.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ assert.NoError(t, err)
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ t.Run("ReloadMakeFiles", reloadMakeFiles) // make files in the testDir
+ t.Run("ReloadCopyFilesRecursive", reloadCopyFiles)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+
+ assert.NoError(t, freeResources(testDir))
+ assert.NoError(t, freeResources(testCopyToDir))
+ assert.NoError(t, freeResources(dir1))
+}
+
+// ========================================================================
+
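+// freeResources removes the given directory and everything under it.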
+func freeResources(path string) error {
+ return os.RemoveAll(path)
+}
+
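+// makeFile creates an empty file with the given name inside testDir.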
+func makeFile(filename string) error {
+ return ioutil.WriteFile(filepath.Join(testDir, filename), []byte{}, 0755) //nolint:gosec
+}
+
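+// copyDir recursively copies the contents of src into dst, skipping symlinks.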
+func copyDir(src string, dst string) error {
+ src = filepath.Clean(src)
+ dst = filepath.Clean(dst)
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !si.IsDir() {
+ return errors.E(errors.Str("source is not a directory"))
+ }
+
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ err = os.MkdirAll(dst, si.Mode())
+ if err != nil {
+ return err
+ }
+
+ entries, err := ioutil.ReadDir(src)
+ if err != nil {
+ return err
+ }
+
+ for _, entry := range entries {
+ srcPath := filepath.Join(src, entry.Name())
+ dstPath := filepath.Join(dst, entry.Name())
+
+ if entry.IsDir() {
+ err = copyDir(srcPath, dstPath)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Skip symlinks.
+ if entry.Mode()&os.ModeSymlink != 0 {
+ continue
+ }
+
+ err = copyFile(srcPath, dstPath)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
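+// copyFile copies src to dst, syncs the destination and preserves the source file mode.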
+func copyFile(src, dst string) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return errors.E(err)
+ }
+ defer func() {
+ _ = in.Close()
+ }()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return errors.E(err)
+ }
+ defer func() {
+ _ = out.Close()
+ }()
+
+ _, err = io.Copy(out, in)
+ if err != nil {
+ return errors.E(err)
+ }
+
+ err = out.Sync()
+ if err != nil {
+ return errors.E(err)
+ }
+
+ si, err := os.Stat(src)
+ if err != nil {
+ return errors.E(err)
+ }
+ err = os.Chmod(dst, si.Mode())
+ if err != nil {
+ return errors.E(err)
+ }
+ return nil
+}
diff --git a/tests/plugins/resetter/.rr-resetter.yaml b/tests/plugins/resetter/.rr-resetter.yaml
new file mode 100644
index 00000000..e50ca9c9
--- /dev/null
+++ b/tests/plugins/resetter/.rr-resetter.yaml
@@ -0,0 +1,16 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+rpc:
+ listen: tcp://127.0.0.1:6001
+ disabled: false
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/resetter/resetter_test.go b/tests/plugins/resetter/resetter_test.go
new file mode 100644
index 00000000..89dd43c7
--- /dev/null
+++ b/tests/plugins/resetter/resetter_test.go
@@ -0,0 +1,113 @@
+package resetter
+
+import (
+ "net"
+ "net/rpc"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/resetter"
+ rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestResetterInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &config.Viper{
+ Path: ".rr-resetter.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &server.Plugin{},
+ &logger.ZapLogger{},
+ &resetter.Plugin{},
+ &rpcPlugin.Plugin{},
+ &Plugin1{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ stopCh := make(chan struct{}, 1)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+
+ t.Run("ResetterRpcTest", resetterRPCTest)
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func resetterRPCTest(t *testing.T) {
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ assert.NoError(t, err)
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ // reset the test plugin via the resetter RPC service
+
+ var ret bool
+ err = client.Call("resetter.Reset", "resetter.plugin1", &ret)
+ assert.NoError(t, err)
+ assert.True(t, ret)
+ ret = false
+
+ var services []string
+ err = client.Call("resetter.List", nil, &services)
+ assert.NotNil(t, services)
+ assert.NoError(t, err)
+ if services[0] != "resetter.plugin1" {
+ t.Fatal("no enough services")
+ }
+}
diff --git a/tests/plugins/resetter/test_plugin.go b/tests/plugins/resetter/test_plugin.go
new file mode 100644
index 00000000..7d53bca0
--- /dev/null
+++ b/tests/plugins/resetter/test_plugin.go
@@ -0,0 +1,66 @@
+package resetter
+
+import (
+ "context"
+ "time"
+
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
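+// testPoolConfig is the worker pool configuration used by Plugin1.Reset below.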
+var testPoolConfig = poolImpl.Config{
+ NumWorkers: 10,
+ MaxJobs: 100,
+ AllocateTimeout: time.Second * 10,
+ DestroyTimeout: time.Second * 10,
+ Supervisor: &poolImpl.SupervisorConfig{
+ WatchTick: 60,
+ TTL: 1000,
+ IdleTTL: 10,
+ ExecTTL: 10,
+ MaxWorkerMemory: 1000,
+ },
+}
+
+// Plugin1 is a minimal test plugin that exposes a Reset method for the resetter.
+type Plugin1 struct {
+ config config.Configurer
+ server server.Server
+}
+
+func (p1 *Plugin1) Init(cfg config.Configurer, server server.Server) error {
+ p1.config = cfg
+ p1.server = server
+ return nil
+}
+
+func (p1 *Plugin1) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (p1 *Plugin1) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin1) Name() string {
+ return "resetter.plugin1"
+}
+
+func (p1 *Plugin1) Reset() error {
+ pool, err := p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ panic(err)
+ }
+ pool.Destroy(context.Background())
+
+ pool, err = p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ _ = pool
+
+ return nil
+}
diff --git a/service/rpc/config_test.go b/tests/plugins/rpc/config_test.go
index 70d58e84..df5fa391 100644..100755
--- a/service/rpc/config_test.go
+++ b/tests/plugins/rpc/config_test.go
@@ -1,44 +1,15 @@
package rpc
import (
+ "runtime"
"testing"
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/service"
+ "github.com/spiral/roadrunner/v2/plugins/rpc"
"github.com/stretchr/testify/assert"
)
-type testCfg struct{ cfg string }
-
-func (cfg *testCfg) Get(name string) service.Config { return nil }
-func (cfg *testCfg) Unmarshal(out interface{}) error {
- j := json.ConfigCompatibleWithStandardLibrary
- return j.Unmarshal([]byte(cfg.cfg), out)
-}
-
-func Test_Config_Hydrate(t *testing.T) {
- cfg := &testCfg{`{"enable": true, "listen": "tcp://:18001"}`}
- c := &Config{}
-
- assert.NoError(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error(t *testing.T) {
- cfg := &testCfg{`{"enable": true, "listen": "invalid"}`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := &testCfg{`{"enable": true, "listen": "invalid"`}
- c := &Config{}
-
- assert.Error(t, c.Hydrate(cfg))
-}
-
func TestConfig_Listener(t *testing.T) {
- cfg := &Config{Listen: "tcp://:18001"}
+ cfg := &rpc.Config{Listen: "tcp://:18001"}
ln, err := cfg.Listener()
assert.NoError(t, err)
@@ -51,11 +22,15 @@ func TestConfig_Listener(t *testing.T) {
}()
assert.Equal(t, "tcp", ln.Addr().Network())
- assert.Equal(t, "0.0.0.0:18001", ln.Addr().String())
+ if runtime.GOOS == "windows" {
+ assert.Equal(t, "[::]:18001", ln.Addr().String())
+ } else {
+ assert.Equal(t, "0.0.0.0:18001", ln.Addr().String())
+ }
}
func TestConfig_ListenerUnix(t *testing.T) {
- cfg := &Config{Listen: "unix://file.sock"}
+ cfg := &rpc.Config{Listen: "unix://file.sock"}
ln, err := cfg.Listener()
assert.NoError(t, err)
@@ -72,7 +47,7 @@ func TestConfig_ListenerUnix(t *testing.T) {
}
func Test_Config_Error(t *testing.T) {
- cfg := &Config{Listen: "uni:unix.sock"}
+ cfg := &rpc.Config{Listen: "uni:unix.sock"}
ln, err := cfg.Listener()
assert.Nil(t, ln)
assert.Error(t, err)
@@ -80,7 +55,7 @@ func Test_Config_Error(t *testing.T) {
}
func Test_Config_ErrorMethod(t *testing.T) {
- cfg := &Config{Listen: "xinu://unix.sock"}
+ cfg := &rpc.Config{Listen: "xinu://unix.sock"}
ln, err := cfg.Listener()
assert.Nil(t, ln)
@@ -88,7 +63,7 @@ func Test_Config_ErrorMethod(t *testing.T) {
}
func TestConfig_Dialer(t *testing.T) {
- cfg := &Config{Listen: "tcp://:18001"}
+ cfg := &rpc.Config{Listen: "tcp://:18001"}
ln, _ := cfg.Listener()
defer func() {
@@ -113,7 +88,7 @@ func TestConfig_Dialer(t *testing.T) {
}
func TestConfig_DialerUnix(t *testing.T) {
- cfg := &Config{Listen: "unix://file.sock"}
+ cfg := &rpc.Config{Listen: "unix://file.sock"}
ln, _ := cfg.Listener()
defer func() {
@@ -138,7 +113,7 @@ func TestConfig_DialerUnix(t *testing.T) {
}
func Test_Config_DialerError(t *testing.T) {
- cfg := &Config{Listen: "uni:unix.sock"}
+ cfg := &rpc.Config{Listen: "uni:unix.sock"}
ln, err := cfg.Dialer()
assert.Nil(t, ln)
assert.Error(t, err)
@@ -146,7 +121,7 @@ func Test_Config_DialerError(t *testing.T) {
}
func Test_Config_DialerErrorMethod(t *testing.T) {
- cfg := &Config{Listen: "xinu://unix.sock"}
+ cfg := &rpc.Config{Listen: "xinu://unix.sock"}
ln, err := cfg.Dialer()
assert.Nil(t, ln)
@@ -154,11 +129,7 @@ func Test_Config_DialerErrorMethod(t *testing.T) {
}
func Test_Config_Defaults(t *testing.T) {
- c := &Config{}
- err := c.InitDefaults()
- if err != nil {
- t.Errorf("error during the InitDefaults: error %v", err)
- }
- assert.Equal(t, true, c.Enable)
+ c := &rpc.Config{}
+ c.InitDefaults()
assert.Equal(t, "tcp://127.0.0.1:6001", c.Listen)
}
diff --git a/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml b/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml
new file mode 100644
index 00000000..5ab359d3
--- /dev/null
+++ b/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml
@@ -0,0 +1,3 @@
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/rpc/configs/.rr.yaml b/tests/plugins/rpc/configs/.rr.yaml
new file mode 100644
index 00000000..67d935e3
--- /dev/null
+++ b/tests/plugins/rpc/configs/.rr.yaml
@@ -0,0 +1,5 @@
+rpc:
+ listen: tcp://127.0.0.1:6001
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/rpc/plugin1.go b/tests/plugins/rpc/plugin1.go
new file mode 100644
index 00000000..6843b396
--- /dev/null
+++ b/tests/plugins/rpc/plugin1.go
@@ -0,0 +1,42 @@
+package rpc
+
+import (
+ "fmt"
+
+ "github.com/spiral/roadrunner/v2/plugins/config"
+)
+
+type Plugin1 struct {
+ config config.Configurer
+}
+
+func (p1 *Plugin1) Init(cfg config.Configurer) error {
+ p1.config = cfg
+ return nil
+}
+
+func (p1 *Plugin1) Serve() chan error {
+ errCh := make(chan error, 1)
+ return errCh
+}
+
+func (p1 *Plugin1) Stop() error {
+ return nil
+}
+
+func (p1 *Plugin1) Name() string {
+ return "rpc_test.plugin1"
+}
+
+func (p1 *Plugin1) RPC() interface{} {
+ return &PluginRPC{srv: p1}
+}
+
+type PluginRPC struct {
+ srv *Plugin1
+}
+
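+// Hello is reachable over RPC as "rpc_test.plugin1.Hello" (the plugin name returned
+// by Name() plus the method name), as exercised by Plugin2.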
+func (r *PluginRPC) Hello(in string, out *string) error {
+ *out = fmt.Sprintf("Hello, username: %s", in)
+ return nil
+}
diff --git a/tests/plugins/rpc/plugin2.go b/tests/plugins/rpc/plugin2.go
new file mode 100644
index 00000000..2c47158f
--- /dev/null
+++ b/tests/plugins/rpc/plugin2.go
@@ -0,0 +1,53 @@
+package rpc
+
+import (
+ "net"
+ "net/rpc"
+ "time"
+
+ "github.com/spiral/errors"
+ goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
+)
+
+// Plugin2 calls Plugin1 via RPC.
+// This only simulates an external call for the test;
+// it is not a pattern to follow in real plugins.
+type Plugin2 struct {
+}
+
+func (p2 *Plugin2) Init() error {
+ return nil
+}
+
+func (p2 *Plugin2) Serve() chan error {
+ errCh := make(chan error, 1)
+
+ go func() {
+ time.Sleep(time.Second * 3)
+
+ conn, err := net.Dial("tcp", "127.0.0.1:6001")
+ if err != nil {
+ errCh <- errors.E(errors.Serve, err)
+ return
+ }
+ client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
+ var ret string
+ err = client.Call("rpc_test.plugin1.Hello", "Valery", &ret)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ if ret != "Hello, username: Valery" {
+ errCh <- errors.E("wrong response")
+ return
+ }
+ // report Disabled to stop execution gracefully
+ errCh <- errors.E(errors.Disabled)
+ }()
+
+ return errCh
+}
+
+func (p2 *Plugin2) Stop() error {
+ return nil
+}
diff --git a/tests/plugins/rpc/rpc_test.go b/tests/plugins/rpc/rpc_test.go
new file mode 100644
index 00000000..98959b28
--- /dev/null
+++ b/tests/plugins/rpc/rpc_test.go
@@ -0,0 +1,188 @@
+package rpc
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/rpc"
+ "github.com/stretchr/testify/assert"
+)
+
+// graph https://bit.ly/3ensdNb
+func TestRpcInit(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&Plugin1{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&Plugin2{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v := &config.Viper{}
+ v.Path = "configs/.rr.yaml"
+ v.Prefix = "rr"
+ err = cont.Register(v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&rpc.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig := make(chan os.Signal, 1)
+
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ tt := time.NewTimer(time.Second * 10)
+
+ go func() {
+ defer wg.Done()
+ defer tt.Stop()
+ for {
+ select {
+ case e := <-ch:
+ // Plugin2 reports Disabled once its check is done; this is the normal stop path
+ if errors.Is(errors.Disabled, e.Error) {
+ return
+ }
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-tt.C:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ assert.Fail(t, "timeout")
+ }
+ }
+ }()
+
+ wg.Wait()
+}
+
+// graph https://bit.ly/3ensdNb
+func TestRpcDisabled(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&Plugin1{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&Plugin2{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ v := &config.Viper{}
+ v.Path = "configs/.rr-rpc-disabled.yaml"
+ v.Prefix = "rr"
+ err = cont.Register(v)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&rpc.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sig := make(chan os.Signal, 1)
+
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+ tt := time.NewTimer(time.Second * 20)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ defer tt.Stop()
+ for {
+ select {
+ case e := <-ch:
+ // RPC is turned off, so this should be a dial error
+ if errors.Is(errors.Disabled, e.Error) {
+ assert.FailNow(t, "should not be disabled error")
+ }
+ assert.Error(t, e.Error)
+ err = cont.Stop()
+ assert.Error(t, err)
+ return
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-tt.C:
+ // timeout
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+}
diff --git a/tests/plugins/server/configs/.rr-no-app-section.yaml b/tests/plugins/server/configs/.rr-no-app-section.yaml
new file mode 100644
index 00000000..4888d249
--- /dev/null
+++ b/tests/plugins/server/configs/.rr-no-app-section.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php ../../client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pipes"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/configs/.rr-sockets.yaml b/tests/plugins/server/configs/.rr-sockets.yaml
new file mode 100644
index 00000000..6b5b6bf5
--- /dev/null
+++ b/tests/plugins/server/configs/.rr-sockets.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php socket.php"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "unix://unix.sock"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/configs/.rr-tcp.yaml b/tests/plugins/server/configs/.rr-tcp.yaml
new file mode 100644
index 00000000..ee1d450a
--- /dev/null
+++ b/tests/plugins/server/configs/.rr-tcp.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php tcp.php"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "tcp://localhost:9999"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/configs/.rr-wrong-command.yaml b/tests/plugins/server/configs/.rr-wrong-command.yaml
new file mode 100644
index 00000000..e66349dd
--- /dev/null
+++ b/tests/plugins/server/configs/.rr-wrong-command.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php some_absent_file.php"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pipes"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/configs/.rr-wrong-relay.yaml b/tests/plugins/server/configs/.rr-wrong-relay.yaml
new file mode 100644
index 00000000..6f532c3f
--- /dev/null
+++ b/tests/plugins/server/configs/.rr-wrong-relay.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php ../../client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pupes"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/configs/.rr.yaml b/tests/plugins/server/configs/.rr.yaml
new file mode 100644
index 00000000..4888d249
--- /dev/null
+++ b/tests/plugins/server/configs/.rr.yaml
@@ -0,0 +1,12 @@
+server:
+ command: "php ../../client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_CONFIG": "/some/place/on/the/C134"
+ "RR_CONFIG2": "C138"
+ relay: "pipes"
+ relayTimeout: "20s"
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/server/plugin_pipes.go b/tests/plugins/server/plugin_pipes.go
new file mode 100644
index 00000000..5eb2fed1
--- /dev/null
+++ b/tests/plugins/server/plugin_pipes.go
@@ -0,0 +1,133 @@
+package server
+
+import (
+ "context"
+ "time"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
+ "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
+const ConfigSection = "server"
+const Response = "test"
+
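+// testPoolConfig is the worker pool configuration shared by the Foo test plugins.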
+var testPoolConfig = poolImpl.Config{
+ NumWorkers: 10,
+ MaxJobs: 100,
+ AllocateTimeout: time.Second * 10,
+ DestroyTimeout: time.Second * 10,
+ Supervisor: &poolImpl.SupervisorConfig{
+ WatchTick: 60,
+ TTL: 1000,
+ IdleTTL: 10,
+ ExecTTL: 10,
+ MaxWorkerMemory: 1000,
+ },
+}
+
+type Foo struct {
+ configProvider config.Configurer
+ wf server.Server
+ pool pool.Pool
+}
+
+func (f *Foo) Init(p config.Configurer, workerFactory server.Server) error {
+ f.configProvider = p
+ f.wf = workerFactory
+ return nil
+}
+
+func (f *Foo) Serve() chan error {
+ const op = errors.Op("serve")
+
+ // test payload for echo
+ r := payload.Payload{
+ Context: nil,
+ Body: []byte(Response),
+ }
+
+ errCh := make(chan error, 1)
+
+ conf := &server.Config{}
+ var err error
+ err = f.configProvider.UnmarshalKey(ConfigSection, conf)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test CmdFactory
+ cmd, err := f.wf.CmdFactory(nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+ if cmd == nil {
+ errCh <- errors.E(op, "command is nil")
+ return errCh
+ }
+
+ // test worker creation
+ w, err := f.wf.NewWorker(context.Background(), nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test that our worker is functional
+ sw, err := worker.From(w)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ rsp, err := sw.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ // there should be no errors here
+ err = sw.Stop()
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool
+ f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool execution
+ rsp, err = f.pool.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // echoing "test" should return "test"
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ return errCh
+}
+
+func (f *Foo) Stop() error {
+ f.pool.Destroy(context.Background())
+ return nil
+}
diff --git a/tests/plugins/server/plugin_sockets.go b/tests/plugins/server/plugin_sockets.go
new file mode 100644
index 00000000..ede67ded
--- /dev/null
+++ b/tests/plugins/server/plugin_sockets.go
@@ -0,0 +1,113 @@
+package server
+
+import (
+ "context"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
+type Foo2 struct {
+ configProvider config.Configurer
+ wf server.Server
+ pool pool.Pool
+}
+
+func (f *Foo2) Init(p config.Configurer, workerFactory server.Server) error {
+ f.configProvider = p
+ f.wf = workerFactory
+ return nil
+}
+
+func (f *Foo2) Serve() chan error {
+ const op = errors.Op("serve")
+ var err error
+ errCh := make(chan error, 1)
+ conf := &server.Config{}
+
+ // test payload for echo
+ r := payload.Payload{
+ Context: nil,
+ Body: []byte(Response),
+ }
+
+ err = f.configProvider.UnmarshalKey(ConfigSection, conf)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test CmdFactory
+ cmd, err := f.wf.CmdFactory(nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+ if cmd == nil {
+ errCh <- errors.E(op, "command is nil")
+ return errCh
+ }
+
+ // test worker creation
+ w, err := f.wf.NewWorker(context.Background(), nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test that our worker is functional
+ sw, err := worker.From(w)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ rsp, err := sw.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ // there should be no errors here
+ err = sw.Stop()
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool
+ f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool execution
+ rsp, err = f.pool.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // echoing "test" should return "test"
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ return errCh
+}
+
+func (f *Foo2) Stop() error {
+ f.pool.Destroy(context.Background())
+ return nil
+}
diff --git a/tests/plugins/server/plugin_tcp.go b/tests/plugins/server/plugin_tcp.go
new file mode 100644
index 00000000..98c13b2b
--- /dev/null
+++ b/tests/plugins/server/plugin_tcp.go
@@ -0,0 +1,113 @@
+package server
+
+import (
+ "context"
+
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/pool"
+ "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+)
+
+type Foo3 struct {
+ configProvider config.Configurer
+ wf server.Server
+ pool pool.Pool
+}
+
+func (f *Foo3) Init(p config.Configurer, workerFactory server.Server) error {
+ f.configProvider = p
+ f.wf = workerFactory
+ return nil
+}
+
+func (f *Foo3) Serve() chan error {
+ const op = errors.Op("serve")
+ var err error
+ errCh := make(chan error, 1)
+ conf := &server.Config{}
+
+ // test payload for echo
+ r := payload.Payload{
+ Context: nil,
+ Body: []byte(Response),
+ }
+
+ err = f.configProvider.UnmarshalKey(ConfigSection, conf)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test CmdFactory
+ cmd, err := f.wf.CmdFactory(nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+ if cmd == nil {
+ errCh <- errors.E(op, "command is nil")
+ return errCh
+ }
+
+ // test worker creation
+ w, err := f.wf.NewWorker(context.Background(), nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test that our worker is functional
+ sw, err := worker.From(w)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ rsp, err := sw.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ // there should be no errors here
+ err = sw.Stop()
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool
+ f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // test pool execution
+ rsp, err = f.pool.Exec(r)
+ if err != nil {
+ errCh <- err
+ return errCh
+ }
+
+ // echoing "test" should return "test"
+ if string(rsp.Body) != Response {
+ errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
+ return errCh
+ }
+
+ return errCh
+}
+
+func (f *Foo3) Stop() error {
+ f.pool.Destroy(context.Background())
+ return nil
+}
diff --git a/tests/plugins/server/server_plugin_test.go b/tests/plugins/server/server_plugin_test.go
new file mode 100644
index 00000000..d63b0ccd
--- /dev/null
+++ b/tests/plugins/server/server_plugin_test.go
@@ -0,0 +1,361 @@
+package server
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAppPipes(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errCh, err := container.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // stop by CTRL+C
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+
+ tt := time.NewTimer(time.Second * 10)
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ defer tt.Stop()
+ for {
+ select {
+ case e := <-errCh:
+ assert.NoError(t, e.Error)
+ assert.NoError(t, container.Stop())
+ return
+ case <-c:
+ er := container.Stop()
+ assert.NoError(t, er)
+ return
+ case <-tt.C:
+ assert.NoError(t, container.Stop())
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+}
+
+func TestAppSockets(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr-sockets.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo2{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errCh, err := container.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // stop by CTRL+C
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+
+ // stop after 10 seconds
+ tt := time.NewTicker(time.Second * 10)
+
+ for {
+ select {
+ case e := <-errCh:
+ assert.NoError(t, e.Error)
+ assert.NoError(t, container.Stop())
+ return
+ case <-c:
+ er := container.Stop()
+ if er != nil {
+ panic(er)
+ }
+ return
+ case <-tt.C:
+ tt.Stop()
+ assert.NoError(t, container.Stop())
+ return
+ }
+ }
+}
+
+func TestAppTCP(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr-tcp.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo3{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ errCh, err := container.Serve()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // stop by CTRL+C
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+
+ // stop after 10 seconds
+ tt := time.NewTicker(time.Second * 10)
+
+ for {
+ select {
+ case e := <-errCh:
+ assert.NoError(t, e.Error)
+ assert.NoError(t, container.Stop())
+ return
+ case <-c:
+ er := container.Stop()
+ if er != nil {
+ panic(er)
+ }
+ return
+ case <-tt.C:
+ tt.Stop()
+ assert.NoError(t, container.Stop())
+ return
+ }
+ }
+}
+
+func TestAppWrongConfig(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rrrrrrrrrr.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo3{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Error(t, container.Init())
+}
+
+func TestAppWrongRelay(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr-wrong-relay.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo3{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ assert.Error(t, err)
+
+ _, err = container.Serve()
+ assert.Error(t, err)
+}
+
+func TestAppWrongCommand(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr-wrong-command.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo3{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = container.Serve()
+ assert.Error(t, err)
+}
+
+func TestAppNoAppSectionInConfig(t *testing.T) {
+ container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // config plugin
+ vp := &config.Viper{}
+ vp.Path = "configs/.rr-wrong-command.yaml"
+ vp.Prefix = "rr"
+ err = container.Register(vp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&server.Plugin{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&Foo3{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Register(&logger.ZapLogger{})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = container.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = container.Serve()
+ assert.Error(t, err)
+}
diff --git a/tests/plugins/server/socket.php b/tests/plugins/server/socket.php
new file mode 100644
index 00000000..3159c445
--- /dev/null
+++ b/tests/plugins/server/socket.php
@@ -0,0 +1,25 @@
+<?php
+/**
+ * @var Goridge\RelayInterface $relay
+ */
+
+use Spiral\Goridge;
+use Spiral\RoadRunner;
+
+require dirname(__DIR__) . "/../vendor/autoload.php";
+
+$relay = new Goridge\SocketRelay(
+ "unix.sock",
+ null,
+ Goridge\SocketRelay::SOCK_UNIX
+ );
+
+$rr = new RoadRunner\Worker($relay);
+
+while ($in = $rr->waitPayload()) {
+ try {
+ $rr->send((string)$in->body);
+ } catch (\Throwable $e) {
+ $rr->error((string)$e);
+ }
+}
diff --git a/tests/plugins/server/tcp.php b/tests/plugins/server/tcp.php
new file mode 100644
index 00000000..88c49848
--- /dev/null
+++ b/tests/plugins/server/tcp.php
@@ -0,0 +1,20 @@
+<?php
+/**
+ * @var Goridge\RelayInterface $relay
+ */
+
+use Spiral\Goridge;
+use Spiral\RoadRunner;
+
+require dirname(__DIR__) . "/../vendor/autoload.php";
+
+$relay = new Goridge\SocketRelay("localhost", 9999);
+$rr = new RoadRunner\Worker($relay);
+
+while ($in = $rr->waitPayload()) {
+ try {
+ $rr->send((string)$in->body);
+ } catch (\Throwable $e) {
+ $rr->error((string)$e);
+ }
+}
diff --git a/tests/plugins/static/config_test.go b/tests/plugins/static/config_test.go
new file mode 100644
index 00000000..f458eed3
--- /dev/null
+++ b/tests/plugins/static/config_test.go
@@ -0,0 +1,49 @@
+package static
+
+import (
+ "testing"
+
+ "github.com/spiral/roadrunner/v2/plugins/static"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConfig_Forbids(t *testing.T) {
+ cfg := static.Config{Static: struct {
+ Dir string
+ Forbid []string
+ Always []string
+ Request map[string]string
+ Response map[string]string
+ }{Dir: "", Forbid: []string{".php"}, Always: nil, Request: nil, Response: nil}}
+
+ assert.True(t, cfg.AlwaysForbid("index.php"))
+ assert.True(t, cfg.AlwaysForbid("index.PHP"))
+ assert.True(t, cfg.AlwaysForbid("phpadmin/index.bak.php"))
+ assert.False(t, cfg.AlwaysForbid("index.html"))
+}
+
+func TestConfig_Valid(t *testing.T) {
+ assert.NoError(t, (&static.Config{Static: struct {
+ Dir string
+ Forbid []string
+ Always []string
+ Request map[string]string
+ Response map[string]string
+ }{Dir: "./"}}).Valid())
+
+ assert.Error(t, (&static.Config{Static: struct {
+ Dir string
+ Forbid []string
+ Always []string
+ Request map[string]string
+ Response map[string]string
+ }{Dir: "./config.go"}}).Valid())
+
+ assert.Error(t, (&static.Config{Static: struct {
+ Dir string
+ Forbid []string
+ Always []string
+ Request map[string]string
+ Response map[string]string
+ }{Dir: "./dir/"}}).Valid())
+}
diff --git a/tests/plugins/static/configs/.rr-http-static-disabled.yaml b/tests/plugins/static/configs/.rr-http-static-disabled.yaml
new file mode 100644
index 00000000..d8ee15e0
--- /dev/null
+++ b/tests/plugins/static/configs/.rr-http-static-disabled.yaml
@@ -0,0 +1,33 @@
+server:
+ command: "php ../../http/client.php pid pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:21234
+ maxRequestSize: 1024
+ middleware: [ "gzip", "static" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ static:
+ dir: "abc" #not exists
+ forbid: [ ".php", ".htaccess" ]
+ request:
+ "Example-Request-Header": "Value"
+ # Automatically add headers to every response.
+ response:
+ "X-Powered-By": "RoadRunner"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/static/configs/.rr-http-static-files-disable.yaml b/tests/plugins/static/configs/.rr-http-static-files-disable.yaml
new file mode 100644
index 00000000..563d95cf
--- /dev/null
+++ b/tests/plugins/static/configs/.rr-http-static-files-disable.yaml
@@ -0,0 +1,33 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:45877
+ maxRequestSize: 1024
+ middleware: [ "gzip", "static" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ static:
+ dir: "../../../tests"
+ forbid: [ ".php" ]
+ request:
+ "Example-Request-Header": "Value"
+ # Automatically add headers to every response.
+ response:
+ "X-Powered-By": "RoadRunner"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/static/configs/.rr-http-static-files.yaml b/tests/plugins/static/configs/.rr-http-static-files.yaml
new file mode 100644
index 00000000..8961c6f4
--- /dev/null
+++ b/tests/plugins/static/configs/.rr-http-static-files.yaml
@@ -0,0 +1,34 @@
+server:
+ command: "php ../../http/client.php echo pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ debug: true
+ address: 127.0.0.1:34653
+ maxRequestSize: 1024
+ middleware: [ "gzip", "static" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ static:
+ dir: "../../../tests"
+ forbid: [ ".php", ".htaccess" ]
+ always: [ ".ico" ]
+ request:
+ "Example-Request-Header": "Value"
+ # Automatically add headers to every response.
+ response:
+ "X-Powered-By": "RoadRunner"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/static/configs/.rr-http-static.yaml b/tests/plugins/static/configs/.rr-http-static.yaml
new file mode 100644
index 00000000..0a1f5df4
--- /dev/null
+++ b/tests/plugins/static/configs/.rr-http-static.yaml
@@ -0,0 +1,31 @@
+server:
+ command: "php ../../http/client.php pid pipes"
+ user: ""
+ group: ""
+ env:
+ "RR_HTTP": "true"
+ relay: "pipes"
+ relayTimeout: "20s"
+
+http:
+ address: 127.0.0.1:21603
+ maxRequestSize: 1024
+ middleware: [ "gzip", "static" ]
+ trustedSubnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
+ uploads:
+ forbid: [ ".php", ".exe", ".bat" ]
+ static:
+ dir: "../../../tests"
+ forbid: [ "" ]
+ request:
+ "input": "custom-header"
+ response:
+ "output": "output-header"
+ pool:
+ numWorkers: 2
+ maxJobs: 0
+ allocateTimeout: 60s
+ destroyTimeout: 60s
+logs:
+ mode: development
+ level: error \ No newline at end of file
diff --git a/tests/plugins/static/static_plugin_test.go b/tests/plugins/static/static_plugin_test.go
new file mode 100644
index 00000000..74daaa16
--- /dev/null
+++ b/tests/plugins/static/static_plugin_test.go
@@ -0,0 +1,437 @@
+package static
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/golang/mock/gomock"
+ "github.com/spiral/endure"
+ "github.com/spiral/roadrunner/v2/plugins/config"
+ "github.com/spiral/roadrunner/v2/plugins/gzip"
+ httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
+ "github.com/spiral/roadrunner/v2/plugins/logger"
+ "github.com/spiral/roadrunner/v2/plugins/server"
+ "github.com/spiral/roadrunner/v2/plugins/static"
+ "github.com/spiral/roadrunner/v2/tests/mocks"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStaticPlugin(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-static.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ &static.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("ServeSample", serveStaticSample)
+ t.Run("StaticNotForbid", staticNotForbid)
+ t.Run("StaticHeaders", staticHeaders)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func staticHeaders(t *testing.T) {
+ req, err := http.NewRequest("GET", "http://localhost:21603/client.php", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if resp.Header.Get("Output") != "output-header" {
+ t.Fatal("can't find output header in response")
+ }
+
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ _ = resp.Body.Close()
+ }()
+
+ assert.Equal(t, all("../../../tests/client.php"), string(b))
+ assert.Equal(t, all("../../../tests/client.php"), string(b))
+}
+
+func staticNotForbid(t *testing.T) {
+ b, r, err := get("http://localhost:21603/client.php")
+ assert.NoError(t, err)
+ assert.Equal(t, all("../../../tests/client.php"), b)
+ assert.Equal(t, all("../../../tests/client.php"), b)
+ _ = r.Body.Close()
+}
+
+func serveStaticSample(t *testing.T) {
+ b, r, err := get("http://localhost:21603/sample.txt")
+ assert.NoError(t, err)
+ assert.Equal(t, "sample", b)
+ _ = r.Body.Close()
+}
+
+func TestStaticDisabled(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-static-disabled.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ &static.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("StaticDisabled", staticDisabled)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func staticDisabled(t *testing.T) {
+ _, r, err := get("http://localhost:21234/sample.txt") //nolint:bodyclose
+ assert.Error(t, err)
+ assert.Nil(t, r)
+}
+
+func TestStaticFilesDisabled(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-static-files-disable.yaml",
+ Prefix: "rr",
+ }
+
+ err = cont.RegisterAll(
+ cfg,
+ &logger.ZapLogger{},
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ &static.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("StaticFilesDisabled", staticFilesDisabled)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func staticFilesDisabled(t *testing.T) {
+ b, r, err := get("http://localhost:45877/client.php?hello=world")
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, "WORLD", b)
+ _ = r.Body.Close()
+}
+
+func TestStaticFilesForbid(t *testing.T) {
+ cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
+ assert.NoError(t, err)
+
+ cfg := &config.Viper{
+ Path: "configs/.rr-http-static-files.yaml",
+ Prefix: "rr",
+ }
+
+ controller := gomock.NewController(t)
+ mockLogger := mocks.NewMockLogger(controller)
+
+ mockLogger.EXPECT().Info("worker constructed", "pid", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Debug("http handler response received", "elapsed", gomock.Any(), "remote address", "127.0.0.1").AnyTimes()
+ mockLogger.EXPECT().Error("file open error", "error", gomock.Any()).AnyTimes()
+ mockLogger.EXPECT().Info(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // catch-all for the remaining worker log and error messages
+
+ err = cont.RegisterAll(
+ cfg,
+ mockLogger,
+ &server.Plugin{},
+ &httpPlugin.Plugin{},
+ &gzip.Gzip{},
+ &static.Plugin{},
+ )
+ assert.NoError(t, err)
+
+ err = cont.Init()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ch, err := cont.Serve()
+ assert.NoError(t, err)
+
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+
+ stopCh := make(chan struct{}, 1)
+
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case e := <-ch:
+ assert.Fail(t, "error", e.Error.Error())
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ case <-sig:
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ case <-stopCh:
+ // timeout
+ err = cont.Stop()
+ if err != nil {
+ assert.FailNow(t, "error", err.Error())
+ }
+ return
+ }
+ }
+ }()
+
+ time.Sleep(time.Second)
+ t.Run("StaticTestFilesDir", staticTestFilesDir)
+ t.Run("StaticNotFound", staticNotFound)
+ t.Run("StaticFilesForbid", staticFilesForbid)
+ t.Run("StaticFilesAlways", staticFilesAlways)
+
+ stopCh <- struct{}{}
+ wg.Wait()
+}
+
+func staticTestFilesDir(t *testing.T) {
+ b, r, err := get("http://localhost:34653/http?hello=world")
+ assert.NoError(t, err)
+ assert.Equal(t, "WORLD", b)
+ _ = r.Body.Close()
+}
+
+func staticNotFound(t *testing.T) {
+ b, _, _ := get("http://localhost:34653/client.XXX?hello=world") //nolint:bodyclose
+ assert.Equal(t, "WORLD", b)
+}
+
+func staticFilesAlways(t *testing.T) {
+ _, r, err := get("http://localhost:34653/favicon.ico")
+ assert.NoError(t, err)
+ assert.Equal(t, 404, r.StatusCode)
+ _ = r.Body.Close()
+}
+
+func staticFilesForbid(t *testing.T) {
+ b, r, err := get("http://localhost:34653/client.php?hello=world")
+ if err != nil {
+ t.Fatal(err)
+ }
+ assert.Equal(t, "WORLD", b)
+ _ = r.Body.Close()
+}
+
+// HELPERS
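+// get performs an HTTP GET on the given URL and returns the body as a string together
+// with the response; the body is closed before returning.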
+func get(url string) (string, *http.Response, error) {
+ r, err := http.Get(url) //nolint:gosec
+ if err != nil {
+ return "", nil, err
+ }
+
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return "", nil, err
+ }
+
+ err = r.Body.Close()
+ if err != nil {
+ return "", nil, err
+ }
+
+ return string(b), r, err
+}
+
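+// all returns the full contents of the file at fn, or an empty string if it cannot be read.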
+func all(fn string) string {
+ f, _ := os.Open(fn)
+
+ b := new(bytes.Buffer)
+ _, err := io.Copy(b, f)
+ if err != nil {
+ return ""
+ }
+
+ err = f.Close()
+ if err != nil {
+ return ""
+ }
+
+ return b.String()
+}
diff --git a/tests/psr-worker-bench.php b/tests/psr-worker-bench.php
new file mode 100644
index 00000000..e6df81ad
--- /dev/null
+++ b/tests/psr-worker-bench.php
@@ -0,0 +1,28 @@
+<?php
+/**
+ * @var Goridge\RelayInterface $relay
+ */
+use Spiral\Goridge;
+use Spiral\RoadRunner;
+
+ini_set('display_errors', 'stderr');
+require __DIR__ . "/vendor/autoload.php";
+
+$worker = new RoadRunner\Worker(new Goridge\StreamRelay(STDIN, STDOUT));
+$psr7 = new RoadRunner\Http\PSR7Worker(
+ $worker,
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory()
+);
+
+while ($req = $psr7->waitRequest()) {
+ try {
+ $resp = new \Nyholm\Psr7\Response();
+ $resp->getBody()->write("hello world");
+
+ $psr7->respond($resp);
+ } catch (\Throwable $e) {
+ $psr7->getWorker()->error((string)$e);
+ }
+}
diff --git a/tests/psr-worker.php b/tests/psr-worker.php
new file mode 100644
index 00000000..db53eee2
--- /dev/null
+++ b/tests/psr-worker.php
@@ -0,0 +1,28 @@
+<?php
+/**
+ * @var Goridge\RelayInterface $relay
+ */
+use Spiral\Goridge;
+use Spiral\RoadRunner;
+
+ini_set('display_errors', 'stderr');
+require __DIR__ . "/vendor/autoload.php";
+
+$worker = new RoadRunner\Worker(new Goridge\StreamRelay(STDIN, STDOUT));
+$psr7 = new RoadRunner\Http\PSR7Worker(
+ $worker,
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory(),
+ new \Nyholm\Psr7\Factory\Psr17Factory()
+);
+
+while ($req = $psr7->waitRequest()) {
+ try {
+ $resp = new \Nyholm\Psr7\Response();
+ $resp->getBody()->write(str_repeat("hello world", 1000));
+
+ $psr7->respond($resp);
+ } catch (\Throwable $e) {
+ $psr7->getWorker()->error((string)$e);
+ }
+}
diff --git a/tests/sleep.php b/tests/sleep.php
new file mode 100644
index 00000000..e34a6834
--- /dev/null
+++ b/tests/sleep.php
@@ -0,0 +1,15 @@
+<?php
+
+declare(strict_types=1);
+
+use Spiral\Goridge\StreamRelay;
+use Spiral\RoadRunner\Worker as RoadRunner;
+
+require __DIR__ . "/vendor/autoload.php";
+
+$rr = new RoadRunner(new StreamRelay(\STDIN, \STDOUT));
+
+while ($rr->waitPayload()) {
+ sleep(3);
+ $rr->send("");
+}
diff --git a/tests/slow-client.php b/tests/slow-client.php
index ece0a439..7737f0b1 100644
--- a/tests/slow-client.php
+++ b/tests/slow-client.php
@@ -3,7 +3,7 @@
use Spiral\Goridge;
ini_set('display_errors', 'stderr');
-require dirname(__DIR__) . "/vendor_php/autoload.php";
+require __DIR__ . "/vendor/autoload.php";
if (count($argv) < 3) {
die("need 2 arguments");
diff --git a/tests/slow-destroy.php b/tests/slow-destroy.php
index e2a01af2..900bb68a 100644
--- a/tests/slow-destroy.php
+++ b/tests/slow-destroy.php
@@ -3,7 +3,7 @@
use Spiral\Goridge;
ini_set('display_errors', 'stderr');
-require dirname(__DIR__) . "/vendor_php/autoload.php";
+require __DIR__ . "/vendor/autoload.php";
if (count($argv) < 3) {
die("need 2 arguments");
diff --git a/tests/slow-pid.php b/tests/slow-pid.php
index 747e7e86..3660cb40 100644
--- a/tests/slow-pid.php
+++ b/tests/slow-pid.php
@@ -8,7 +8,7 @@
$rr = new RoadRunner\Worker($relay);
- while ($in = $rr->receive($ctx)) {
+ while ($in = $rr->waitPayload()) {
try {
sleep(1);
$rr->send((string)getmypid());
diff --git a/tests/stop.php b/tests/stop.php
index 0100ad0f..f83d3f29 100644
--- a/tests/stop.php
+++ b/tests/stop.php
@@ -9,7 +9,7 @@ use Spiral\RoadRunner;
$rr = new RoadRunner\Worker($relay);
$used = false;
-while ($in = $rr->receive($ctx)) {
+while ($in = $rr->waitPayload()) {
try {
if ($used) {
// kill on second attempt
diff --git a/tools/process.go b/tools/process.go
new file mode 100644
index 00000000..d92755d1
--- /dev/null
+++ b/tools/process.go
@@ -0,0 +1,44 @@
+package tools
+
+import (
+ "github.com/shirou/gopsutil/process"
+ "github.com/spiral/errors"
+ "github.com/spiral/roadrunner/v2/interfaces/worker"
+)
+
+// ProcessState provides information about a specific worker.
+type ProcessState struct {
+ // Pid contains process id.
+ Pid int `json:"pid"`
+
+ // Status of the worker.
+ Status string `json:"status"`
+
+ // Number of worker executions.
+ NumJobs int64 `json:"numExecs"`
+
+ // Created is unix nano timestamp of worker creation time.
+ Created int64 `json:"created"`
+
+ // MemoryUsage holds the information about worker memory usage in bytes.
+ // Values might vary for different operating systems and based on RSS.
+ MemoryUsage uint64 `json:"memoryUsage"`
+}
+
+// WorkerProcessState creates a new worker state definition.
+func WorkerProcessState(w worker.BaseProcess) (ProcessState, error) {
+ const op = errors.Op("worker_process state")
+ p, _ := process.NewProcess(int32(w.Pid()))
+ i, err := p.MemoryInfo()
+ if err != nil {
+ return ProcessState{}, errors.E(op, err)
+ }
+
+ return ProcessState{
+ Pid: int(w.Pid()),
+ Status: w.State().String(),
+ NumJobs: w.State().NumExecs(),
+ Created: w.Created().UnixNano(),
+ MemoryUsage: i.RSS,
+ }, nil
+}
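
A minimal sketch of how WorkerProcessState could be used to rebuild the per-server listing that the deleted util.ServerState used to provide; it assumes the caller already has a []worker.BaseProcess slice from the pool, and the helper name is illustrative only.

package tools

import (
	"github.com/spiral/errors"
	"github.com/spiral/roadrunner/v2/interfaces/worker"
)

// WorkersProcessState collects a ProcessState entry for every worker in the
// given slice and stops at the first failure.
func WorkersProcessState(workers []worker.BaseProcess) ([]ProcessState, error) {
	const op = errors.Op("workers_process_state")
	states := make([]ProcessState, 0, len(workers))

	for i := range workers {
		state, err := WorkerProcessState(workers[i])
		if err != nil {
			return nil, errors.E(op, err)
		}
		states = append(states, state)
	}

	return states, nil
}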
diff --git a/cmd/util/table.go b/tools/worker_table.go
index 4f76be2c..4aeb6ae7 100644
--- a/cmd/util/table.go
+++ b/tools/worker_table.go
@@ -1,18 +1,18 @@
-package util
+package tools
import (
- "os"
+ "io"
"strconv"
"time"
"github.com/dustin/go-humanize"
+ "github.com/fatih/color"
"github.com/olekukonko/tablewriter"
- rrutil "github.com/spiral/roadrunner/util"
)
// WorkerTable renders table with information about rr server workers.
-func WorkerTable(workers []*rrutil.State) *tablewriter.Table {
- tw := tablewriter.NewWriter(os.Stdout)
+func WorkerTable(writer io.Writer, workers []ProcessState) *tablewriter.Table {
+ tw := tablewriter.NewWriter(writer)
tw.SetHeader([]string{"PID", "Status", "Execs", "Memory", "Created"})
tw.SetColMinWidth(0, 7)
tw.SetColMinWidth(1, 9)
@@ -20,13 +20,13 @@ func WorkerTable(workers []*rrutil.State) *tablewriter.Table {
tw.SetColMinWidth(3, 7)
tw.SetColMinWidth(4, 18)
- for _, w := range workers {
+ for key := range workers {
tw.Append([]string{
- strconv.Itoa(w.Pid),
- renderStatus(w.Status),
- renderJobs(w.NumJobs),
- humanize.Bytes(w.MemoryUsage),
- renderAlive(time.Unix(0, w.Created)),
+ strconv.Itoa(workers[key].Pid),
+ renderStatus(workers[key].Status),
+ renderJobs(workers[key].NumJobs),
+ humanize.Bytes(workers[key].MemoryUsage),
+ renderAlive(time.Unix(0, workers[key].Created)),
})
}
@@ -36,24 +36,24 @@ func WorkerTable(workers []*rrutil.State) *tablewriter.Table {
func renderStatus(status string) string {
switch status {
case "inactive":
- return Sprintf("<yellow>inactive</reset>")
+ return color.YellowString("inactive")
case "ready":
- return Sprintf("<cyan>ready</reset>")
+ return color.CyanString("ready")
case "working":
- return Sprintf("<green>working</reset>")
+ return color.GreenString("working")
case "invalid":
- return Sprintf("<yellow>invalid</reset>")
+ return color.YellowString("invalid")
case "stopped":
- return Sprintf("<red>stopped</reset>")
+ return color.RedString("stopped")
case "errored":
- return Sprintf("<red>errored</reset>")
+ return color.RedString("errored")
}
return status
}
func renderJobs(number int64) string {
- return humanize.Comma(int64(number))
+ return humanize.Comma(number)
}
func renderAlive(t time.Time) string {
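
A minimal sketch of the reworked WorkerTable signature in use: it now takes any io.Writer instead of writing to os.Stdout directly, and consumes []ProcessState values rather than the old *rrutil.State pointers. The import path and the sample entry below are assumptions for illustration.

package main

import (
	"os"
	"time"

	"github.com/spiral/roadrunner/v2/tools"
)

func main() {
	// In practice the states come from the workers RPC/informer endpoint;
	// a single hand-made entry is used here only to show the call shape.
	states := []tools.ProcessState{
		{Pid: 1234, Status: "ready", NumJobs: 3, Created: time.Now().UnixNano(), MemoryUsage: 1 << 20},
	}

	tools.WorkerTable(os.Stdout, states).Render()
}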
diff --git a/util/state.go b/util/state.go
deleted file mode 100644
index 5a08f9f2..00000000
--- a/util/state.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package util
-
-import (
- "errors"
-
- "github.com/shirou/gopsutil/process"
- "github.com/spiral/roadrunner"
-)
-
-// State provides information about specific worker.
-type State struct {
- // Pid contains process id.
- Pid int `json:"pid"`
-
- // Status of the worker.
- Status string `json:"status"`
-
- // Number of worker executions.
- NumJobs int64 `json:"numExecs"`
-
- // Created is unix nano timestamp of worker creation time.
- Created int64 `json:"created"`
-
- // MemoryUsage holds the information about worker memory usage in bytes.
- // Values might vary for different operating systems and based on RSS.
- MemoryUsage uint64 `json:"memoryUsage"`
-}
-
-// WorkerState creates new worker state definition.
-func WorkerState(w *roadrunner.Worker) (*State, error) {
- p, _ := process.NewProcess(int32(*w.Pid))
- i, err := p.MemoryInfo()
- if err != nil {
- return nil, err
- }
-
- return &State{
- Pid: *w.Pid,
- Status: w.State().String(),
- NumJobs: w.State().NumExecs(),
- Created: w.Created.UnixNano(),
- MemoryUsage: i.RSS,
- }, nil
-}
-
-// ServerState returns list of all worker states of a given rr server.
-func ServerState(rr *roadrunner.Server) ([]*State, error) {
- if rr == nil {
- return nil, errors.New("rr server is not running")
- }
-
- result := make([]*State, 0)
- for _, w := range rr.Workers() {
- state, err := WorkerState(w)
- if err != nil {
- return nil, err
- }
-
- result = append(result, state)
- }
-
- return result, nil
-}
diff --git a/util/state_test.go b/util/state_test.go
deleted file mode 100644
index 2a4a140b..00000000
--- a/util/state_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package util
-
-import (
- "runtime"
- "testing"
- "time"
-
- "github.com/spiral/roadrunner"
- "github.com/stretchr/testify/assert"
-)
-
-func TestServerState(t *testing.T) {
- rr := roadrunner.NewServer(
- &roadrunner.ServerConfig{
- Command: "php ../tests/client.php echo tcp",
- Relay: "tcp://:9007",
- RelayTimeout: 10 * time.Second,
- Pool: &roadrunner.Config{
- NumWorkers: int64(runtime.NumCPU()),
- AllocateTimeout: time.Second,
- DestroyTimeout: time.Second,
- },
- })
- defer rr.Stop()
-
- assert.NoError(t, rr.Start())
-
- state, err := ServerState(rr)
- assert.NoError(t, err)
-
- assert.Len(t, state, runtime.NumCPU())
-}
-
-func TestServerState_Err(t *testing.T) {
- _, err := ServerState(nil)
- assert.Error(t, err)
-}
diff --git a/utils/doc.go b/utils/doc.go
new file mode 100755
index 00000000..2c1c0d9c
--- /dev/null
+++ b/utils/doc.go
@@ -0,0 +1,5 @@
+package utils
+
+/*
+This package must not contain roadrunner dependencies; only the standard library and third-party packages are allowed here.
+*/
diff --git a/osutil/isolate.go b/utils/isolate.go
index 9eaf8a44..b797a999 100644..100755
--- a/osutil/isolate.go
+++ b/utils/isolate.go
@@ -1,6 +1,6 @@
// +build !windows
-package osutil
+package utils
import (
"fmt"
@@ -9,6 +9,8 @@ import (
"os/user"
"strconv"
"syscall"
+
+ "github.com/spiral/errors"
)
// IsolateProcess changes the gpid for the process to avoid passing signals through to the php processes.
@@ -18,19 +20,20 @@ func IsolateProcess(cmd *exec.Cmd) {
// ExecuteFromUser may work only if RR is run under the root user
func ExecuteFromUser(cmd *exec.Cmd, u string) error {
+ const op = errors.Op("execute from user")
usr, err := user.Lookup(u)
if err != nil {
- return err
+ return errors.E(op, err)
}
- usrI32, err := strconv.Atoi(usr.Uid)
+ usrI32, err := strconv.ParseInt(usr.Uid, 10, 32)
if err != nil {
- return err
+ return errors.E(op, err)
}
- grI32, err := strconv.Atoi(usr.Gid)
+ grI32, err := strconv.ParseInt(usr.Gid, 10, 32)
if err != nil {
- return err
+ return errors.E(op, err)
}
// For more information:
@@ -44,7 +47,7 @@ func ExecuteFromUser(cmd *exec.Cmd, u string) error {
return fmt.Errorf("unable to test user namespaces due to permissions")
}
- return fmt.Errorf("failed to stat /proc/self/ns/user: %v", err)
+ return errors.E(op, errors.Errorf("failed to stat /proc/self/ns/user: %v", err))
}
cmd.SysProcAttr.Credential = &syscall.Credential{
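
A minimal sketch of the relocated helpers in use on a worker command; the import path, the worker script, and the www-data user are illustrative, and ExecuteFromUser only succeeds when rr itself runs as root (non-windows builds).

package main

import (
	"log"
	"os/exec"

	"github.com/spiral/roadrunner/v2/utils"
)

func main() {
	cmd := exec.Command("php", "tests/psr-worker.php")

	// put the worker into its own process group so signals sent to rr are
	// not delivered to the PHP process as well
	utils.IsolateProcess(cmd)

	// optionally drop privileges to an unprivileged user
	if err := utils.ExecuteFromUser(cmd, "www-data"); err != nil {
		log.Fatal(err)
	}

	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	_ = cmd.Wait()
}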
diff --git a/osutil/isolate_win.go b/utils/isolate_win.go
index bac0a35e..b2b213a8 100644..100755
--- a/osutil/isolate_win.go
+++ b/utils/isolate_win.go
@@ -1,6 +1,6 @@
// +build windows
-package osutil
+package utils
import (
"os/exec"
diff --git a/util/network.go b/utils/network.go
index f35d842b..fcfc4ace 100644..100755
--- a/util/network.go
+++ b/utils/network.go
@@ -1,6 +1,6 @@
// +build linux darwin freebsd
-package util
+package utils
import (
"errors"
@@ -24,13 +24,19 @@ func CreateListener(address string) (net.Listener, error) {
return nil, errors.New("invalid Protocol (tcp://:6001, unix://file.sock)")
}
- if dsn[0] == "unix" && fileExists(dsn[1]) {
- err := syscall.Unlink(dsn[1])
- if err != nil {
- return nil, fmt.Errorf("error during the unlink syscall: error %v", err)
+ // create unix listener
+ if dsn[0] == "unix" {
+ // check if the file exists
+ if fileExists(dsn[1]) {
+ err := syscall.Unlink(dsn[1])
+ if err != nil {
+ return nil, fmt.Errorf("error during the unlink syscall: error %v", err)
+ }
}
+ return net.Listen(dsn[0], dsn[1])
}
+ // configure and create tcp4 listener
cfg := tcplisten.Config{
ReusePort: true,
DeferAccept: true,
@@ -38,12 +44,8 @@ func CreateListener(address string) (net.Listener, error) {
Backlog: 0,
}
- // tcp4 is currently supported
- if dsn[0] == "tcp" {
- return cfg.NewListener("tcp4", dsn[1])
- }
-
- return net.Listen(dsn[0], dsn[1])
+ // only tcp4 is currently supported
+ return cfg.NewListener("tcp4", dsn[1])
}
// fileExists checks if a file exists and is not a directory before we
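
A minimal sketch of the reworked CreateListener DSN handling: unix DSNs unlink a stale socket file before listening, while tcp DSNs always go through the tcplisten tcp4 path. The import path and addresses are illustrative, and the example only builds on linux, darwin and freebsd per the build tag.

package main

import (
	"log"

	"github.com/spiral/roadrunner/v2/utils"
)

func main() {
	// tcp DSN: created as a tcp4 listener with ReusePort and DeferAccept
	tcpLn, err := utils.CreateListener("tcp://127.0.0.1:6001")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = tcpLn.Close() }()

	// unix DSN: any socket file left from a previous run is removed first
	unixLn, err := utils.CreateListener("unix://rr.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = unixLn.Close() }()
}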
diff --git a/util/network_test.go b/utils/network_test.go
index 1dc16e94..cfed98f9 100644..100755
--- a/util/network_test.go
+++ b/utils/network_test.go
@@ -1,6 +1,6 @@
// +build linux darwin freebsd
-package util
+package utils
import (
"testing"
diff --git a/util/network_windows.go b/utils/network_windows.go
index 843d5779..ebe343a3 100644..100755
--- a/util/network_windows.go
+++ b/utils/network_windows.go
@@ -1,6 +1,6 @@
// +build windows
-package util
+package utils
import (
"errors"
diff --git a/util/network_windows_test.go b/utils/network_windows_test.go
index 3fdc8a5c..59ec0485 100644..100755
--- a/util/network_windows_test.go
+++ b/utils/network_windows_test.go
@@ -1,6 +1,6 @@
// +build windows
-package util
+package utils
import (
"testing"
diff --git a/worker.go b/worker.go
deleted file mode 100644
index 7d9e79b0..00000000
--- a/worker.go
+++ /dev/null
@@ -1,261 +0,0 @@
-package roadrunner
-
-import (
- "fmt"
- "os"
- "os/exec"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/pkg/errors"
- "github.com/spiral/goridge/v2"
-)
-
-// Worker - supervised process with api over goridge.Relay.
-type Worker struct {
- // Pid of the process, points to Pid of underlying process and
- // can be nil while process is not started.
- Pid *int
-
- // Created indicates at what time worker has been created.
- Created time.Time
-
- // state holds information about current worker state,
- // number of worker executions, buf status change time.
- // publicly this object is receive-only and protected using Mutex
- // and atomic counter.
- state *state
-
- // underlying command with associated process, command must be
- // provided to worker from outside in non-started form. CmdSource
- // stdErr direction will be handled by worker to aggregate error message.
- cmd *exec.Cmd
-
- // err aggregates stderr output from underlying process. Value can be
- // receive only once command is completed and all pipes are closed.
- err *errBuffer
-
- // channel is being closed once command is complete.
- waitDone chan interface{}
-
- // contains information about resulted process state.
- endState *os.ProcessState
-
- // ensures than only one execution can be run at once.
- mu sync.Mutex
-
- // communication bus with underlying process.
- rl goridge.Relay
-}
-
-// newWorker creates new worker over given exec.cmd.
-func newWorker(cmd *exec.Cmd) (*Worker, error) {
- if cmd.Process != nil {
- return nil, fmt.Errorf("can't attach to running process")
- }
-
- w := &Worker{
- Created: time.Now(),
- cmd: cmd,
- err: newErrBuffer(),
- waitDone: make(chan interface{}),
- state: newState(StateInactive),
- }
-
- // piping all stderr to command errBuffer
- w.cmd.Stderr = w.err
-
- return w, nil
-}
-
-// State return receive-only worker state object, state can be used to safely access
-// worker status, time when status changed and number of worker executions.
-func (w *Worker) State() State {
- return w.state
-}
-
-// String returns worker description.
-func (w *Worker) String() string {
- state := w.state.String()
- if w.Pid != nil {
- state = state + ", pid:" + strconv.Itoa(*w.Pid)
- }
-
- return fmt.Sprintf(
- "(`%s` [%s], numExecs: %v)",
- strings.Join(w.cmd.Args, " "),
- state,
- w.state.NumExecs(),
- )
-}
-
-// Wait must be called once for each worker, call will be released once worker is
-// complete and will return process error (if any), if stderr is presented it's value
-// will be wrapped as WorkerError. Method will return error code if php process fails
-// to find or start the script.
-func (w *Worker) Wait() error {
- <-w.waitDone
-
- // ensure that all receive/send operations are complete
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if w.endState.Success() {
- w.state.set(StateStopped)
- return nil
- }
-
- if w.state.Value() != StateStopping {
- w.state.set(StateErrored)
- } else {
- w.state.set(StateStopped)
- }
-
- if w.err.Len() != 0 {
- return errors.New(w.err.String())
- }
-
- // generic process error
- return &exec.ExitError{ProcessState: w.endState}
-}
-
-// Stop sends soft termination command to the worker and waits for process completion.
-func (w *Worker) Stop() error {
- select {
- case <-w.waitDone:
- return nil
- default:
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.state.set(StateStopping)
- err := sendControl(w.rl, &stopCommand{Stop: true})
-
- <-w.waitDone
- return err
- }
-}
-
-// Kill kills underlying process, make sure to call Wait() func to gather
-// error log from the stderr. Does not waits for process completion!
-func (w *Worker) Kill() error {
- select {
- case <-w.waitDone:
- return nil
- default:
- w.state.set(StateStopping)
- err := w.cmd.Process.Signal(os.Kill)
-
- <-w.waitDone
- return err
- }
-}
-
-var ErrEmptyPayload = errors.New("payload can not be empty")
-var ErrWorkerNotReady = errors.New("worker is not ready")
-
-// Exec sends payload to worker, executes it and returns result or
-// error. Make sure to handle worker.Wait() to gather worker level
-// errors. Method might return JobError indicating issue with payload.
-func (w *Worker) Exec(rqs *Payload) (rsp *Payload, err error) {
- w.mu.Lock()
-
- if rqs == nil {
- w.mu.Unlock()
- return nil, ErrEmptyPayload
- }
-
- if w.state.Value() != StateReady {
- w.mu.Unlock()
- return nil, ErrWorkerNotReady
- }
-
- w.state.set(StateWorking)
-
- rsp, err = w.execPayload(rqs)
- if err != nil {
- if _, ok := err.(JobError); !ok {
- w.state.set(StateErrored)
- w.state.registerExec()
- w.mu.Unlock()
- return nil, err
- }
- }
-
- w.state.set(StateReady)
- w.state.registerExec()
- w.mu.Unlock()
- return rsp, err
-}
-
-func (w *Worker) markInvalid() {
- w.state.set(StateInvalid)
-}
-
-func (w *Worker) start() error {
- if err := w.cmd.Start(); err != nil {
- close(w.waitDone)
- return err
- }
-
- w.Pid = &w.cmd.Process.Pid
-
- // wait for process to complete
- go func() {
- w.endState, _ = w.cmd.Process.Wait()
- if w.waitDone != nil {
- close(w.waitDone)
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if w.rl != nil {
- err := w.rl.Close()
- if err != nil {
- w.err.lsn(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
- }
-
- err := w.err.Close()
- if err != nil {
- w.err.lsn(EventWorkerError, WorkerError{Worker: w, Caused: err})
- }
- }
- }()
-
- return nil
-}
-
-func (w *Worker) execPayload(rqs *Payload) (rsp *Payload, err error) {
- // two things
- if err := sendControl(w.rl, rqs.Context); err != nil {
- return nil, errors.Wrap(err, "header error")
- }
-
- if err = w.rl.Send(rqs.Body, 0); err != nil {
- return nil, errors.Wrap(err, "sender error")
- }
-
- var pr goridge.Prefix
- rsp = new(Payload)
-
- if rsp.Context, pr, err = w.rl.Receive(); err != nil {
- return nil, errors.Wrap(err, "worker error")
- }
-
- if !pr.HasFlag(goridge.PayloadControl) {
- return nil, fmt.Errorf("malformed worker response")
- }
-
- if pr.HasFlag(goridge.PayloadError) {
- return nil, JobError(rsp.Context)
- }
-
- // add streaming support :)
- if rsp.Body, _, err = w.rl.Receive(); err != nil {
- return nil, errors.Wrap(err, "worker error")
- }
-
- return rsp, nil
-}
diff --git a/worker_test.go b/worker_test.go
deleted file mode 100644
index 8f93ee96..00000000
--- a/worker_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package roadrunner
-
-import (
- "os/exec"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func Test_GetState(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- assert.Equal(t, StateStopped, w.State().Value())
- }()
-
- assert.NoError(t, err)
- assert.NotNil(t, w)
-
- assert.Equal(t, StateReady, w.State().Value())
- err = w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
-}
-
-func Test_Kill(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.Error(t, w.Wait())
- assert.Equal(t, StateStopped, w.State().Value())
- }()
-
- assert.NoError(t, err)
- assert.NotNil(t, w)
-
- assert.Equal(t, StateReady, w.State().Value())
- defer func() {
- err := w.Kill()
- if err != nil {
- t.Errorf("error killing the worker: error %v", err)
- }
- }()
-}
-
-func Test_Echo(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
-
- assert.Nil(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_BadPayload(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- res, err := w.Exec(nil)
-
- assert.Error(t, err)
- assert.Nil(t, res)
-
- assert.Equal(t, "payload can not be empty", err.Error())
-}
-
-func Test_NotStarted_String(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := newWorker(cmd)
- assert.Contains(t, w.String(), "php tests/client.php echo pipes")
- assert.Contains(t, w.String(), "inactive")
- assert.Contains(t, w.String(), "numExecs: 0")
-}
-
-func Test_NotStarted_Exec(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := newWorker(cmd)
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
-
- assert.Error(t, err)
- assert.Nil(t, res)
-
- assert.Equal(t, "worker is not ready", err.Error())
-}
-
-func Test_String(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- assert.Contains(t, w.String(), "php tests/client.php echo pipes")
- assert.Contains(t, w.String(), "ready")
- assert.Contains(t, w.String(), "numExecs: 0")
-}
-
-func Test_Echo_Slow(t *testing.T) {
- cmd := exec.Command("php", "tests/slow-client.php", "echo", "pipes", "10", "10")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
-
- assert.Nil(t, err)
- assert.NotNil(t, res)
- assert.NotNil(t, res.Body)
- assert.Nil(t, res.Context)
-
- assert.Equal(t, "hello", res.String())
-}
-
-func Test_Broken(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "broken", "pipes")
-
- w, err := NewPipeFactory().SpawnWorker(cmd)
- if err != nil {
- t.Fatal(err)
- }
-
- go func() {
- err := w.Wait()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "undefined_function()")
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
- assert.Nil(t, res)
- assert.NotNil(t, err)
-
- time.Sleep(time.Second)
- assert.NoError(t, w.Stop())
-}
-
-func Test_OnStarted(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "broken", "pipes")
- assert.Nil(t, cmd.Start())
-
- w, err := newWorker(cmd)
- assert.Nil(t, w)
- assert.NotNil(t, err)
-
- assert.Equal(t, "can't attach to running process", err.Error())
-}
-
-func Test_Error(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "error", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
-
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- res, err := w.Exec(&Payload{Body: []byte("hello")})
- assert.Nil(t, res)
- assert.NotNil(t, err)
-
- assert.IsType(t, JobError{}, err)
- assert.Equal(t, "hello", err.Error())
-}
-
-func Test_NumExecs(t *testing.T) {
- cmd := exec.Command("php", "tests/client.php", "echo", "pipes")
-
- w, _ := NewPipeFactory().SpawnWorker(cmd)
- go func() {
- assert.NoError(t, w.Wait())
- }()
- defer func() {
- err := w.Stop()
- if err != nil {
- t.Errorf("error stopping the worker: error %v", err)
- }
- }()
-
- _, err := w.Exec(&Payload{Body: []byte("hello")})
- if err != nil {
- t.Errorf("fail to execute payload: error %v", err)
- }
- assert.Equal(t, int64(1), w.State().NumExecs())
-
- _, err = w.Exec(&Payload{Body: []byte("hello")})
- if err != nil {
- t.Errorf("fail to execute payload: error %v", err)
- }
- assert.Equal(t, int64(2), w.State().NumExecs())
-
- _, err = w.Exec(&Payload{Body: []byte("hello")})
- if err != nil {
- t.Errorf("fail to execute payload: error %v", err)
- }
- assert.Equal(t, int64(3), w.State().NumExecs())
-}