From f3491c089b4da77fd8d2bc942a88b6b8d117a8a5 Mon Sep 17 00:00:00 2001 From: Valery Piashchynski Date: Thu, 16 Sep 2021 17:12:37 +0300 Subject: Move plugins to a separate repository Signed-off-by: Valery Piashchynski --- .github/workflows/linux.yml | 13 +- .vscode/settings.json | 2 + Makefile | 44 - bst/bst.go | 152 ++ bst/bst_test.go | 325 +++ bst/doc.go | 7 + bst/interface.go | 13 + codecov.yml | 37 +- common/doc.go | 9 - common/jobs/interface.go | 31 - common/kv/interface.go | 42 - common/pubsub/interface.go | 56 - common/pubsub/psmessage.go | 15 - doc/README.md | 21 + doc/pool_workflow.drawio | 1 + doc/pool_workflow.svg | 3 + events/general.go | 41 + events/interface.go | 14 + events/jobs_events.go | 81 + events/pool_events.go | 71 + events/worker_events.go | 36 + go.mod | 72 +- go.sum | 774 +----- payload/payload.go | 20 + pkg/bst/bst.go | 152 -- pkg/bst/bst_test.go | 325 --- pkg/bst/doc.go | 7 - pkg/bst/interface.go | 13 - pkg/doc/README.md | 21 - pkg/doc/pool_workflow.drawio | 1 - pkg/doc/pool_workflow.svg | 3 - pkg/events/general.go | 41 - pkg/events/interface.go | 14 - pkg/events/jobs_events.go | 81 - pkg/events/pool_events.go | 70 - pkg/events/worker_events.go | 36 - pkg/payload/payload.go | 20 - pkg/pool/config.go | 75 - pkg/pool/interface.go | 53 - pkg/pool/static_pool.go | 374 --- pkg/pool/static_pool_test.go | 721 ------ pkg/pool/supervisor_pool.go | 230 -- pkg/pool/supervisor_test.go | 413 ---- pkg/priority_queue/binary_heap.go | 125 - pkg/priority_queue/binary_heap_test.go | 154 -- pkg/priority_queue/interface.go | 31 - pkg/state/job/state.go | 19 - pkg/state/process/state.go | 76 - pkg/transport/interface.go | 21 - pkg/transport/pipe/pipe_factory.go | 197 -- pkg/transport/pipe/pipe_factory_spawn_test.go | 461 ---- pkg/transport/pipe/pipe_factory_test.go | 503 ---- pkg/transport/socket/socket_factory.go | 255 -- pkg/transport/socket/socket_factory_spawn_test.go | 533 ----- pkg/transport/socket/socket_factory_test.go | 622 ----- pkg/worker/interface.go | 74 - pkg/worker/state.go | 111 - pkg/worker/state_test.go | 27 - pkg/worker/sync_worker.go | 283 --- pkg/worker/sync_worker_test.go | 33 - pkg/worker/worker.go | 220 -- pkg/worker/worker_test.go | 19 - pkg/worker_handler/constants.go | 8 - pkg/worker_handler/errors.go | 26 - pkg/worker_handler/errors_windows.go | 28 - pkg/worker_handler/handler.go | 246 -- pkg/worker_handler/parse.go | 149 -- pkg/worker_handler/request.go | 189 -- pkg/worker_handler/response.go | 105 - pkg/worker_handler/uploads.go | 159 -- pkg/worker_watcher/container/channel/vec.go | 107 - pkg/worker_watcher/container/queue/queue.go | 102 - pkg/worker_watcher/worker_watcher.go | 318 --- plugins/amqp/amqpjobs/config.go | 67 - plugins/amqp/amqpjobs/consumer.go | 524 ---- plugins/amqp/amqpjobs/item.go | 250 -- plugins/amqp/amqpjobs/listener.go | 25 - plugins/amqp/amqpjobs/rabbit_init.go | 57 - plugins/amqp/amqpjobs/redial.go | 138 -- plugins/amqp/plugin.go | 41 - plugins/beanstalk/config.go | 53 - plugins/beanstalk/connection.go | 223 -- plugins/beanstalk/consumer.go | 374 --- plugins/beanstalk/encode_test.go | 75 - plugins/beanstalk/item.go | 138 -- plugins/beanstalk/listen.go | 39 - plugins/beanstalk/plugin.go | 47 - plugins/boltdb/boltjobs/config.go | 39 - plugins/boltdb/boltjobs/consumer.go | 430 ---- plugins/boltdb/boltjobs/item.go | 229 -- plugins/boltdb/boltjobs/listener.go | 156 -- plugins/boltdb/boltkv/config.go | 30 - plugins/boltdb/boltkv/driver.go | 472 ---- plugins/boltdb/doc/boltjobs.drawio | 1 - plugins/boltdb/doc/job_lifecycle.md | 9 - 
plugins/boltdb/plugin.go | 68 - plugins/broadcast/config.go | 27 - plugins/broadcast/doc/broadcast_arch.drawio | 1 - plugins/broadcast/interface.go | 7 - plugins/broadcast/plugin.go | 192 -- plugins/broadcast/rpc.go | 87 - plugins/config/config.go | 10 - plugins/config/interface.go | 29 - plugins/config/plugin.go | 174 -- plugins/gzip/plugin.go | 28 - plugins/headers/config.go | 36 - plugins/headers/plugin.go | 127 - plugins/http/attributes/attributes.go | 89 - plugins/http/config/fcgi.go | 7 - plugins/http/config/http.go | 187 -- plugins/http/config/http2.go | 28 - plugins/http/config/ip.go | 26 - plugins/http/config/ssl.go | 84 - plugins/http/config/ssl_config_test.go | 116 - plugins/http/config/uploads_config.go | 46 - plugins/http/metrics.go | 92 - plugins/http/plugin.go | 412 ---- plugins/http/serve.go | 254 -- plugins/informer/interface.go | 34 - plugins/informer/plugin.go | 89 - plugins/informer/rpc.go | 59 - plugins/jobs/config.go | 62 - plugins/jobs/doc/jobs_arch.drawio | 1 - plugins/jobs/doc/response_protocol.md | 54 - plugins/jobs/job/job.go | 51 - plugins/jobs/job/job_test.go | 18 - plugins/jobs/metrics.go | 92 - plugins/jobs/pipeline/pipeline.go | 98 - plugins/jobs/pipeline/pipeline_test.go | 21 - plugins/jobs/plugin.go | 719 ------ plugins/jobs/protocol.go | 78 - plugins/jobs/rpc.go | 160 -- plugins/kv/config.go | 6 - plugins/kv/doc/kv.drawio | 1 - plugins/kv/plugin.go | 159 -- plugins/kv/rpc.go | 180 -- plugins/logger/config.go | 212 -- plugins/logger/encoder.go | 66 - plugins/logger/enums.go | 12 - plugins/logger/interface.go | 14 - plugins/logger/plugin.go | 86 - plugins/logger/std_log_adapter.go | 26 - plugins/logger/zap_adapter.go | 79 - plugins/memcached/memcachedkv/config.go | 12 - plugins/memcached/memcachedkv/driver.go | 254 -- plugins/memcached/plugin.go | 49 - plugins/memory/memoryjobs/consumer.go | 296 --- plugins/memory/memoryjobs/item.go | 134 -- plugins/memory/memorykv/config.go | 14 - plugins/memory/memorykv/kv.go | 257 -- plugins/memory/memorypubsub/pubsub.go | 92 - plugins/memory/plugin.go | 68 - plugins/metrics/config.go | 140 -- plugins/metrics/config_test.go | 89 - plugins/metrics/doc.go | 1 - plugins/metrics/interface.go | 7 - plugins/metrics/plugin.go | 242 -- plugins/metrics/rpc.go | 294 --- plugins/redis/config.go | 34 - plugins/redis/kv/config.go | 36 - plugins/redis/kv/kv.go | 255 -- plugins/redis/plugin.go | 77 - plugins/redis/pubsub/channel.go | 97 - plugins/redis/pubsub/config.go | 34 - plugins/redis/pubsub/pubsub.go | 187 -- plugins/reload/config.go | 62 - plugins/reload/plugin.go | 167 -- plugins/reload/watcher.go | 372 --- plugins/resetter/interface.go | 7 - plugins/resetter/plugin.go | 55 - plugins/resetter/rpc.go | 29 - plugins/rpc/config.go | 46 - plugins/rpc/doc/plugin_arch.drawio | 1 - plugins/rpc/interface.go | 7 - plugins/rpc/plugin.go | 155 -- plugins/server/command.go | 33 - plugins/server/command_test.go | 43 - plugins/server/config.go | 60 - plugins/server/interface.go | 23 - plugins/server/plugin.go | 268 --- plugins/service/config.go | 34 - plugins/service/plugin.go | 110 - plugins/service/process.go | 147 -- plugins/sqs/config.go | 114 - plugins/sqs/consumer.go | 421 ---- plugins/sqs/item.go | 250 -- plugins/sqs/listener.go | 87 - plugins/sqs/plugin.go | 39 - plugins/static/config.go | 55 - plugins/static/etag.go | 72 - plugins/static/plugin.go | 188 -- plugins/status/config.go | 18 - plugins/status/interface.go | 18 - plugins/status/plugin.go | 214 -- plugins/status/rpc.go | 43 - plugins/websockets/commands/enums.go | 9 - 
plugins/websockets/config.go | 83 - plugins/websockets/connection/connection.go | 67 - plugins/websockets/doc/broadcast.drawio | 1 - plugins/websockets/doc/doc.go | 27 - plugins/websockets/executor/executor.go | 214 -- plugins/websockets/origin.go | 28 - plugins/websockets/origin_test.go | 73 - plugins/websockets/plugin.go | 370 --- plugins/websockets/pool/workers_pool.go | 135 -- plugins/websockets/validator/access_validator.go | 81 - plugins/websockets/wildcard.go | 12 - pool/config.go | 75 + pool/interface.go | 53 + pool/static_pool.go | 374 +++ pool/static_pool_test.go | 721 ++++++ pool/supervisor_pool.go | 230 ++ pool/supervisor_test.go | 413 ++++ priority_queue/binary_heap.go | 125 + priority_queue/binary_heap_test.go | 154 ++ priority_queue/interface.go | 31 + proto/jobs/v1beta/jobs.pb.go | 840 ------- proto/jobs/v1beta/jobs.proto | 60 - proto/kv/v1beta/kv.pb.go | 302 --- proto/kv/v1beta/kv.proto | 22 - proto/websockets/v1beta/websockets.pb.go | 292 --- proto/websockets/v1beta/websockets.proto | 20 - state/job/state.go | 19 + state/process/state.go | 76 + tests/mocks/mock_log.go | 146 -- tests/plugins/broadcast/broadcast_plugin_test.go | 513 ---- .../configs/.rr-broadcast-config-error.yaml | 32 - .../broadcast/configs/.rr-broadcast-global.yaml | 50 - .../broadcast/configs/.rr-broadcast-init.yaml | 36 - .../broadcast/configs/.rr-broadcast-no-config.yaml | 29 - .../configs/.rr-broadcast-same-section.yaml | 48 - tests/plugins/broadcast/plugins/plugin1.go | 73 - tests/plugins/broadcast/plugins/plugin2.go | 74 - tests/plugins/broadcast/plugins/plugin3.go | 74 - tests/plugins/broadcast/plugins/plugin4.go | 74 - tests/plugins/broadcast/plugins/plugin5.go | 74 - tests/plugins/broadcast/plugins/plugin6.go | 74 - tests/plugins/config/config_test.go | 272 --- tests/plugins/config/configs/.rr-env.yaml | 24 - tests/plugins/config/configs/.rr.yaml | 24 - tests/plugins/config/plugin1.go | 96 - tests/plugins/config/plugin2.go | 50 - tests/plugins/config/plugin3.go | 34 - .../gzip/configs/.rr-http-middlewareNotExist.yaml | 20 - tests/plugins/gzip/configs/.rr-http-withGzip.yaml | 22 - tests/plugins/gzip/plugin_test.go | 180 -- .../plugins/headers/configs/.rr-cors-headers.yaml | 34 - .../plugins/headers/configs/.rr-headers-init.yaml | 38 - tests/plugins/headers/configs/.rr-req-headers.yaml | 31 - tests/plugins/headers/configs/.rr-res-headers.yaml | 31 - tests/plugins/headers/headers_plugin_test.go | 368 --- tests/plugins/http/attributes_test.go | 78 - tests/plugins/http/configs/.rr-big-req-size.yaml | 21 - tests/plugins/http/configs/.rr-broken-pipes.yaml | 29 - tests/plugins/http/configs/.rr-env.yaml | 27 - tests/plugins/http/configs/.rr-fcgi-reqUri.yaml | 32 - tests/plugins/http/configs/.rr-fcgi.yaml | 30 - tests/plugins/http/configs/.rr-h2c.yaml | 28 - tests/plugins/http/configs/.rr-http-ipv6-2.yaml | 24 - tests/plugins/http/configs/.rr-http-ipv6.yaml | 24 - .../http/configs/.rr-http-static-disabled.yaml | 27 - .../http/configs/.rr-http-static-etags.yaml | 34 - .../configs/.rr-http-static-files-disable.yaml | 24 - .../http/configs/.rr-http-static-files.yaml | 30 - .../http/configs/.rr-http-static-security.yaml | 34 - tests/plugins/http/configs/.rr-http-static.yaml | 30 - .../http/configs/.rr-http-supervised-pool.yaml | 28 - tests/plugins/http/configs/.rr-http.yaml | 25 - tests/plugins/http/configs/.rr-init.yaml | 36 - tests/plugins/http/configs/.rr-issue659.yaml | 23 - tests/plugins/http/configs/.rr-no-http.yaml | 16 - tests/plugins/http/configs/.rr-resetter.yaml | 28 - 
tests/plugins/http/configs/.rr-ssl-push.yaml | 30 - tests/plugins/http/configs/.rr-ssl-redirect.yaml | 30 - tests/plugins/http/configs/.rr-ssl.yaml | 32 - tests/plugins/http/fixtures/server.crt | 15 - tests/plugins/http/fixtures/server.key | 9 - tests/plugins/http/handler_test.go | 1862 --------------- tests/plugins/http/http_plugin_test.go | 2516 -------------------- tests/plugins/http/parse_test.go | 54 - tests/plugins/http/plugin1.go | 27 - tests/plugins/http/plugin_middleware.go | 69 - tests/plugins/http/response_test.go | 165 -- tests/plugins/http/uploads_config_test.go | 26 - tests/plugins/http/uploads_test.go | 433 ---- tests/plugins/informer/.rr-informer.yaml | 15 - tests/plugins/informer/informer_test.go | 136 -- tests/plugins/informer/test_plugin.go | 71 - tests/plugins/jobs/amqp/.rr-amqp-declare.yaml | 24 - tests/plugins/jobs/amqp/.rr-amqp-init.yaml | 55 - tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml | 24 - tests/plugins/jobs/amqp/.rr-no-global.yaml | 47 - .../jobs/beanstalk/.rr-beanstalk-declare.yaml | 27 - .../plugins/jobs/beanstalk/.rr-beanstalk-init.yaml | 45 - .../jobs/beanstalk/.rr-beanstalk-jobs-err.yaml | 25 - tests/plugins/jobs/beanstalk/.rr-no-global.yaml | 34 - tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml | 24 - tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml | 43 - tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml | 24 - tests/plugins/jobs/boltdb/.rr-no-global.yaml | 41 - tests/plugins/jobs/configs/.rr-jobs-init.yaml | 112 - tests/plugins/jobs/configs/.rr-jobs-metrics.yaml | 27 - .../durability/.rr-amqp-durability-redial.yaml | 55 - .../.rr-beanstalk-durability-redial.yaml | 44 - .../jobs/durability/.rr-sqs-durability-redial.yaml | 60 - tests/plugins/jobs/helpers.go | 234 -- tests/plugins/jobs/jobs_amqp_test.go | 499 ---- tests/plugins/jobs/jobs_beanstalk_test.go | 515 ---- tests/plugins/jobs/jobs_boltdb_test.go | 506 ---- tests/plugins/jobs/jobs_general_test.go | 249 -- tests/plugins/jobs/jobs_memory_test.go | 570 ----- tests/plugins/jobs/jobs_sqs_test.go | 503 ---- tests/plugins/jobs/jobs_with_toxics_test.go | 392 --- tests/plugins/jobs/memory/.rr-memory-declare.yaml | 21 - tests/plugins/jobs/memory/.rr-memory-init.yaml | 37 - tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml | 21 - .../jobs/memory/.rr-memory-pause-resume.yaml | 44 - tests/plugins/jobs/sqs/.rr-no-global.yaml | 39 - tests/plugins/jobs/sqs/.rr-sqs-declare.yaml | 29 - tests/plugins/jobs/sqs/.rr-sqs-init.yaml | 54 - tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml | 28 - tests/plugins/kv/configs/.rr-boltdb.yaml | 16 - tests/plugins/kv/configs/.rr-in-memory.yaml | 12 - .../kv/configs/.rr-kv-bolt-no-interval.yaml | 19 - tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml | 18 - tests/plugins/kv/configs/.rr-kv-init.yaml | 35 - tests/plugins/kv/configs/.rr-memcached.yaml | 13 - tests/plugins/kv/configs/.rr-redis-global.yaml | 14 - tests/plugins/kv/configs/.rr-redis-no-config.yaml | 10 - tests/plugins/kv/configs/.rr-redis.yaml | 13 - tests/plugins/kv/storage_plugin_test.go | 1517 ------------ tests/plugins/logger/configs/.rr-file-logger.yaml | 23 - tests/plugins/logger/configs/.rr-no-logger.yaml | 0 tests/plugins/logger/configs/.rr-no-logger2.yaml | 16 - tests/plugins/logger/configs/.rr-raw-mode.yaml | 15 - tests/plugins/logger/configs/.rr.yaml | 3 - tests/plugins/logger/logger_test.go | 430 ---- tests/plugins/logger/plugin.go | 71 - .../plugins/metrics/configs/.rr-http-metrics.yaml | 20 - tests/plugins/metrics/configs/.rr-issue-571.yaml | 13 - tests/plugins/metrics/configs/.rr-test.yaml | 18 - 
tests/plugins/metrics/metrics_test.go | 1091 --------- tests/plugins/metrics/plugin1.go | 46 - tests/plugins/reload/config_test.go | 63 - tests/plugins/reload/configs/.rr-reload-2.yaml | 37 - tests/plugins/reload/configs/.rr-reload-3.yaml | 39 - tests/plugins/reload/configs/.rr-reload-4.yaml | 40 - tests/plugins/reload/configs/.rr-reload.yaml | 37 - tests/plugins/reload/reload_plugin_test.go | 852 ------- tests/plugins/resetter/.rr-resetter.yaml | 15 - tests/plugins/resetter/resetter_test.go | 125 - tests/plugins/resetter/test_plugin.go | 66 - tests/plugins/rpc/config_test.go | 163 -- tests/plugins/rpc/configs/.rr-rpc-disabled.yaml | 8 - tests/plugins/rpc/configs/.rr.yaml | 11 - tests/plugins/rpc/plugin1.go | 42 - tests/plugins/rpc/plugin2.go | 53 - tests/plugins/rpc/rpc_test.go | 188 -- .../plugins/server/configs/.rr-no-app-section.yaml | 12 - tests/plugins/server/configs/.rr-sockets.yaml | 12 - tests/plugins/server/configs/.rr-tcp.yaml | 12 - .../plugins/server/configs/.rr-wrong-command.yaml | 12 - tests/plugins/server/configs/.rr-wrong-relay.yaml | 12 - tests/plugins/server/configs/.rr.yaml | 12 - tests/plugins/server/plugin_pipes.go | 128 - tests/plugins/server/plugin_sockets.go | 109 - tests/plugins/server/plugin_tcp.go | 109 - tests/plugins/server/server_plugin_test.go | 352 --- tests/plugins/server/socket.php | 25 - tests/plugins/server/tcp.php | 20 - .../plugins/service/configs/.rr-service-error.yaml | 16 - .../plugins/service/configs/.rr-service-init.yaml | 22 - .../service/configs/.rr-service-restarts.yaml | 16 - tests/plugins/service/service_plugin_test.go | 254 -- tests/plugins/service/test_files/loop.php | 6 - tests/plugins/service/test_files/test_binary | Bin 1363968 -> 0 bytes tests/plugins/status/configs/.rr-ready-init.yaml | 28 - tests/plugins/status/configs/.rr-status-init.yaml | 28 - tests/plugins/status/plugin_test.go | 388 --- .../websockets/configs/.rr-websockets-allow.yaml | 52 - .../websockets/configs/.rr-websockets-allow2.yaml | 54 - .../configs/.rr-websockets-broker-no-section.yaml | 48 - .../websockets/configs/.rr-websockets-deny.yaml | 48 - .../websockets/configs/.rr-websockets-deny2.yaml | 50 - .../websockets/configs/.rr-websockets-init.yaml | 48 - .../websockets/configs/.rr-websockets-redis.yaml | 51 - .../websockets/configs/.rr-websockets-stop.yaml | 48 - tests/plugins/websockets/websocket_plugin_test.go | 918 ------- transport/interface.go | 21 + transport/pipe/pipe_factory.go | 197 ++ transport/pipe/pipe_factory_spawn_test.go | 461 ++++ transport/pipe/pipe_factory_test.go | 503 ++++ transport/socket/socket_factory.go | 255 ++ transport/socket/socket_factory_spawn_test.go | 533 +++++ transport/socket/socket_factory_test.go | 622 +++++ worker/interface.go | 74 + worker/state.go | 111 + worker/state_test.go | 27 + worker/sync_worker.go | 283 +++ worker/sync_worker_test.go | 33 + worker/worker.go | 220 ++ worker/worker_test.go | 19 + worker_watcher/container/channel/vec.go | 107 + worker_watcher/container/queue/queue.go | 102 + worker_watcher/worker_watcher.go | 318 +++ 405 files changed, 6976 insertions(+), 46409 deletions(-) create mode 100644 bst/bst.go create mode 100644 bst/bst_test.go create mode 100644 bst/doc.go create mode 100644 bst/interface.go delete mode 100644 common/doc.go delete mode 100644 common/jobs/interface.go delete mode 100644 common/kv/interface.go delete mode 100644 common/pubsub/interface.go delete mode 100644 common/pubsub/psmessage.go create mode 100644 doc/README.md create mode 100644 doc/pool_workflow.drawio create mode 100644 
doc/pool_workflow.svg create mode 100755 events/general.go create mode 100644 events/interface.go create mode 100644 events/jobs_events.go create mode 100644 events/pool_events.go create mode 100644 events/worker_events.go create mode 100755 payload/payload.go delete mode 100644 pkg/bst/bst.go delete mode 100644 pkg/bst/bst_test.go delete mode 100644 pkg/bst/doc.go delete mode 100644 pkg/bst/interface.go delete mode 100644 pkg/doc/README.md delete mode 100644 pkg/doc/pool_workflow.drawio delete mode 100644 pkg/doc/pool_workflow.svg delete mode 100755 pkg/events/general.go delete mode 100644 pkg/events/interface.go delete mode 100644 pkg/events/jobs_events.go delete mode 100644 pkg/events/pool_events.go delete mode 100644 pkg/events/worker_events.go delete mode 100755 pkg/payload/payload.go delete mode 100644 pkg/pool/config.go delete mode 100644 pkg/pool/interface.go delete mode 100755 pkg/pool/static_pool.go delete mode 100755 pkg/pool/static_pool_test.go delete mode 100755 pkg/pool/supervisor_pool.go delete mode 100644 pkg/pool/supervisor_test.go delete mode 100644 pkg/priority_queue/binary_heap.go delete mode 100644 pkg/priority_queue/binary_heap_test.go delete mode 100644 pkg/priority_queue/interface.go delete mode 100644 pkg/state/job/state.go delete mode 100644 pkg/state/process/state.go delete mode 100644 pkg/transport/interface.go delete mode 100755 pkg/transport/pipe/pipe_factory.go delete mode 100644 pkg/transport/pipe/pipe_factory_spawn_test.go delete mode 100755 pkg/transport/pipe/pipe_factory_test.go delete mode 100755 pkg/transport/socket/socket_factory.go delete mode 100644 pkg/transport/socket/socket_factory_spawn_test.go delete mode 100755 pkg/transport/socket/socket_factory_test.go delete mode 100644 pkg/worker/interface.go delete mode 100755 pkg/worker/state.go delete mode 100755 pkg/worker/state_test.go delete mode 100755 pkg/worker/sync_worker.go delete mode 100755 pkg/worker/sync_worker_test.go delete mode 100755 pkg/worker/worker.go delete mode 100755 pkg/worker/worker_test.go delete mode 100644 pkg/worker_handler/constants.go delete mode 100644 pkg/worker_handler/errors.go delete mode 100644 pkg/worker_handler/errors_windows.go delete mode 100644 pkg/worker_handler/handler.go delete mode 100644 pkg/worker_handler/parse.go delete mode 100644 pkg/worker_handler/request.go delete mode 100644 pkg/worker_handler/response.go delete mode 100644 pkg/worker_handler/uploads.go delete mode 100644 pkg/worker_watcher/container/channel/vec.go delete mode 100644 pkg/worker_watcher/container/queue/queue.go delete mode 100755 pkg/worker_watcher/worker_watcher.go delete mode 100644 plugins/amqp/amqpjobs/config.go delete mode 100644 plugins/amqp/amqpjobs/consumer.go delete mode 100644 plugins/amqp/amqpjobs/item.go delete mode 100644 plugins/amqp/amqpjobs/listener.go delete mode 100644 plugins/amqp/amqpjobs/rabbit_init.go delete mode 100644 plugins/amqp/amqpjobs/redial.go delete mode 100644 plugins/amqp/plugin.go delete mode 100644 plugins/beanstalk/config.go delete mode 100644 plugins/beanstalk/connection.go delete mode 100644 plugins/beanstalk/consumer.go delete mode 100644 plugins/beanstalk/encode_test.go delete mode 100644 plugins/beanstalk/item.go delete mode 100644 plugins/beanstalk/listen.go delete mode 100644 plugins/beanstalk/plugin.go delete mode 100644 plugins/boltdb/boltjobs/config.go delete mode 100644 plugins/boltdb/boltjobs/consumer.go delete mode 100644 plugins/boltdb/boltjobs/item.go delete mode 100644 plugins/boltdb/boltjobs/listener.go delete mode 100644 
plugins/boltdb/boltkv/config.go delete mode 100644 plugins/boltdb/boltkv/driver.go delete mode 100644 plugins/boltdb/doc/boltjobs.drawio delete mode 100644 plugins/boltdb/doc/job_lifecycle.md delete mode 100644 plugins/boltdb/plugin.go delete mode 100644 plugins/broadcast/config.go delete mode 100644 plugins/broadcast/doc/broadcast_arch.drawio delete mode 100644 plugins/broadcast/interface.go delete mode 100644 plugins/broadcast/plugin.go delete mode 100644 plugins/broadcast/rpc.go delete mode 100644 plugins/config/config.go delete mode 100644 plugins/config/interface.go delete mode 100755 plugins/config/plugin.go delete mode 100644 plugins/gzip/plugin.go delete mode 100644 plugins/headers/config.go delete mode 100644 plugins/headers/plugin.go delete mode 100644 plugins/http/attributes/attributes.go delete mode 100644 plugins/http/config/fcgi.go delete mode 100644 plugins/http/config/http.go delete mode 100644 plugins/http/config/http2.go delete mode 100644 plugins/http/config/ip.go delete mode 100644 plugins/http/config/ssl.go delete mode 100644 plugins/http/config/ssl_config_test.go delete mode 100644 plugins/http/config/uploads_config.go delete mode 100644 plugins/http/metrics.go delete mode 100644 plugins/http/plugin.go delete mode 100644 plugins/http/serve.go delete mode 100644 plugins/informer/interface.go delete mode 100644 plugins/informer/plugin.go delete mode 100644 plugins/informer/rpc.go delete mode 100644 plugins/jobs/config.go delete mode 100644 plugins/jobs/doc/jobs_arch.drawio delete mode 100644 plugins/jobs/doc/response_protocol.md delete mode 100644 plugins/jobs/job/job.go delete mode 100644 plugins/jobs/job/job_test.go delete mode 100644 plugins/jobs/metrics.go delete mode 100644 plugins/jobs/pipeline/pipeline.go delete mode 100644 plugins/jobs/pipeline/pipeline_test.go delete mode 100644 plugins/jobs/plugin.go delete mode 100644 plugins/jobs/protocol.go delete mode 100644 plugins/jobs/rpc.go delete mode 100644 plugins/kv/config.go delete mode 100644 plugins/kv/doc/kv.drawio delete mode 100644 plugins/kv/plugin.go delete mode 100644 plugins/kv/rpc.go delete mode 100644 plugins/logger/config.go delete mode 100644 plugins/logger/encoder.go delete mode 100644 plugins/logger/enums.go delete mode 100644 plugins/logger/interface.go delete mode 100644 plugins/logger/plugin.go delete mode 100644 plugins/logger/std_log_adapter.go delete mode 100644 plugins/logger/zap_adapter.go delete mode 100644 plugins/memcached/memcachedkv/config.go delete mode 100644 plugins/memcached/memcachedkv/driver.go delete mode 100644 plugins/memcached/plugin.go delete mode 100644 plugins/memory/memoryjobs/consumer.go delete mode 100644 plugins/memory/memoryjobs/item.go delete mode 100644 plugins/memory/memorykv/config.go delete mode 100644 plugins/memory/memorykv/kv.go delete mode 100644 plugins/memory/memorypubsub/pubsub.go delete mode 100644 plugins/memory/plugin.go delete mode 100644 plugins/metrics/config.go delete mode 100644 plugins/metrics/config_test.go delete mode 100644 plugins/metrics/doc.go delete mode 100644 plugins/metrics/interface.go delete mode 100644 plugins/metrics/plugin.go delete mode 100644 plugins/metrics/rpc.go delete mode 100644 plugins/redis/config.go delete mode 100644 plugins/redis/kv/config.go delete mode 100644 plugins/redis/kv/kv.go delete mode 100644 plugins/redis/plugin.go delete mode 100644 plugins/redis/pubsub/channel.go delete mode 100644 plugins/redis/pubsub/config.go delete mode 100644 plugins/redis/pubsub/pubsub.go delete mode 100644 plugins/reload/config.go 
delete mode 100644 plugins/reload/plugin.go delete mode 100644 plugins/reload/watcher.go delete mode 100644 plugins/resetter/interface.go delete mode 100644 plugins/resetter/plugin.go delete mode 100644 plugins/resetter/rpc.go delete mode 100644 plugins/rpc/config.go delete mode 100644 plugins/rpc/doc/plugin_arch.drawio delete mode 100644 plugins/rpc/interface.go delete mode 100644 plugins/rpc/plugin.go delete mode 100644 plugins/server/command.go delete mode 100644 plugins/server/command_test.go delete mode 100644 plugins/server/config.go delete mode 100644 plugins/server/interface.go delete mode 100644 plugins/server/plugin.go delete mode 100644 plugins/service/config.go delete mode 100644 plugins/service/plugin.go delete mode 100644 plugins/service/process.go delete mode 100644 plugins/sqs/config.go delete mode 100644 plugins/sqs/consumer.go delete mode 100644 plugins/sqs/item.go delete mode 100644 plugins/sqs/listener.go delete mode 100644 plugins/sqs/plugin.go delete mode 100644 plugins/static/config.go delete mode 100644 plugins/static/etag.go delete mode 100644 plugins/static/plugin.go delete mode 100644 plugins/status/config.go delete mode 100644 plugins/status/interface.go delete mode 100644 plugins/status/plugin.go delete mode 100644 plugins/status/rpc.go delete mode 100644 plugins/websockets/commands/enums.go delete mode 100644 plugins/websockets/config.go delete mode 100644 plugins/websockets/connection/connection.go delete mode 100644 plugins/websockets/doc/broadcast.drawio delete mode 100644 plugins/websockets/doc/doc.go delete mode 100644 plugins/websockets/executor/executor.go delete mode 100644 plugins/websockets/origin.go delete mode 100644 plugins/websockets/origin_test.go delete mode 100644 plugins/websockets/plugin.go delete mode 100644 plugins/websockets/pool/workers_pool.go delete mode 100644 plugins/websockets/validator/access_validator.go delete mode 100644 plugins/websockets/wildcard.go create mode 100644 pool/config.go create mode 100644 pool/interface.go create mode 100755 pool/static_pool.go create mode 100755 pool/static_pool_test.go create mode 100755 pool/supervisor_pool.go create mode 100644 pool/supervisor_test.go create mode 100644 priority_queue/binary_heap.go create mode 100644 priority_queue/binary_heap_test.go create mode 100644 priority_queue/interface.go delete mode 100644 proto/jobs/v1beta/jobs.pb.go delete mode 100644 proto/jobs/v1beta/jobs.proto delete mode 100644 proto/kv/v1beta/kv.pb.go delete mode 100644 proto/kv/v1beta/kv.proto delete mode 100644 proto/websockets/v1beta/websockets.pb.go delete mode 100644 proto/websockets/v1beta/websockets.proto create mode 100644 state/job/state.go create mode 100644 state/process/state.go delete mode 100644 tests/mocks/mock_log.go delete mode 100644 tests/plugins/broadcast/broadcast_plugin_test.go delete mode 100644 tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml delete mode 100644 tests/plugins/broadcast/configs/.rr-broadcast-global.yaml delete mode 100644 tests/plugins/broadcast/configs/.rr-broadcast-init.yaml delete mode 100644 tests/plugins/broadcast/configs/.rr-broadcast-no-config.yaml delete mode 100644 tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml delete mode 100644 tests/plugins/broadcast/plugins/plugin1.go delete mode 100644 tests/plugins/broadcast/plugins/plugin2.go delete mode 100644 tests/plugins/broadcast/plugins/plugin3.go delete mode 100644 tests/plugins/broadcast/plugins/plugin4.go delete mode 100644 tests/plugins/broadcast/plugins/plugin5.go delete mode 
100644 tests/plugins/broadcast/plugins/plugin6.go delete mode 100755 tests/plugins/config/config_test.go delete mode 100755 tests/plugins/config/configs/.rr-env.yaml delete mode 100755 tests/plugins/config/configs/.rr.yaml delete mode 100755 tests/plugins/config/plugin1.go delete mode 100755 tests/plugins/config/plugin2.go delete mode 100755 tests/plugins/config/plugin3.go delete mode 100644 tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml delete mode 100644 tests/plugins/gzip/configs/.rr-http-withGzip.yaml delete mode 100644 tests/plugins/gzip/plugin_test.go delete mode 100644 tests/plugins/headers/configs/.rr-cors-headers.yaml delete mode 100644 tests/plugins/headers/configs/.rr-headers-init.yaml delete mode 100644 tests/plugins/headers/configs/.rr-req-headers.yaml delete mode 100644 tests/plugins/headers/configs/.rr-res-headers.yaml delete mode 100644 tests/plugins/headers/headers_plugin_test.go delete mode 100644 tests/plugins/http/attributes_test.go delete mode 100644 tests/plugins/http/configs/.rr-big-req-size.yaml delete mode 100644 tests/plugins/http/configs/.rr-broken-pipes.yaml delete mode 100644 tests/plugins/http/configs/.rr-env.yaml delete mode 100644 tests/plugins/http/configs/.rr-fcgi-reqUri.yaml delete mode 100644 tests/plugins/http/configs/.rr-fcgi.yaml delete mode 100644 tests/plugins/http/configs/.rr-h2c.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-ipv6-2.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-ipv6.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static-disabled.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static-etags.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static-files-disable.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static-files.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static-security.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-static.yaml delete mode 100644 tests/plugins/http/configs/.rr-http-supervised-pool.yaml delete mode 100644 tests/plugins/http/configs/.rr-http.yaml delete mode 100644 tests/plugins/http/configs/.rr-init.yaml delete mode 100644 tests/plugins/http/configs/.rr-issue659.yaml delete mode 100644 tests/plugins/http/configs/.rr-no-http.yaml delete mode 100644 tests/plugins/http/configs/.rr-resetter.yaml delete mode 100644 tests/plugins/http/configs/.rr-ssl-push.yaml delete mode 100644 tests/plugins/http/configs/.rr-ssl-redirect.yaml delete mode 100644 tests/plugins/http/configs/.rr-ssl.yaml delete mode 100644 tests/plugins/http/fixtures/server.crt delete mode 100644 tests/plugins/http/fixtures/server.key delete mode 100644 tests/plugins/http/handler_test.go delete mode 100644 tests/plugins/http/http_plugin_test.go delete mode 100644 tests/plugins/http/parse_test.go delete mode 100644 tests/plugins/http/plugin1.go delete mode 100644 tests/plugins/http/plugin_middleware.go delete mode 100644 tests/plugins/http/response_test.go delete mode 100644 tests/plugins/http/uploads_config_test.go delete mode 100644 tests/plugins/http/uploads_test.go delete mode 100644 tests/plugins/informer/.rr-informer.yaml delete mode 100644 tests/plugins/informer/informer_test.go delete mode 100644 tests/plugins/informer/test_plugin.go delete mode 100644 tests/plugins/jobs/amqp/.rr-amqp-declare.yaml delete mode 100644 tests/plugins/jobs/amqp/.rr-amqp-init.yaml delete mode 100644 tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml delete mode 100644 tests/plugins/jobs/amqp/.rr-no-global.yaml delete mode 100644 
tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml delete mode 100644 tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml delete mode 100644 tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml delete mode 100644 tests/plugins/jobs/beanstalk/.rr-no-global.yaml delete mode 100644 tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml delete mode 100644 tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml delete mode 100644 tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml delete mode 100644 tests/plugins/jobs/boltdb/.rr-no-global.yaml delete mode 100644 tests/plugins/jobs/configs/.rr-jobs-init.yaml delete mode 100644 tests/plugins/jobs/configs/.rr-jobs-metrics.yaml delete mode 100644 tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml delete mode 100644 tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml delete mode 100644 tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml delete mode 100644 tests/plugins/jobs/helpers.go delete mode 100644 tests/plugins/jobs/jobs_amqp_test.go delete mode 100644 tests/plugins/jobs/jobs_beanstalk_test.go delete mode 100644 tests/plugins/jobs/jobs_boltdb_test.go delete mode 100644 tests/plugins/jobs/jobs_general_test.go delete mode 100644 tests/plugins/jobs/jobs_memory_test.go delete mode 100644 tests/plugins/jobs/jobs_sqs_test.go delete mode 100644 tests/plugins/jobs/jobs_with_toxics_test.go delete mode 100644 tests/plugins/jobs/memory/.rr-memory-declare.yaml delete mode 100644 tests/plugins/jobs/memory/.rr-memory-init.yaml delete mode 100644 tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml delete mode 100644 tests/plugins/jobs/memory/.rr-memory-pause-resume.yaml delete mode 100644 tests/plugins/jobs/sqs/.rr-no-global.yaml delete mode 100644 tests/plugins/jobs/sqs/.rr-sqs-declare.yaml delete mode 100644 tests/plugins/jobs/sqs/.rr-sqs-init.yaml delete mode 100644 tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml delete mode 100644 tests/plugins/kv/configs/.rr-boltdb.yaml delete mode 100644 tests/plugins/kv/configs/.rr-in-memory.yaml delete mode 100644 tests/plugins/kv/configs/.rr-kv-bolt-no-interval.yaml delete mode 100644 tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml delete mode 100644 tests/plugins/kv/configs/.rr-kv-init.yaml delete mode 100644 tests/plugins/kv/configs/.rr-memcached.yaml delete mode 100644 tests/plugins/kv/configs/.rr-redis-global.yaml delete mode 100644 tests/plugins/kv/configs/.rr-redis-no-config.yaml delete mode 100644 tests/plugins/kv/configs/.rr-redis.yaml delete mode 100644 tests/plugins/kv/storage_plugin_test.go delete mode 100644 tests/plugins/logger/configs/.rr-file-logger.yaml delete mode 100644 tests/plugins/logger/configs/.rr-no-logger.yaml delete mode 100644 tests/plugins/logger/configs/.rr-no-logger2.yaml delete mode 100644 tests/plugins/logger/configs/.rr-raw-mode.yaml delete mode 100644 tests/plugins/logger/configs/.rr.yaml delete mode 100644 tests/plugins/logger/logger_test.go delete mode 100644 tests/plugins/logger/plugin.go delete mode 100644 tests/plugins/metrics/configs/.rr-http-metrics.yaml delete mode 100644 tests/plugins/metrics/configs/.rr-issue-571.yaml delete mode 100644 tests/plugins/metrics/configs/.rr-test.yaml delete mode 100644 tests/plugins/metrics/metrics_test.go delete mode 100644 tests/plugins/metrics/plugin1.go delete mode 100644 tests/plugins/reload/config_test.go delete mode 100644 tests/plugins/reload/configs/.rr-reload-2.yaml delete mode 100644 tests/plugins/reload/configs/.rr-reload-3.yaml delete mode 100644 tests/plugins/reload/configs/.rr-reload-4.yaml delete mode 
100644 tests/plugins/reload/configs/.rr-reload.yaml delete mode 100644 tests/plugins/reload/reload_plugin_test.go delete mode 100644 tests/plugins/resetter/.rr-resetter.yaml delete mode 100644 tests/plugins/resetter/resetter_test.go delete mode 100644 tests/plugins/resetter/test_plugin.go delete mode 100755 tests/plugins/rpc/config_test.go delete mode 100644 tests/plugins/rpc/configs/.rr-rpc-disabled.yaml delete mode 100644 tests/plugins/rpc/configs/.rr.yaml delete mode 100644 tests/plugins/rpc/plugin1.go delete mode 100644 tests/plugins/rpc/plugin2.go delete mode 100644 tests/plugins/rpc/rpc_test.go delete mode 100644 tests/plugins/server/configs/.rr-no-app-section.yaml delete mode 100644 tests/plugins/server/configs/.rr-sockets.yaml delete mode 100644 tests/plugins/server/configs/.rr-tcp.yaml delete mode 100644 tests/plugins/server/configs/.rr-wrong-command.yaml delete mode 100644 tests/plugins/server/configs/.rr-wrong-relay.yaml delete mode 100644 tests/plugins/server/configs/.rr.yaml delete mode 100644 tests/plugins/server/plugin_pipes.go delete mode 100644 tests/plugins/server/plugin_sockets.go delete mode 100644 tests/plugins/server/plugin_tcp.go delete mode 100644 tests/plugins/server/server_plugin_test.go delete mode 100644 tests/plugins/server/socket.php delete mode 100644 tests/plugins/server/tcp.php delete mode 100644 tests/plugins/service/configs/.rr-service-error.yaml delete mode 100644 tests/plugins/service/configs/.rr-service-init.yaml delete mode 100644 tests/plugins/service/configs/.rr-service-restarts.yaml delete mode 100644 tests/plugins/service/service_plugin_test.go delete mode 100644 tests/plugins/service/test_files/loop.php delete mode 100755 tests/plugins/service/test_files/test_binary delete mode 100755 tests/plugins/status/configs/.rr-ready-init.yaml delete mode 100755 tests/plugins/status/configs/.rr-status-init.yaml delete mode 100644 tests/plugins/status/plugin_test.go delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-allow.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-allow2.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-broker-no-section.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-deny.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-deny2.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-init.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-redis.yaml delete mode 100644 tests/plugins/websockets/configs/.rr-websockets-stop.yaml delete mode 100644 tests/plugins/websockets/websocket_plugin_test.go create mode 100644 transport/interface.go create mode 100755 transport/pipe/pipe_factory.go create mode 100644 transport/pipe/pipe_factory_spawn_test.go create mode 100755 transport/pipe/pipe_factory_test.go create mode 100755 transport/socket/socket_factory.go create mode 100644 transport/socket/socket_factory_spawn_test.go create mode 100755 transport/socket/socket_factory_test.go create mode 100644 worker/interface.go create mode 100755 worker/state.go create mode 100755 worker/state_test.go create mode 100755 worker/sync_worker.go create mode 100755 worker/sync_worker_test.go create mode 100755 worker/worker.go create mode 100755 worker/worker_test.go create mode 100644 worker_watcher/container/channel/vec.go create mode 100644 worker_watcher/container/queue/queue.go create mode 100755 worker_watcher/worker_watcher.go diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 
0224de69..be74d606 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -7,12 +7,12 @@ on: - beta - stable tags-ignore: - - '**' + - "**" paths-ignore: - - '**.md' + - "**.md" pull_request: paths-ignore: - - '**.md' + - "**.md" jobs: golang: @@ -22,9 +22,9 @@ jobs: strategy: fail-fast: true matrix: - php: [ "7.4", "8.0" ] - go: [ "1.17" ] - os: [ 'ubuntu-latest' ] + php: ["7.4", "8.0", "8.1"] + go: ["1.17.1"] + os: ["ubuntu-latest"] steps: - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v2 # action page: @@ -71,4 +71,3 @@ jobs: with: file: ./coverage-ci/summary.txt fail_ci_if_error: false - diff --git a/.vscode/settings.json b/.vscode/settings.json index 48749050..c5155388 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -5,12 +5,14 @@ "amqpjobs", "boltdb", "codecov", + "Errored", "golangci", "gomemcache", "goridge", "hget", "hset", "INMEMORY", + "keyvals", "memcachedkv", "memorykv", "mexpire", diff --git a/Makefile b/Makefile index ab6d6c0b..120e727c 100644 --- a/Makefile +++ b/Makefile @@ -15,28 +15,6 @@ test_coverage: go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/bst.out -covermode=atomic ./pkg/bst go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pq.out -covermode=atomic ./pkg/priority_queue go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker_stack.out -covermode=atomic ./pkg/worker_watcher - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/ws_origin.out -covermode=atomic ./plugins/websockets - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http_config.out -covermode=atomic ./plugins/http/config - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server_cmd.out -covermode=atomic ./plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/struct_jobs.out -covermode=atomic ./plugins/jobs/job - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipeline_jobs.out -covermode=atomic ./plugins/jobs/pipeline - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/jobs_core.out -covermode=atomic ./tests/plugins/jobs - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/kv_plugin.out -covermode=atomic ./tests/plugins/kv - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/broadcast_plugin.out -covermode=atomic ./tests/plugins/broadcast - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/websockets.out -covermode=atomic ./tests/plugins/websockets - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http.out -covermode=atomic ./tests/plugins/http - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/informer.out -covermode=atomic ./tests/plugins/informer - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/reload.out -covermode=atomic ./tests/plugins/reload - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server.out -covermode=atomic ./tests/plugins/server - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/service.out -covermode=atomic ./tests/plugins/service - go test -v -race -cover -tags=debug -coverpkg=./... 
-coverprofile=./coverage-ci/status.out -covermode=atomic ./tests/plugins/status - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/config.out -covermode=atomic ./tests/plugins/config - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/gzip.out -covermode=atomic ./tests/plugins/gzip - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/headers.out -covermode=atomic ./tests/plugins/headers - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/logger.out -covermode=atomic ./tests/plugins/logger - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/metrics.out -covermode=atomic ./tests/plugins/metrics - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/resetter.out -covermode=atomic ./tests/plugins/resetter - go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/rpc.out -covermode=atomic ./tests/plugins/rpc echo 'mode: atomic' > ./coverage-ci/summary.txt tail -q -n +2 ./coverage-ci/*.out >> ./coverage-ci/summary.txt docker-compose -f tests/env/docker-compose.yaml down @@ -50,26 +28,4 @@ test: ## Run application tests go test -v -race -tags=debug ./pkg/worker_watcher go test -v -race -tags=debug ./pkg/bst go test -v -race -tags=debug ./pkg/priority_queue - go test -v -race -tags=debug ./plugins/jobs/pipeline - go test -v -race -tags=debug ./plugins/http/config - go test -v -race -tags=debug ./plugins/server - go test -v -race -tags=debug ./plugins/jobs/job - go test -v -race -tags=debug ./tests/plugins/jobs - go test -v -race -tags=debug ./tests/plugins/kv - go test -v -race -tags=debug ./tests/plugins/broadcast - go test -v -race -tags=debug ./tests/plugins/websockets - go test -v -race -tags=debug ./plugins/websockets - go test -v -race -tags=debug ./tests/plugins/http - go test -v -race -tags=debug ./tests/plugins/informer - go test -v -race -tags=debug ./tests/plugins/reload - go test -v -race -tags=debug ./tests/plugins/server - go test -v -race -tags=debug ./tests/plugins/service - go test -v -race -tags=debug ./tests/plugins/status - go test -v -race -tags=debug ./tests/plugins/config - go test -v -race -tags=debug ./tests/plugins/gzip - go test -v -race -tags=debug ./tests/plugins/headers - go test -v -race -tags=debug ./tests/plugins/logger - go test -v -race -tags=debug ./tests/plugins/metrics - go test -v -race -tags=debug ./tests/plugins/resetter - go test -v -race -tags=debug ./tests/plugins/rpc docker-compose -f tests/env/docker-compose.yaml down diff --git a/bst/bst.go b/bst/bst.go new file mode 100644 index 00000000..dab9346c --- /dev/null +++ b/bst/bst.go @@ -0,0 +1,152 @@ +package bst + +// BST ... 
+type BST struct { + // registered topic, not unique + topic string + // associated connections with the topic + uuids map[string]struct{} + + // left and right subtrees + left *BST + right *BST +} + +func NewBST() Storage { + return &BST{ + uuids: make(map[string]struct{}, 10), + } +} + +// Insert uuid to the topic +func (b *BST) Insert(uuid string, topic string) { + curr := b + + for { + if topic == curr.topic { + curr.uuids[uuid] = struct{}{} + return + } + // if topic less than curr topic + if topic < curr.topic { + if curr.left == nil { + curr.left = &BST{ + topic: topic, + uuids: map[string]struct{}{uuid: {}}, + } + return + } + // move forward + curr = curr.left + } else { + if curr.right == nil { + curr.right = &BST{ + topic: topic, + uuids: map[string]struct{}{uuid: {}}, + } + return + } + + curr = curr.right + } + } +} + +func (b *BST) Contains(topic string) bool { + curr := b + for curr != nil { + switch { + case topic < curr.topic: + curr = curr.left + case topic > curr.topic: + curr = curr.right + case topic == curr.topic: + return true + } + } + + return false +} + +func (b *BST) Get(topic string) map[string]struct{} { + curr := b + for curr != nil { + switch { + case topic < curr.topic: + curr = curr.left + case topic > curr.topic: + curr = curr.right + case topic == curr.topic: + return curr.uuids + } + } + + return nil +} + +func (b *BST) Remove(uuid string, topic string) { + b.removeHelper(uuid, topic, nil) +} + +func (b *BST) removeHelper(uuid string, topic string, parent *BST) { + curr := b + for curr != nil { + if topic < curr.topic { //nolint:gocritic + parent = curr + curr = curr.left + } else if topic > curr.topic { + parent = curr + curr = curr.right + } else { + // if more than 1 topic - remove only topic, do not remove the whole vertex + if len(curr.uuids) > 1 { + if _, ok := curr.uuids[uuid]; ok { + delete(curr.uuids, uuid) + return + } + } + + if curr.left != nil && curr.right != nil { //nolint:gocritic + curr.topic, curr.uuids = curr.right.traverseForMinString() + curr.right.removeHelper(curr.topic, uuid, curr) + } else if parent == nil { + if curr.left != nil { //nolint:gocritic + curr.topic = curr.left.topic + curr.uuids = curr.left.uuids + + curr.right = curr.left.right + curr.left = curr.left.left + } else if curr.right != nil { + curr.topic = curr.right.topic + curr.uuids = curr.right.uuids + + curr.left = curr.right.left + curr.right = curr.right.right + } else { //nolint:staticcheck + // single node tree + } + } else if parent.left == curr { + if curr.left != nil { + parent.left = curr.left + } else { + parent.left = curr.right + } + } else if parent.right == curr { + if curr.left != nil { + parent.right = curr.left + } else { + parent.right = curr.right + } + } + break + } + } +} + +//go:inline +func (b *BST) traverseForMinString() (string, map[string]struct{}) { + if b.left == nil { + return b.topic, b.uuids + } + return b.left.traverseForMinString() +} diff --git a/bst/bst_test.go b/bst/bst_test.go new file mode 100644 index 00000000..2271508c --- /dev/null +++ b/bst/bst_test.go @@ -0,0 +1,325 @@ +package bst + +import ( + "math/rand" + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/assert" +) + +const predifined = "chat-1-2" + +func TestNewBST(t *testing.T) { + // create a new bst + g := NewBST() + + for i := 0; i < 100; i++ { + g.Insert(uuid.NewString(), "comments") + } + + for i := 0; i < 100; i++ { + g.Insert(uuid.NewString(), "comments2") + } + + for i := 0; i < 100; i++ { + g.Insert(uuid.NewString(), "comments3") 
+ } + + // should be 100 + exist := g.Get("comments") + assert.Len(t, exist, 100) + + // should be 100 + exist2 := g.Get("comments2") + assert.Len(t, exist2, 100) + + // should be 100 + exist3 := g.Get("comments3") + assert.Len(t, exist3, 100) +} + +func BenchmarkGraph(b *testing.B) { + g := NewBST() + + for i := 0; i < 1000; i++ { + uid := uuid.New().String() + g.Insert(uuid.NewString(), uid) + } + + g.Insert(uuid.NewString(), predifined) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + exist := g.Get(predifined) + _ = exist + } +} + +func BenchmarkBigSearch(b *testing.B) { + g1 := NewBST() + g2 := NewBST() + g3 := NewBST() + + predefinedSlice := make([]string, 0, 1000) + for i := 0; i < 1000; i++ { + predefinedSlice = append(predefinedSlice, uuid.NewString()) + } + if predefinedSlice == nil { + b.FailNow() + } + + for i := 0; i < 1000; i++ { + g1.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g2.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g3.Insert(uuid.NewString(), uuid.NewString()) + } + + for i := 0; i < 333; i++ { + g1.Insert(uuid.NewString(), predefinedSlice[i]) + } + + for i := 0; i < 333; i++ { + g2.Insert(uuid.NewString(), predefinedSlice[333+i]) + } + + for i := 0; i < 333; i++ { + g3.Insert(uuid.NewString(), predefinedSlice[666+i]) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g1.Get(predefinedSlice[i]) + _ = exist + } + } + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g2.Get(predefinedSlice[333+i]) + _ = exist + } + } + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g3.Get(predefinedSlice[666+i]) + _ = exist + } + } +} + +func BenchmarkBigSearchWithRemoves(b *testing.B) { + g1 := NewBST() + g2 := NewBST() + g3 := NewBST() + + predefinedSlice := make([]string, 0, 1000) + for i := 0; i < 1000; i++ { + predefinedSlice = append(predefinedSlice, uuid.NewString()) + } + if predefinedSlice == nil { + b.FailNow() + } + + for i := 0; i < 1000; i++ { + g1.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g2.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g3.Insert(uuid.NewString(), uuid.NewString()) + } + + for i := 0; i < 333; i++ { + g1.Insert(uuid.NewString(), predefinedSlice[i]) + } + + for i := 0; i < 333; i++ { + g2.Insert(uuid.NewString(), predefinedSlice[333+i]) + } + + for i := 0; i < 333; i++ { + g3.Insert(uuid.NewString(), predefinedSlice[666+i]) + } + + go func() { + tt := time.NewTicker(time.Millisecond) + for { + select { + case <-tt.C: + num := rand.Intn(333) //nolint:gosec + values := g1.Get(predefinedSlice[num]) + for k := range values { + g1.Remove(k, predefinedSlice[num]) + } + } + } + }() + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g1.Get(predefinedSlice[i]) + _ = exist + } + } + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g2.Get(predefinedSlice[333+i]) + _ = exist + } + } + for i := 0; i < b.N; i++ { + for i := 0; i < 333; i++ { + exist := g3.Get(predefinedSlice[666+i]) + _ = exist + } + } +} + +func TestGraph(t *testing.T) { + g := NewBST() + + for i := 0; i < 1000; i++ { + uid := uuid.New().String() + g.Insert(uuid.NewString(), uid) + } + + g.Insert(uuid.NewString(), predifined) + + exist := g.Get(predifined) + assert.NotNil(t, exist) + assert.Len(t, exist, 1) +} + +func TestTreeConcurrentContains(t *testing.T) { + g := 
NewBST() + + key1 := uuid.NewString() + key2 := uuid.NewString() + key3 := uuid.NewString() + key4 := uuid.NewString() + key5 := uuid.NewString() + + g.Insert(key1, predifined) + g.Insert(key2, predifined) + g.Insert(key3, predifined) + g.Insert(key4, predifined) + g.Insert(key5, predifined) + + for i := 0; i < 100; i++ { + go func() { + _ = g.Get(predifined) + }() + + go func() { + _ = g.Get(predifined) + }() + + go func() { + _ = g.Get(predifined) + }() + + go func() { + _ = g.Get(predifined) + }() + } + + time.Sleep(time.Second * 2) + + exist := g.Get(predifined) + assert.NotNil(t, exist) + assert.Len(t, exist, 5) +} + +func TestGraphRemove(t *testing.T) { + g := NewBST() + + key1 := uuid.NewString() + key2 := uuid.NewString() + key3 := uuid.NewString() + key4 := uuid.NewString() + key5 := uuid.NewString() + + g.Insert(key1, predifined) + g.Insert(key2, predifined) + g.Insert(key3, predifined) + g.Insert(key4, predifined) + g.Insert(key5, predifined) + + exist := g.Get(predifined) + assert.NotNil(t, exist) + assert.Len(t, exist, 5) + + g.Remove(key1, predifined) + + exist = g.Get(predifined) + assert.NotNil(t, exist) + assert.Len(t, exist, 4) +} + +func TestBigSearch(t *testing.T) { + g1 := NewBST() + g2 := NewBST() + g3 := NewBST() + + predefinedSlice := make([]string, 0, 1000) + for i := 0; i < 1000; i++ { + predefinedSlice = append(predefinedSlice, uuid.NewString()) + } + if predefinedSlice == nil { + t.FailNow() + } + + for i := 0; i < 1000; i++ { + g1.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g2.Insert(uuid.NewString(), uuid.NewString()) + } + for i := 0; i < 1000; i++ { + g3.Insert(uuid.NewString(), uuid.NewString()) + } + + for i := 0; i < 333; i++ { + g1.Insert(uuid.NewString(), predefinedSlice[i]) + } + + for i := 0; i < 333; i++ { + g2.Insert(uuid.NewString(), predefinedSlice[333+i]) + } + + for i := 0; i < 333; i++ { + g3.Insert(uuid.NewString(), predefinedSlice[666+i]) + } + + for i := 0; i < 333; i++ { + exist := g1.Get(predefinedSlice[i]) + assert.NotNil(t, exist) + assert.Len(t, exist, 1) + } + + for i := 0; i < 333; i++ { + exist := g2.Get(predefinedSlice[333+i]) + assert.NotNil(t, exist) + assert.Len(t, exist, 1) + } + + for i := 0; i < 333; i++ { + exist := g3.Get(predefinedSlice[666+i]) + assert.NotNil(t, exist) + assert.Len(t, exist, 1) + } +} diff --git a/bst/doc.go b/bst/doc.go new file mode 100644 index 00000000..abb7e6e9 --- /dev/null +++ b/bst/doc.go @@ -0,0 +1,7 @@ +package bst + +/* +Binary search tree for the pubsub + +The vertex may have one or multiply topics associated with the single websocket connection UUID +*/ diff --git a/bst/interface.go b/bst/interface.go new file mode 100644 index 00000000..95b03e11 --- /dev/null +++ b/bst/interface.go @@ -0,0 +1,13 @@ +package bst + +// Storage is general in-memory BST storage implementation +type Storage interface { + // Insert inserts to a vertex with topic ident connection uuid + Insert(uuid string, topic string) + // Remove removes uuid from topic, if the uuid is single for a topic, whole vertex will be removed + Remove(uuid, topic string) + // Get will return all connections associated with the topic + Get(topic string) map[string]struct{} + // Contains checks if the BST contains a topic + Contains(topic string) bool +} diff --git a/codecov.yml b/codecov.yml index e61cda06..088a4d1b 100644 --- a/codecov.yml +++ b/codecov.yml @@ -19,25 +19,18 @@ ignore: - "tests" - "systemd" - "utils/to_ptr.go" - - "plugins/metrics/config_test.go" - - 
"plugins/websockets/storage/storage_test.go" - - "plugins/websockets/config.go" - - "plugins/amqp/amqpjobs/config.go" - - "plugins/beanstalk/config.go" - - "plugins/redis/config.go" - - "plugins/redis/kv/config.go" - - "pkg/doc" - - "pkg/bst/bst_test.go" - - "pkg/pool/static_pool_test.go" - - "pkg/pool/supervisor_test.go" - - "pkg/transport/pipe/pipe_factory_spawn_test.go" - - "pkg/transport/pipe/pipe_factory_test.go" - - "pkg/transport/socket/socket_factory_spawn_test.go" - - "pkg/transport/socket/socket_factory_test.go" - - "pkg/transport/interface.go" - - "pkg/worker/state_test.go" - - "pkg/worker/sync_worker_test.go" - - "pkg/worker/worker_test.go" - - "pkg/events/pool_events.go" - - "pkg/events/worker_events.go" - - "pkg/events/jobs_events.go" + - "doc" + - "bst/bst_test.go" + - "pool/static_pool_test.go" + - "pool/supervisor_test.go" + - "transport/pipe/pipe_factory_spawn_test.go" + - "transport/pipe/pipe_factory_test.go" + - "transport/socket/socket_factory_spawn_test.go" + - "transport/socket/socket_factory_test.go" + - "transport/interface.go" + - "worker/state_test.go" + - "worker/sync_worker_test.go" + - "worker/worker_test.go" + - "events/pool_events.go" + - "events/worker_events.go" + - "events/jobs_events.go" diff --git a/common/doc.go b/common/doc.go deleted file mode 100644 index adc03351..00000000 --- a/common/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package common used to collect common interfaces/structures which might be implemented (or imported) by a different plugins. -For example, 'pubsub' interface might be implemented by memory, redis, websockets and many other plugins. - -Folders: -- kv - contains KV interfaces and structures -- pubsub - contains pub-sub interfaces and structures -*/ -package common diff --git a/common/jobs/interface.go b/common/jobs/interface.go deleted file mode 100644 index 4b5ff70e..00000000 --- a/common/jobs/interface.go +++ /dev/null @@ -1,31 +0,0 @@ -package jobs - -import ( - "context" - - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" -) - -// Consumer represents a single jobs driver interface -type Consumer interface { - Push(ctx context.Context, job *job.Job) error - Register(ctx context.Context, pipeline *pipeline.Pipeline) error - Run(ctx context.Context, pipeline *pipeline.Pipeline) error - Stop(ctx context.Context) error - - Pause(ctx context.Context, pipeline string) - Resume(ctx context.Context, pipeline string) - - // State provide information about driver state - State(ctx context.Context) (*jobState.State, error) -} - -// Constructor constructs Consumer interface. Endure abstraction. -type Constructor interface { - JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (Consumer, error) - FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (Consumer, error) -} diff --git a/common/kv/interface.go b/common/kv/interface.go deleted file mode 100644 index bc6a07b2..00000000 --- a/common/kv/interface.go +++ /dev/null @@ -1,42 +0,0 @@ -package kv - -import kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" - -// Storage represents single abstract storage. -type Storage interface { - // Has checks if value exists. - Has(keys ...string) (map[string]bool, error) - - // Get loads value content into a byte slice. 
- Get(key string) ([]byte, error) - - // MGet loads content of multiple values - // Returns the map with existing keys and associated values - MGet(keys ...string) (map[string][]byte, error) - - // Set used to upload item to KV with TTL - // 0 value in TTL means no TTL - Set(items ...*kvv1.Item) error - - // MExpire sets the TTL for multiply keys - MExpire(items ...*kvv1.Item) error - - // TTL return the rest time to live for provided keys - // Not supported for the memcached - TTL(keys ...string) (map[string]string, error) - - // Clear clean the entire storage - Clear() error - - // Delete one or multiple keys. - Delete(keys ...string) error - - // Stop the storage driver - Stop() -} - -// Constructor provides storage based on the config -type Constructor interface { - // KVConstruct provides Storage based on the config key - KVConstruct(key string) (Storage, error) -} diff --git a/common/pubsub/interface.go b/common/pubsub/interface.go deleted file mode 100644 index 5b69d577..00000000 --- a/common/pubsub/interface.go +++ /dev/null @@ -1,56 +0,0 @@ -package pubsub - -import "context" - -/* -This interface is in BETA. It might be changed. -*/ - -// PubSub interface designed to implement on any storage type to provide pub-sub abilities -// Publisher used to receive messages from the PHP app via RPC -// Subscriber should be implemented to subscribe to a topics and provide a connections list per topic -// Reader return next message from the channel -type PubSub interface { - Publisher - Subscriber - Reader -} - -type SubReader interface { - Subscriber - Reader -} - -// Subscriber defines the ability to operate as message passing broker. -// BETA interface -type Subscriber interface { - // Subscribe broker to one or multiple topics. - Subscribe(connectionID string, topics ...string) error - - // Unsubscribe from one or multiply topics - Unsubscribe(connectionID string, topics ...string) error - - // Connections returns all connections associated with the particular topic - Connections(topic string, ret map[string]struct{}) -} - -// Publisher publish one or more messages -// BETA interface -type Publisher interface { - // Publish one or multiple Channel. 
- Publish(message *Message) error - - // PublishAsync publish message and return immediately - // If error occurred it will be printed into the logger - PublishAsync(message *Message) -} - -// Reader interface should return next message -type Reader interface { - Next(ctx context.Context) (*Message, error) -} - -// Constructor is a special pub-sub interface made to return a constructed PubSub type -type Constructor interface { - PSConstruct(key string) (PubSub, error) -} diff --git a/common/pubsub/psmessage.go b/common/pubsub/psmessage.go deleted file mode 100644 index e33d9284..00000000 --- a/common/pubsub/psmessage.go +++ /dev/null @@ -1,15 +0,0 @@ -package pubsub - -import json "github.com/json-iterator/go" - -// Message represents a single message with payload bound to a particular topic -type Message struct { - // Topic (channel in terms of redis) - Topic string `json:"topic"` - // Payload (on some decode stages might be represented as base64 string) - Payload []byte `json:"payload"` -} - -func (m *Message) MarshalBinary() (data []byte, err error) { - return json.Marshal(m) -} diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 00000000..709df603 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,21 @@ +These are the drawio diagrams showing the basic workflows inside RoadRunner 2.0 + +Simple HTTP workflow description: +![alt text](pool_workflow.svg) + +1. Allocate sync workers. When a plugin that uses the workers pool starts, it allocates the required number of processes + via the `cmd.exec` command. + +2. When a user sends an HTTP request to RR2, the HTTP plugin receives it and passes it to the workers pool `Exec/ExecWithContext` +method. The workers pool then asks the worker watcher for a free worker. + +3. The workers watcher uses a stack data structure under the hood and performs a POP operation to get the first free worker. If there are +no workers in the `stack`, the watcher waits for the time specified in the config (`allocate_timeout`). + +4. The stack returns a free worker to the watcher. +5. The watcher returns that worker to the `pool`. +6. The pool invokes the `Exec/ExecWithTimeout` method on the golang worker with the provided request payload. +7. The golang worker sends that request to the PHP worker via one of the available transports (the `transport` package). +8. The PHP worker sends the response back to the golang worker (or an error via stderr). +9. The golang worker returns the response payload to the pool. +10. The pool processes this response and returns the answer to the user.
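Below is a minimal Go sketch of the same request flow driven directly against the pool. It assumes the `Initialize`/`Exec`/`Destroy` API and the `Config` fields (`NumWorkers`, `AllocateTimeout`, `DestroyTimeout`) exposed by the `pool`, `payload` and `transport/pipe` packages at their new top-level paths; the exact call signatures (for example pointer vs. value payloads) and the `psr-worker.php` script name are assumptions for illustration, not a definitive usage guide:

```go
package main

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/payload"
	"github.com/spiral/roadrunner/v2/pool"
	"github.com/spiral/roadrunner/v2/transport/pipe"
)

func main() {
	ctx := context.Background()

	// Step 1: allocate sync workers; the pool spawns NumWorkers PHP processes via the supplied command.
	p, err := pool.Initialize(ctx,
		func() *exec.Cmd { return exec.Command("php", "psr-worker.php") }, // hypothetical worker script
		pipe.NewPipeFactory(), // pipes transport; sockets are another option
		&pool.Config{
			NumWorkers:      4,
			AllocateTimeout: time.Second * 5, // how long to wait for a free worker (allocate_timeout)
			DestroyTimeout:  time.Second * 5,
		},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Destroy(ctx)

	// Steps 2-10: Exec pops a free worker from the watcher's stack, sends the payload to the
	// PHP worker over the chosen transport and returns its response (or an error from stderr).
	// NOTE: the pointer/value form of the payload is an assumption and may differ between releases.
	resp, err := p.Exec(&payload.Payload{Context: []byte(`{"headers":{}}`), Body: []byte("hello")})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(string(resp.Body))
}
```

In the real HTTP plugin this call happens inside the request handler; the sketch only mirrors the numbered steps above.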
diff --git a/doc/pool_workflow.drawio b/doc/pool_workflow.drawio new file mode 100644 index 00000000..3f74d0fc --- /dev/null +++ b/doc/pool_workflow.drawio @@ -0,0 +1 @@ +7Vxbd6M4Ev41OWfnoX0Qkrg8Jul09+zOTGc6vSfTjwRkm22MvCB37Pn1IwGydcExsTGO03HOiUEgLqqqr6o+lXwBr2fLj0U0n/5OE5JduE6yvIDvL1wXAMflX6Jl1bQ4jlO3TIo0qduUhrv0byJPbFoXaULKpq1uYpRmLJ3rjTHNcxIzrS0qCvqonzamWaI1zKMJsRru4iizW+/ThE3li3nh5sAnkk6m8tYeRvWRWSTPbl6lnEYJfVSa4M0FvC4oZfXWbHlNMjF8+sB82HJ0/WQFyVmXDn/hCPvfPk++/Ocm+HyPr5Z/TsfvvOYyP6Js0bzyhetl/IJXU37Im4itu3Q25wPiOv8tScG/CvL/BSmZPJHfcnNu865sJUewoIs8IeIZHH74cZoycjePYnH0kSuNuBGbZXwP8M0xzdmHaJZmQl+u6aJIqxv+Qfi4XUVZOsl5e0bG4k4/SMFSLqnLppnR+fr+6qjIN+Snk6XS1IzSR0JnhBUrfkpzFGNcd2mUFrmNFj+qGhA2GjBVpB9ItY0atZusL74RDN9oZPMMOYHQGliScE1tdmnBpnRC8yi72bRe6UO/Oec3KoaqGvD/EcZWjdlFC0Z1cZBlyv4S3Ucubna/KYfeL5tLVzsruZPzF1Z7if1v6sFNv2pPdtSE/2/CHooozUv+0r/TnD4l2pIrSkyeGr0GOaJiQtgT50lTFUP7pKYUJItY+kMHiTahN11vacqfea1hoedpGgYc2SCvUT9q083QnfVz7K9Orm32H8XrcNgS/8pVHgt9p8X3yv7+dfv5ln/ROSn4a9P8l1Zl/C164PCvKZA02ZiLjF/JNtpZmiS1rpIy/Tt6qK4nlGEuXr0aDHx1gd/vpR5PWpKFB2sn0jyFBtNtOPHOGTke0iUZ1HsH6sc7AJ2RDkL8ZsB1lA80VYaOxyU5jrZgS1vgmwbUQnE939e9RT8agAz5hyMHDSZv/7TOxledjdPV2fiaswEv3tnAvn3NQQGGRC7FxvEZ27hU4cNtnJs4QlDHYtiPjUPDxvEogEPZOHTsCEDcae3030RfwTvwdNm7vYgemaJ/B9DIw0MJ347+7iupi8G+j1g8bVGA5+VxaZZd04wWVV84HhMvjnl7yQr6nShHEj98cA7C4+55XQgNlwqbCErN63BjF2pa5x0tqwPWMA/qaPdJ6rSMbrT2uYO7WbejmwV9u9mq62VRRCvlhAbNthp8YOpeE71vtKe+Yr9m7lp2fpllNI6YleSVZwz4YBsS7AP4GHlHiecD3Y0gA1KOGMxDSwnaUecnFDaAGGhi6SmwQ7pvxyPgDyZuS9q/5ilLuSS5N+jTpycRCcatPt2LA/IwHsan+4GOq4HT4tOdFp9uGmB/Pr09bXpjajt5dXgqr36YzD3L6lQ69Y572vtzT6ukXvfEmjm6P/T7AV7fzKu8keerrKk7GA4jSyXcN+nX0oc8/NWl1I/0PYsz9Qfzu/CksA8UzFf40+flcv7pkrmusA/dF4X70MZ974yNHPZm5Dy0xrgnqwZoBAejRVE7SJ/V1Acf/B2GXO3dkiLlwyV076VYN+rbuvfiaoADA82PyKTiQ9cOPtrVwceHdcAONtT/CPwRaqEOZDHTOeKbNO4+8M1xJROxL74NAGZ2DHpH8qR65nJO81IkJxyL+L+p2FyUZ52h9CfeutjCmKHphwj0Xf2qbjAabNrHNuebJYn5pcTXfcqm11wQZMksHTh/nsic+2nlidrmfo7GE21JB88q0jhdkYWs1DuT6R/gID1C4A27PP6OHsdx+a6d0aAzdgnSyPrw+BAiA7t7mi1wDdoCgAHrQNzQEvivAgKjir4cF0R8recIryt0LETbii7EAxb2rMJPqh6uo08cyqmYA9UDADxCoakgCKmsJhhMW+yqoTsWxd8rR8r96Pbp5INCCEyCBLWFEIH7IOpgTxJCACc4df0ItI33tOsENBrS7UhDrqMItdtpaEi3Y1TRtXZzky46h9JhA6QHrqVNRn7wNZ0RuniN+QFwPd25tyYIshZwkAQB2umaNfDlNJqLzXFGlpdiuVtlLEmz+T7OorJMY8NYN9F8r5O++5SV7W+qOOzZVBU5t2G4bDvUmzuSQZPMHzCWcdRvbq38aSMdDQ7RGXYNEbTZJXN94BZtXcyyy5hRNQas4sVbWqZieRE/5YEyRmctQWK9ym/bksEjef7QWBAo1/4pOgMH9ft2sf6a2GuGXuX1tpR0V1TP0UJ0ESWUdcAwkJgAMDFcptAqhoeBLSizPKs3QSG70voLiUldG7IhYV+9YHyj2iN0bMHI9SbDCMZe0maJgV8mnZdEl4TEsJKjK+O5DyN2SFMnZW0hzXjcHBlg1D3PoDzlhMUO1ILHQi1kc0v2mO9MVhRZmEawdhpKCETnJK9bmuwleFIuL6/OXWrq7sXLHQOdQycwjNXxARo27EC+pUW32WKS5rxtXeparVJ+9bjqOUYsGUIbVuUvPAwCq7Kc+q0K6qj0g+QDX0gZFLYz1fYVxOfBCuNthrhXpSuSBnho5ojaS1kNtvn4NBE+7erwM6cYUWfeond/fpiN2ylncM423uP6cBA6ejlzP/OCAIhCaeWjzwfLepQByojsWYYtqxkOWiOMxV8rD1x9OsheHFf61Z+eUliMXtpCI2zzbzVdz990ldEoefXxL0BGjUQb4SOXGgwT/9q0whfy46fje5Br1MMGLUTcoHwPtjHs09ev4ket5k3yeBCU7QImA+qCmLT/HMJDwHHmyYmRZ7DW2FhkIxfDaIjVwoZCdCQheHb1gDXuL2lGa6sQdgZvuGuCdqJZJ+yH+jW6zzo54QgHys+Q6TpmTnkcmQyShL9KBlGadUBY1SRzmpNngW4bHOiAsUu1LMtfc8Z94K9jBCt+S6UKalE391iximczBB9pFuUT3rYllvzJRAbd3SJrjS+PJzM7lrn9JDxm5+nEVy0wLHmdJwQWoF7kxXc3PxNcQ+fm55bhzT8= \ No newline at end of file diff --git a/doc/pool_workflow.svg b/doc/pool_workflow.svg new file mode 100644 index 00000000..1e043eaa --- /dev/null +++ b/doc/pool_workflow.svg @@ -0,0 +1,3 @@ + + +

[doc/pool_workflow.svg: drawio-exported diagram; only its text labels survive extraction — boxes: HTTP plugin, Pool, Workers Watcher, Stack with workers, Golang Worker, PHP worker; edges: Simple User request, Plugin Initialization / Allocate sync workers / Initialize, Give me SyncWorker, Give me sync worker (POP operation), Get worker, I have free workers, here you are, Exec/ExecWithContext, Exec/ExecWithTimeout, Exec payload, Send request to the worker, Receive response, Send response to the user; numbered steps 1-10; fallback text "Viewer does not support full SVG 1.1"]
\ No newline at end of file diff --git a/events/general.go b/events/general.go new file mode 100755 index 00000000..5cf13e10 --- /dev/null +++ b/events/general.go @@ -0,0 +1,41 @@ +package events + +import ( + "sync" +) + +const UnknownEventType string = "Unknown event type" + +// HandlerImpl helps to broadcast events to multiple listeners. +type HandlerImpl struct { + listeners []Listener + sync.RWMutex // all receivers should be pointers +} + +func NewEventsHandler() Handler { + return &HandlerImpl{listeners: make([]Listener, 0, 2)} +} + +// NumListeners returns the number of event listeners. +func (eb *HandlerImpl) NumListeners() int { + eb.Lock() + defer eb.Unlock() + return len(eb.listeners) +} + +// AddListener registers a new event listener. +func (eb *HandlerImpl) AddListener(listener Listener) { + eb.Lock() + defer eb.Unlock() + eb.listeners = append(eb.listeners, listener) +} + +// Push broadcasts events across all event listeners. +func (eb *HandlerImpl) Push(e interface{}) { + // ReadLock here because we are not changing listeners + eb.RLock() + defer eb.RUnlock() + for k := range eb.listeners { + eb.listeners[k](e) + } +} diff --git a/events/interface.go b/events/interface.go new file mode 100644 index 00000000..7d57e4d0 --- /dev/null +++ b/events/interface.go @@ -0,0 +1,14 @@ +package events + +// Handler interface +type Handler interface { + // NumListeners returns the number of active listeners + NumListeners() int + // AddListener adds a listener to the publisher + AddListener(listener Listener) + // Push pushes an event to the listeners + Push(e interface{}) +} + +// Listener is an event listener which listens for the events produced by a worker, worker pool or other service. +type Listener func(event interface{}) diff --git a/events/jobs_events.go b/events/jobs_events.go new file mode 100644 index 00000000..f65ede67 --- /dev/null +++ b/events/jobs_events.go @@ -0,0 +1,81 @@ +package events + +import ( + "time" +) + +const ( + // EventPushOK thrown when a new job has been added. JobEvent is passed as context. + EventPushOK J = iota + 12000 + + // EventPushError caused when a job cannot be registered. + EventPushError + + // EventJobStart thrown when a new job is received. + EventJobStart + + // EventJobOK thrown when a job execution is successfully completed. JobEvent is passed as context. + EventJobOK + + // EventJobError thrown on all job-related errors. See JobError as context. + EventJobError + + // EventPipeActive when a pipeline has started. + EventPipeActive + + // EventPipeStopped when a pipeline has been stopped. + EventPipeStopped + + // EventPipePaused when a pipeline has been paused. + EventPipePaused + + // EventPipeError when a pipeline-specific error happens. + EventPipeError + + // EventDriverReady thrown when the broker is ready to accept/serve tasks. + EventDriverReady +) + +type J int64 + +func (ev J) String() string { + switch ev { + case EventPushOK: + return "EventPushOK" + case EventPushError: + return "EventPushError" + case EventJobStart: + return "EventJobStart" + case EventJobOK: + return "EventJobOK" + case EventJobError: + return "EventJobError" + case EventPipeActive: + return "EventPipeActive" + case EventPipeStopped: + return "EventPipeStopped" + case EventPipeError: + return "EventPipeError" + case EventDriverReady: + return "EventDriverReady" + case EventPipePaused: + return "EventPipePaused" + } + return UnknownEventType +} + +// JobEvent represents a job event. +type JobEvent struct { + Event J + // ID is the job id.
+ ID string + // Pipeline name + Pipeline string + // Associated driver name (amqp, ephemeral, etc) + Driver string + // Error for the jobs/pipes errors + Error error + // event timings + Start time.Time + Elapsed time.Duration +} diff --git a/events/pool_events.go b/events/pool_events.go new file mode 100644 index 00000000..eb28df6a --- /dev/null +++ b/events/pool_events.go @@ -0,0 +1,71 @@ +package events + +const ( + // EventWorkerConstruct thrown when a new worker is spawned. + EventWorkerConstruct P = iota + 10000 + + // EventWorkerDestruct thrown after worker destruction. + EventWorkerDestruct + + // EventSupervisorError triggered when the supervisor cannot complete its work. + EventSupervisorError + + // EventWorkerProcessExit triggered on process wait exit + EventWorkerProcessExit + + // EventNoFreeWorkers triggered when there are no free workers in the stack and the worker allocate timeout has elapsed + EventNoFreeWorkers + + // EventMaxMemory caused when a worker consumes more memory than allowed. + EventMaxMemory + + // EventTTL thrown when a worker is removed due to TTL being reached. TTL defines the maximum time a worker is allowed to live (seconds) + EventTTL + + // EventIdleTTL triggered when a worker spends too much time at rest. + EventIdleTTL + + // EventExecTTL triggered when a worker spends too much time doing the task (max_execution_time). + EventExecTTL + + // EventPoolRestart triggered when a pool restart is needed + EventPoolRestart +) + +type P int64 + +func (ev P) String() string { + switch ev { + case EventWorkerProcessExit: + return "EventWorkerProcessExit" + case EventWorkerConstruct: + return "EventWorkerConstruct" + case EventWorkerDestruct: + return "EventWorkerDestruct" + case EventSupervisorError: + return "EventSupervisorError" + case EventNoFreeWorkers: + return "EventNoFreeWorkers" + case EventMaxMemory: + return "EventMaxMemory" + case EventTTL: + return "EventTTL" + case EventIdleTTL: + return "EventIdleTTL" + case EventExecTTL: + return "EventExecTTL" + case EventPoolRestart: + return "EventPoolRestart" + } + return UnknownEventType +} + +// PoolEvent triggered by the pool on different events. The pool can also trigger a WorkerEvent in case of a log. +type PoolEvent struct { + // Event type, see below. + Event P + + // Payload depends on the event type, typically it's a worker or an error. + Payload interface{} + Error error +} diff --git a/events/worker_events.go b/events/worker_events.go new file mode 100644 index 00000000..39c38e57 --- /dev/null +++ b/events/worker_events.go @@ -0,0 +1,36 @@ +package events + +const ( + // EventWorkerError triggered after WorkerProcess. Expect the payload to be an error. + EventWorkerError W = iota + 11000 + // EventWorkerLog triggered on every write to the WorkerProcess StdErr pipe (batched). Expect the payload to be a []byte string. + EventWorkerLog + // EventWorkerStderr is the worker standard error output + EventWorkerStderr +) + +type W int64 + +func (ev W) String() string { + switch ev { + case EventWorkerError: + return "EventWorkerError" + case EventWorkerLog: + return "EventWorkerLog" + case EventWorkerStderr: + return "EventWorkerStderr" + } + return UnknownEventType +} + +// WorkerEvent wraps worker events. +type WorkerEvent struct { + // Event id, see below. + Event W + + // Worker that triggered the event. + Worker interface{} + + // Event-specific payload.
+ Payload interface{} +} diff --git a/go.mod b/go.mod index 85421a96..892d22bb 100644 --- a/go.mod +++ b/go.mod @@ -3,91 +3,29 @@ module github.com/spiral/roadrunner/v2 go 1.17 require ( - github.com/Shopify/toxiproxy v2.1.4+incompatible - // ========= AWS SDK v2 - github.com/aws/aws-sdk-go-v2 v1.9.0 - github.com/aws/aws-sdk-go-v2/config v1.7.0 - github.com/aws/aws-sdk-go-v2/credentials v1.4.0 - github.com/aws/aws-sdk-go-v2/service/sqs v1.8.0 - github.com/aws/smithy-go v1.8.0 - // ===================== - github.com/beanstalkd/go-beanstalk v0.1.0 - github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b - github.com/cenkalti/backoff/v4 v4.1.1 - github.com/fasthttp/websocket v1.4.3 - github.com/fatih/color v1.12.0 - github.com/go-redis/redis/v8 v8.11.3 - github.com/gofiber/fiber/v2 v2.18.0 - github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 - github.com/json-iterator/go v1.1.11 - github.com/klauspost/compress v1.13.5 - github.com/prometheus/client_golang v1.11.0 - github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891 + github.com/json-iterator/go v1.1.12 github.com/shirou/gopsutil v3.21.8+incompatible - github.com/spf13/viper v1.8.1 - // SPIRAL ==== - github.com/spiral/endure v1.0.4 github.com/spiral/errors v1.0.12 github.com/spiral/goridge/v3 v3.2.1 - // =========== github.com/stretchr/testify v1.7.0 github.com/valyala/tcplisten v1.0.0 - github.com/yookoala/gofast v0.6.0 - go.etcd.io/bbolt v1.3.6 go.uber.org/multierr v1.7.0 - go.uber.org/zap v1.19.0 - golang.org/x/net v0.0.0-20210825183410-e898025ed96a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e - google.golang.org/protobuf v1.27.1 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( github.com/StackExchange/wmi v1.2.1 // indirect - github.com/andybalholm/brotli v1.0.3 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.4.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.7.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.5 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/magiconair/properties v1.8.5 // indirect - github.com/mattn/go-colorable v0.1.8 // indirect - github.com/mattn/go-isatty v0.0.13 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/kr/pretty v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/pelletier/go-toml v1.9.3 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.30.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - 
github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/subosito/gotenv v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.9 // indirect github.com/tklauser/numcpus v0.3.0 // indirect - github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasthttp v1.29.0 // indirect - github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect - github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.5 // indirect - gopkg.in/ini.v1 v1.62.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index e144019b..3a25bdb1 100644 --- a/go.sum +++ b/go.sum @@ -1,834 +1,72 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.3 h1:fpcw+r1N1h0Poc1F/pHbW40cUm/lMEQslZtCkBQ0UnM= -github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go-v2 v1.9.0 h1:+S+dSqQCN3MSU5vJRu1HqHrq00cJn6heIMU7X9hcsoo= -github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2/config v1.7.0 h1:J2cZ7qe+3IpqBEXnHUrFrOjoB9BlsXg7j53vxcl5IVg= -github.com/aws/aws-sdk-go-v2/config v1.7.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY= -github.com/aws/aws-sdk-go-v2/credentials v1.4.0 h1:kmvesfjY861FzlCU9mvAfe01D9aeXcG2ZuC+k9F2YLM= -github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= 
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0 h1:OxTAgH8Y4BXHD6PGCJ8DHx2kaZPCQfSTqmDsdRZFezE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2 h1:d95cddM3yTm4qffj3P6EnP+TzX1SSkWaQypXSgT/hpA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 h1:VNJ5NLBteVXEwE2F1zEXVmyIH58mZ6kIQGJoC7C+vkg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.8.0 h1:BI05Jbkaqp5IDxiobr3B59mX07lfpLJDv5NwAEx3wSs= -github.com/aws/aws-sdk-go-v2/service/sqs v1.8.0/go.mod h1:BXA1CVaEd9TBOQ8G2ke7lMWdVggAeh35+h2HDO50z7s= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.0 h1:sHXMIKYS6YiLPzmKSvDpPmOpJDHxmAUgbiF49YNVztg= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.0 h1:1at4e5P+lvHNl2nUktdM2/v+rpICg/QSEr9TO/uW9vU= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM= -github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/beanstalkd/go-beanstalk v0.1.0 h1:IiNwYbAoVBDs5xEOmleGoX+DRD3Moz99EpATbl8672w= -github.com/beanstalkd/go-beanstalk v0.1.0/go.mod h1:/G8YTyChOtpOArwLTQPY1CHB+i212+av35bkPXXj56Y= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0= -github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fasthttp/websocket v1.4.3 h1:qjhRJ/rTy4KB8oBxljEC00SDt6HUY9jLRfM601SUdS4= -github.com/fasthttp/websocket v1.4.3/go.mod h1:5r4oKssgS7W6Zn6mPWap3NWzNPJNzUUh3baWTOhcYQk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= -github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt 
v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-redis/redis/v8 v8.11.3 h1:GCjoYp8c+yQTJfc0n69iwSiHjvuAdruxl7elnZCxgt8= -github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc= -github.com/go-restit/lzjson v0.0.0-20161206095556-efe3c53acc68/go.mod h1:7vXSKQt83WmbPeyVjCfNT9YDJ5BUFmcwFsEjI9SCvYM= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofiber/fiber/v2 v2.18.0 h1:xCWYSVoTNibHpzfciPwUSZGiTyTpTXYchCwynuJU09s= -github.com/gofiber/fiber/v2 v2.18.0/go.mod h1:/LdZHMUXZvTTo7gU4+b1hclqCAdoQphNQ9bi9gutPyI= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= -github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 
h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891 h1:13nv5f/LNJxNpvpYm/u0NqrlFebon342f9Xu9GpklKc= -github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/savsgio/gotils v0.0.0-20200608150037-a5f6f5aef16c/go.mod h1:TWNAOTaVzGOXq8RbEvHnhzA/A2sLZzgn0m6URjnukY8= -github.com/savsgio/gotils 
v0.0.0-20210617111740-97865ed5a873 h1:N3Af8f13ooDKcIhsmFT7Z05CStZWu4C7Md0uDEy4q6o= -github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873/go.mod h1:dmPawKuiAeG/aFYVs2i+Dyosoo7FNcm+Pi8iK6ZUrX8= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck= -github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/spiral/endure v1.0.4 h1:qpProWUVuu6fRceMnIHs9SkpkjlzAxPl7UxSH6zUPDo= -github.com/spiral/endure v1.0.4/go.mod h1:I9IoSCMtqXVmXX0TQ3Gu72Z1uIDVNKlhKXmcCoqnR/w= github.com/spiral/errors v1.0.12 h1:38Waf8ZL/Xvxg4HTYGmrUbvi7TCHivmuatNQZlBhQ8s= github.com/spiral/errors v1.0.12/go.mod h1:j5UReqxZxfkwXkI9mFY87VhEXcXmSg7kAk5Sswy1eEA= github.com/spiral/goridge/v3 v3.2.1 h1:5IJofcvWYjAy+X5XevOhwf/8F0i0Bu/baPsBGiSgqzU= github.com/spiral/goridge/v3 v3.2.1/go.mod h1:jDHXTORSxchJYCv2jG4vtZojsa+4JJyXmfdPefOpJ3c= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.14.0/go.mod h1:ol1PCaL0dX20wC0htZ7sYCsvCYmrouYra0zHzaclZhE= -github.com/valyala/fasthttp v1.29.0 h1:F5GKpytwFk5OhCuRh6H+d4vZAcEeNAwPTdwQnm6IERY= -github.com/valyala/fasthttp v1.29.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc= github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= -github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/yookoala/gofast v0.6.0 h1:E5x2acfUD7GkzCf8bmIMwnV10VxDy5tUCHc5LGhluwc= -github.com/yookoala/gofast v0.6.0/go.mod h1:OJU201Q6HCaE1cASckaTbMm3KB6e0cZxK0mgqfwOKvQ= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint 
v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e h1:XMgFehsDnnLGtjvjOfqWSUzt0alpTR1RSEuznObga2c= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200908211811-12e1bf57a112/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/payload/payload.go b/payload/payload.go new file mode 100755 index 00000000..e1e45ac1 --- /dev/null +++ b/payload/payload.go @@ -0,0 +1,20 @@ +package payload + +import ( + "github.com/spiral/roadrunner/v2/utils" +) + +// Payload carries binary header and body to stack and +// back to the server. +type Payload struct { + // Context represent payload context, might be omitted. + Context []byte + + // body contains binary payload to be processed by WorkerProcess. 
+ Body []byte +} + +// String returns payload body as string +func (p *Payload) String() string { + return utils.AsString(p.Body) +} diff --git a/pkg/bst/bst.go b/pkg/bst/bst.go deleted file mode 100644 index dab9346c..00000000 --- a/pkg/bst/bst.go +++ /dev/null @@ -1,152 +0,0 @@ -package bst - -// BST ... -type BST struct { - // registered topic, not unique - topic string - // associated connections with the topic - uuids map[string]struct{} - - // left and right subtrees - left *BST - right *BST -} - -func NewBST() Storage { - return &BST{ - uuids: make(map[string]struct{}, 10), - } -} - -// Insert uuid to the topic -func (b *BST) Insert(uuid string, topic string) { - curr := b - - for { - if topic == curr.topic { - curr.uuids[uuid] = struct{}{} - return - } - // if topic less than curr topic - if topic < curr.topic { - if curr.left == nil { - curr.left = &BST{ - topic: topic, - uuids: map[string]struct{}{uuid: {}}, - } - return - } - // move forward - curr = curr.left - } else { - if curr.right == nil { - curr.right = &BST{ - topic: topic, - uuids: map[string]struct{}{uuid: {}}, - } - return - } - - curr = curr.right - } - } -} - -func (b *BST) Contains(topic string) bool { - curr := b - for curr != nil { - switch { - case topic < curr.topic: - curr = curr.left - case topic > curr.topic: - curr = curr.right - case topic == curr.topic: - return true - } - } - - return false -} - -func (b *BST) Get(topic string) map[string]struct{} { - curr := b - for curr != nil { - switch { - case topic < curr.topic: - curr = curr.left - case topic > curr.topic: - curr = curr.right - case topic == curr.topic: - return curr.uuids - } - } - - return nil -} - -func (b *BST) Remove(uuid string, topic string) { - b.removeHelper(uuid, topic, nil) -} - -func (b *BST) removeHelper(uuid string, topic string, parent *BST) { - curr := b - for curr != nil { - if topic < curr.topic { //nolint:gocritic - parent = curr - curr = curr.left - } else if topic > curr.topic { - parent = curr - curr = curr.right - } else { - // if more than 1 topic - remove only topic, do not remove the whole vertex - if len(curr.uuids) > 1 { - if _, ok := curr.uuids[uuid]; ok { - delete(curr.uuids, uuid) - return - } - } - - if curr.left != nil && curr.right != nil { //nolint:gocritic - curr.topic, curr.uuids = curr.right.traverseForMinString() - curr.right.removeHelper(curr.topic, uuid, curr) - } else if parent == nil { - if curr.left != nil { //nolint:gocritic - curr.topic = curr.left.topic - curr.uuids = curr.left.uuids - - curr.right = curr.left.right - curr.left = curr.left.left - } else if curr.right != nil { - curr.topic = curr.right.topic - curr.uuids = curr.right.uuids - - curr.left = curr.right.left - curr.right = curr.right.right - } else { //nolint:staticcheck - // single node tree - } - } else if parent.left == curr { - if curr.left != nil { - parent.left = curr.left - } else { - parent.left = curr.right - } - } else if parent.right == curr { - if curr.left != nil { - parent.right = curr.left - } else { - parent.right = curr.right - } - } - break - } - } -} - -//go:inline -func (b *BST) traverseForMinString() (string, map[string]struct{}) { - if b.left == nil { - return b.topic, b.uuids - } - return b.left.traverseForMinString() -} diff --git a/pkg/bst/bst_test.go b/pkg/bst/bst_test.go deleted file mode 100644 index 2271508c..00000000 --- a/pkg/bst/bst_test.go +++ /dev/null @@ -1,325 +0,0 @@ -package bst - -import ( - "math/rand" - "testing" - "time" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" -) - 
-const predifined = "chat-1-2" - -func TestNewBST(t *testing.T) { - // create a new bst - g := NewBST() - - for i := 0; i < 100; i++ { - g.Insert(uuid.NewString(), "comments") - } - - for i := 0; i < 100; i++ { - g.Insert(uuid.NewString(), "comments2") - } - - for i := 0; i < 100; i++ { - g.Insert(uuid.NewString(), "comments3") - } - - // should be 100 - exist := g.Get("comments") - assert.Len(t, exist, 100) - - // should be 100 - exist2 := g.Get("comments2") - assert.Len(t, exist2, 100) - - // should be 100 - exist3 := g.Get("comments3") - assert.Len(t, exist3, 100) -} - -func BenchmarkGraph(b *testing.B) { - g := NewBST() - - for i := 0; i < 1000; i++ { - uid := uuid.New().String() - g.Insert(uuid.NewString(), uid) - } - - g.Insert(uuid.NewString(), predifined) - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - exist := g.Get(predifined) - _ = exist - } -} - -func BenchmarkBigSearch(b *testing.B) { - g1 := NewBST() - g2 := NewBST() - g3 := NewBST() - - predefinedSlice := make([]string, 0, 1000) - for i := 0; i < 1000; i++ { - predefinedSlice = append(predefinedSlice, uuid.NewString()) - } - if predefinedSlice == nil { - b.FailNow() - } - - for i := 0; i < 1000; i++ { - g1.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g2.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g3.Insert(uuid.NewString(), uuid.NewString()) - } - - for i := 0; i < 333; i++ { - g1.Insert(uuid.NewString(), predefinedSlice[i]) - } - - for i := 0; i < 333; i++ { - g2.Insert(uuid.NewString(), predefinedSlice[333+i]) - } - - for i := 0; i < 333; i++ { - g3.Insert(uuid.NewString(), predefinedSlice[666+i]) - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g1.Get(predefinedSlice[i]) - _ = exist - } - } - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g2.Get(predefinedSlice[333+i]) - _ = exist - } - } - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g3.Get(predefinedSlice[666+i]) - _ = exist - } - } -} - -func BenchmarkBigSearchWithRemoves(b *testing.B) { - g1 := NewBST() - g2 := NewBST() - g3 := NewBST() - - predefinedSlice := make([]string, 0, 1000) - for i := 0; i < 1000; i++ { - predefinedSlice = append(predefinedSlice, uuid.NewString()) - } - if predefinedSlice == nil { - b.FailNow() - } - - for i := 0; i < 1000; i++ { - g1.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g2.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g3.Insert(uuid.NewString(), uuid.NewString()) - } - - for i := 0; i < 333; i++ { - g1.Insert(uuid.NewString(), predefinedSlice[i]) - } - - for i := 0; i < 333; i++ { - g2.Insert(uuid.NewString(), predefinedSlice[333+i]) - } - - for i := 0; i < 333; i++ { - g3.Insert(uuid.NewString(), predefinedSlice[666+i]) - } - - go func() { - tt := time.NewTicker(time.Millisecond) - for { - select { - case <-tt.C: - num := rand.Intn(333) //nolint:gosec - values := g1.Get(predefinedSlice[num]) - for k := range values { - g1.Remove(k, predefinedSlice[num]) - } - } - } - }() - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g1.Get(predefinedSlice[i]) - _ = exist - } - } - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g2.Get(predefinedSlice[333+i]) - _ = exist - } - } - for i := 0; i < b.N; i++ { - for i := 0; i < 333; i++ { - exist := g3.Get(predefinedSlice[666+i]) - _ = exist - } - } -} - -func 
TestGraph(t *testing.T) { - g := NewBST() - - for i := 0; i < 1000; i++ { - uid := uuid.New().String() - g.Insert(uuid.NewString(), uid) - } - - g.Insert(uuid.NewString(), predifined) - - exist := g.Get(predifined) - assert.NotNil(t, exist) - assert.Len(t, exist, 1) -} - -func TestTreeConcurrentContains(t *testing.T) { - g := NewBST() - - key1 := uuid.NewString() - key2 := uuid.NewString() - key3 := uuid.NewString() - key4 := uuid.NewString() - key5 := uuid.NewString() - - g.Insert(key1, predifined) - g.Insert(key2, predifined) - g.Insert(key3, predifined) - g.Insert(key4, predifined) - g.Insert(key5, predifined) - - for i := 0; i < 100; i++ { - go func() { - _ = g.Get(predifined) - }() - - go func() { - _ = g.Get(predifined) - }() - - go func() { - _ = g.Get(predifined) - }() - - go func() { - _ = g.Get(predifined) - }() - } - - time.Sleep(time.Second * 2) - - exist := g.Get(predifined) - assert.NotNil(t, exist) - assert.Len(t, exist, 5) -} - -func TestGraphRemove(t *testing.T) { - g := NewBST() - - key1 := uuid.NewString() - key2 := uuid.NewString() - key3 := uuid.NewString() - key4 := uuid.NewString() - key5 := uuid.NewString() - - g.Insert(key1, predifined) - g.Insert(key2, predifined) - g.Insert(key3, predifined) - g.Insert(key4, predifined) - g.Insert(key5, predifined) - - exist := g.Get(predifined) - assert.NotNil(t, exist) - assert.Len(t, exist, 5) - - g.Remove(key1, predifined) - - exist = g.Get(predifined) - assert.NotNil(t, exist) - assert.Len(t, exist, 4) -} - -func TestBigSearch(t *testing.T) { - g1 := NewBST() - g2 := NewBST() - g3 := NewBST() - - predefinedSlice := make([]string, 0, 1000) - for i := 0; i < 1000; i++ { - predefinedSlice = append(predefinedSlice, uuid.NewString()) - } - if predefinedSlice == nil { - t.FailNow() - } - - for i := 0; i < 1000; i++ { - g1.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g2.Insert(uuid.NewString(), uuid.NewString()) - } - for i := 0; i < 1000; i++ { - g3.Insert(uuid.NewString(), uuid.NewString()) - } - - for i := 0; i < 333; i++ { - g1.Insert(uuid.NewString(), predefinedSlice[i]) - } - - for i := 0; i < 333; i++ { - g2.Insert(uuid.NewString(), predefinedSlice[333+i]) - } - - for i := 0; i < 333; i++ { - g3.Insert(uuid.NewString(), predefinedSlice[666+i]) - } - - for i := 0; i < 333; i++ { - exist := g1.Get(predefinedSlice[i]) - assert.NotNil(t, exist) - assert.Len(t, exist, 1) - } - - for i := 0; i < 333; i++ { - exist := g2.Get(predefinedSlice[333+i]) - assert.NotNil(t, exist) - assert.Len(t, exist, 1) - } - - for i := 0; i < 333; i++ { - exist := g3.Get(predefinedSlice[666+i]) - assert.NotNil(t, exist) - assert.Len(t, exist, 1) - } -} diff --git a/pkg/bst/doc.go b/pkg/bst/doc.go deleted file mode 100644 index abb7e6e9..00000000 --- a/pkg/bst/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -package bst - -/* -Binary search tree for the pubsub - -The vertex may have one or multiply topics associated with the single websocket connection UUID -*/ diff --git a/pkg/bst/interface.go b/pkg/bst/interface.go deleted file mode 100644 index 95b03e11..00000000 --- a/pkg/bst/interface.go +++ /dev/null @@ -1,13 +0,0 @@ -package bst - -// Storage is general in-memory BST storage implementation -type Storage interface { - // Insert inserts to a vertex with topic ident connection uuid - Insert(uuid string, topic string) - // Remove removes uuid from topic, if the uuid is single for a topic, whole vertex will be removed - Remove(uuid, topic string) - // Get will return all connections associated with the topic - Get(topic string) 
map[string]struct{} - // Contains checks if the BST contains a topic - Contains(topic string) bool -} diff --git a/pkg/doc/README.md b/pkg/doc/README.md deleted file mode 100644 index 709df603..00000000 --- a/pkg/doc/README.md +++ /dev/null @@ -1,21 +0,0 @@ -This is the drawio diagrams showing basic workflows inside RoadRunner 2.0 - -Simple HTTP workflow description: -![alt text](pool_workflow.svg) - -1. Allocate sync workers. When plugin starts (which use workers pool), then it allocates required number of processes - via `cmd.exec` command. - -2. When user send HTTP request to the RR2, HTTP plugin receive it and transfer to the workers pool `Exec/ExecWithContex` -method. And workers pool ask Worker watcher to get free worker. - -3. Workers watcher uses stack data structure under the hood and making POP operation to get first free worker. If there are -no workers in the `stack`, watcher waits for the specified via config (`allocate_timeout`) time. - -4. Stack returns free worker to the watcher. -5. Watcher returns that worker to the `pool`. -6. Pool invoke `Exec/ExecWithTimeout` method on the golang worker with provided request payload. -7. Golang worker send that request to the PHP worker via various set of transports (`pkg/transport` package). -8. PHP worker send back response to the golang worker (or error via stderr). -9. Golang worker return response payload to the pool. -10. Pool process this response and return answer to the user. diff --git a/pkg/doc/pool_workflow.drawio b/pkg/doc/pool_workflow.drawio deleted file mode 100644 index 3f74d0fc..00000000 --- a/pkg/doc/pool_workflow.drawio +++ /dev/null @@ -1 +0,0 @@ -7Vxbd6M4Ev41OWfnoX0Qkrg8Jul09+zOTGc6vSfTjwRkm22MvCB37Pn1IwGydcExsTGO03HOiUEgLqqqr6o+lXwBr2fLj0U0n/5OE5JduE6yvIDvL1wXAMflX6Jl1bQ4jlO3TIo0qduUhrv0byJPbFoXaULKpq1uYpRmLJ3rjTHNcxIzrS0qCvqonzamWaI1zKMJsRru4iizW+/ThE3li3nh5sAnkk6m8tYeRvWRWSTPbl6lnEYJfVSa4M0FvC4oZfXWbHlNMjF8+sB82HJ0/WQFyVmXDn/hCPvfPk++/Ocm+HyPr5Z/TsfvvOYyP6Js0bzyhetl/IJXU37Im4itu3Q25wPiOv8tScG/CvL/BSmZPJHfcnNu865sJUewoIs8IeIZHH74cZoycjePYnH0kSuNuBGbZXwP8M0xzdmHaJZmQl+u6aJIqxv+Qfi4XUVZOsl5e0bG4k4/SMFSLqnLppnR+fr+6qjIN+Snk6XS1IzSR0JnhBUrfkpzFGNcd2mUFrmNFj+qGhA2GjBVpB9ItY0atZusL74RDN9oZPMMOYHQGliScE1tdmnBpnRC8yi72bRe6UO/Oec3KoaqGvD/EcZWjdlFC0Z1cZBlyv4S3Ucubna/KYfeL5tLVzsruZPzF1Z7if1v6sFNv2pPdtSE/2/CHooozUv+0r/TnD4l2pIrSkyeGr0GOaJiQtgT50lTFUP7pKYUJItY+kMHiTahN11vacqfea1hoedpGgYc2SCvUT9q083QnfVz7K9Orm32H8XrcNgS/8pVHgt9p8X3yv7+dfv5ln/ROSn4a9P8l1Zl/C164PCvKZA02ZiLjF/JNtpZmiS1rpIy/Tt6qK4nlGEuXr0aDHx1gd/vpR5PWpKFB2sn0jyFBtNtOPHOGTke0iUZ1HsH6sc7AJ2RDkL8ZsB1lA80VYaOxyU5jrZgS1vgmwbUQnE939e9RT8agAz5hyMHDSZv/7TOxledjdPV2fiaswEv3tnAvn3NQQGGRC7FxvEZ27hU4cNtnJs4QlDHYtiPjUPDxvEogEPZOHTsCEDcae3030RfwTvwdNm7vYgemaJ/B9DIw0MJ347+7iupi8G+j1g8bVGA5+VxaZZd04wWVV84HhMvjnl7yQr6nShHEj98cA7C4+55XQgNlwqbCErN63BjF2pa5x0tqwPWMA/qaPdJ6rSMbrT2uYO7WbejmwV9u9mq62VRRCvlhAbNthp8YOpeE71vtKe+Yr9m7lp2fpllNI6YleSVZwz4YBsS7AP4GHlHiecD3Y0gA1KOGMxDSwnaUecnFDaAGGhi6SmwQ7pvxyPgDyZuS9q/5ilLuSS5N+jTpycRCcatPt2LA/IwHsan+4GOq4HT4tOdFp9uGmB/Pr09bXpjajt5dXgqr36YzD3L6lQ69Y572vtzT6ukXvfEmjm6P/T7AV7fzKu8keerrKk7GA4jSyXcN+nX0oc8/NWl1I/0PYsz9Qfzu/CksA8UzFf40+flcv7pkrmusA/dF4X70MZ974yNHPZm5Dy0xrgnqwZoBAejRVE7SJ/V1Acf/B2GXO3dkiLlwyV076VYN+rbuvfiaoADA82PyKTiQ9cOPtrVwceHdcAONtT/CPwRaqEOZDHTOeKbNO4+8M1xJROxL74NAGZ2DHpH8qR65nJO81IkJxyL+L+p2FyUZ52h9CfeutjCmKHphwj0Xf2qbjAabNrHNuebJYn5pcTXfcqm11wQZMksHTh/nsic+2nlidrmfo7GE21JB88q0jhdkYWs1DuT6R/gID1C4A27PP6OHsdx+a6d0aAzdgnSyPrw+BAiA7t7mi1wDdoCgAHrQNzQEvivAgKjir4cF0R8recIryt0LETbii7EAxb2rMJPqh6uo08cyqmYA9UDADxCoakgCKmsJhhMW+yqoTsWx
d8rR8r96Pbp5INCCEyCBLWFEIH7IOpgTxJCACc4df0ItI33tOsENBrS7UhDrqMItdtpaEi3Y1TRtXZzky46h9JhA6QHrqVNRn7wNZ0RuniN+QFwPd25tyYIshZwkAQB2umaNfDlNJqLzXFGlpdiuVtlLEmz+T7OorJMY8NYN9F8r5O++5SV7W+qOOzZVBU5t2G4bDvUmzuSQZPMHzCWcdRvbq38aSMdDQ7RGXYNEbTZJXN94BZtXcyyy5hRNQas4sVbWqZieRE/5YEyRmctQWK9ym/bksEjef7QWBAo1/4pOgMH9ft2sf6a2GuGXuX1tpR0V1TP0UJ0ESWUdcAwkJgAMDFcptAqhoeBLSizPKs3QSG70voLiUldG7IhYV+9YHyj2iN0bMHI9SbDCMZe0maJgV8mnZdEl4TEsJKjK+O5DyN2SFMnZW0hzXjcHBlg1D3PoDzlhMUO1ILHQi1kc0v2mO9MVhRZmEawdhpKCETnJK9bmuwleFIuL6/OXWrq7sXLHQOdQycwjNXxARo27EC+pUW32WKS5rxtXeparVJ+9bjqOUYsGUIbVuUvPAwCq7Kc+q0K6qj0g+QDX0gZFLYz1fYVxOfBCuNthrhXpSuSBnho5ojaS1kNtvn4NBE+7erwM6cYUWfeond/fpiN2ylncM423uP6cBA6ejlzP/OCAIhCaeWjzwfLepQByojsWYYtqxkOWiOMxV8rD1x9OsheHFf61Z+eUliMXtpCI2zzbzVdz990ldEoefXxL0BGjUQb4SOXGgwT/9q0whfy46fje5Br1MMGLUTcoHwPtjHs09ev4ket5k3yeBCU7QImA+qCmLT/HMJDwHHmyYmRZ7DW2FhkIxfDaIjVwoZCdCQheHb1gDXuL2lGa6sQdgZvuGuCdqJZJ+yH+jW6zzo54QgHys+Q6TpmTnkcmQyShL9KBlGadUBY1SRzmpNngW4bHOiAsUu1LMtfc8Z94K9jBCt+S6UKalE391iximczBB9pFuUT3rYllvzJRAbd3SJrjS+PJzM7lrn9JDxm5+nEVy0wLHmdJwQWoF7kxXc3PxNcQ+fm55bhzT8= \ No newline at end of file diff --git a/pkg/doc/pool_workflow.svg b/pkg/doc/pool_workflow.svg deleted file mode 100644 index 1e043eaa..00000000 --- a/pkg/doc/pool_workflow.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -

[pool_workflow.svg — workflow diagram text: HTTP plugin → Pool → Workers Watcher → Stack with workers → Golang Worker → PHP worker; numbered steps 1-10 with labels "Simple User request", "Plugin Initialization", "Allocate sync workers", "Give me SyncWorker", "Exec/ExecWithContext", "Get worker (POP operation)", "Exec/ExecWithTimeout", "Exec payload", "Receive response", "Send response to the user"]
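Condensed into code, the workflow described in the README above is what the pool tests later in this patch exercise; a minimal sketch, assuming the pre-move `pkg/` import paths and a PHP echo worker script (here `tests/client.php`, as used by those tests):

package main

import (
	"context"
	"log"
	"os/exec"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pool"
	"github.com/spiral/roadrunner/v2/pkg/transport/pipe"
)

func main() {
	ctx := context.Background()

	// step 1: allocate the requested number of PHP processes through the factory
	p, err := pool.Initialize(
		ctx,
		func() *exec.Cmd { return exec.Command("php", "tests/client.php", "echo", "pipes") },
		pipe.NewPipeFactory(),
		&pool.Config{NumWorkers: 4, AllocateTimeout: time.Minute, DestroyTimeout: time.Minute},
	)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Destroy(ctx)

	// steps 2-10: a free worker is taken from the watcher, executes the payload
	// over the pipe transport and is released back once the response is returned
	rsp, err := p.Exec(&payload.Payload{Body: []byte("hello")})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(rsp.String())
}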
\ No newline at end of file diff --git a/pkg/events/general.go b/pkg/events/general.go deleted file mode 100755 index 5cf13e10..00000000 --- a/pkg/events/general.go +++ /dev/null @@ -1,41 +0,0 @@ -package events - -import ( - "sync" -) - -const UnknownEventType string = "Unknown event type" - -// HandlerImpl helps to broadcast events to multiple listeners. -type HandlerImpl struct { - listeners []Listener - sync.RWMutex // all receivers should be pointers -} - -func NewEventsHandler() Handler { - return &HandlerImpl{listeners: make([]Listener, 0, 2)} -} - -// NumListeners returns number of event listeners. -func (eb *HandlerImpl) NumListeners() int { - eb.Lock() - defer eb.Unlock() - return len(eb.listeners) -} - -// AddListener registers new event listener. -func (eb *HandlerImpl) AddListener(listener Listener) { - eb.Lock() - defer eb.Unlock() - eb.listeners = append(eb.listeners, listener) -} - -// Push broadcast events across all event listeners. -func (eb *HandlerImpl) Push(e interface{}) { - // ReadLock here because we are not changing listeners - eb.RLock() - defer eb.RUnlock() - for k := range eb.listeners { - eb.listeners[k](e) - } -} diff --git a/pkg/events/interface.go b/pkg/events/interface.go deleted file mode 100644 index 7d57e4d0..00000000 --- a/pkg/events/interface.go +++ /dev/null @@ -1,14 +0,0 @@ -package events - -// Handler interface -type Handler interface { - // NumListeners return number of active listeners - NumListeners() int - // AddListener adds lister to the publisher - AddListener(listener Listener) - // Push pushes event to the listeners - Push(e interface{}) -} - -// Listener .. (type alias) event listener listens for the events produced by worker, worker pool or other service. -type Listener func(event interface{}) diff --git a/pkg/events/jobs_events.go b/pkg/events/jobs_events.go deleted file mode 100644 index f65ede67..00000000 --- a/pkg/events/jobs_events.go +++ /dev/null @@ -1,81 +0,0 @@ -package events - -import ( - "time" -) - -const ( - // EventPushOK thrown when new job has been added. JobEvent is passed as context. - EventPushOK J = iota + 12000 - - // EventPushError caused when job can not be registered. - EventPushError - - // EventJobStart thrown when new job received. - EventJobStart - - // EventJobOK thrown when job execution is successfully completed. JobEvent is passed as context. - EventJobOK - - // EventJobError thrown on all job related errors. See JobError as context. - EventJobError - - // EventPipeActive when pipeline has started. - EventPipeActive - - // EventPipeStopped when pipeline has been stopped. - EventPipeStopped - - // EventPipePaused when pipeline has been paused. - EventPipePaused - - // EventPipeError when pipeline specific error happen. - EventPipeError - - // EventDriverReady thrown when broken is ready to accept/serve tasks. - EventDriverReady -) - -type J int64 - -func (ev J) String() string { - switch ev { - case EventPushOK: - return "EventPushOK" - case EventPushError: - return "EventPushError" - case EventJobStart: - return "EventJobStart" - case EventJobOK: - return "EventJobOK" - case EventJobError: - return "EventJobError" - case EventPipeActive: - return "EventPipeActive" - case EventPipeStopped: - return "EventPipeStopped" - case EventPipeError: - return "EventPipeError" - case EventDriverReady: - return "EventDriverReady" - case EventPipePaused: - return "EventPipePaused" - } - return UnknownEventType -} - -// JobEvent represent job event. -type JobEvent struct { - Event J - // String is job id. 
- ID string - // Pipeline name - Pipeline string - // Associated driver name (amqp, ephemeral, etc) - Driver string - // Error for the jobs/pipes errors - Error error - // event timings - Start time.Time - Elapsed time.Duration -} diff --git a/pkg/events/pool_events.go b/pkg/events/pool_events.go deleted file mode 100644 index 4d4cae5d..00000000 --- a/pkg/events/pool_events.go +++ /dev/null @@ -1,70 +0,0 @@ -package events - -const ( - // EventWorkerConstruct thrown when new worker is spawned. - EventWorkerConstruct P = iota + 10000 - - // EventWorkerDestruct thrown after worker destruction. - EventWorkerDestruct - - // EventPoolError caused on pool wide errors. - EventPoolError - - // EventSupervisorError triggered when supervisor can not complete work. - EventSupervisorError - - // EventNoFreeWorkers triggered when there are no free workers in the stack and timeout for worker allocate elapsed - EventNoFreeWorkers - - // EventMaxMemory caused when worker consumes more memory than allowed. - EventMaxMemory - - // EventTTL thrown when worker is removed due TTL being reached. TTL defines maximum time worker is allowed to live (seconds) - EventTTL - - // EventIdleTTL triggered when worker spends too much time at rest. - EventIdleTTL - - // EventExecTTL triggered when worker spends too much time doing the task (max_execution_time). - EventExecTTL - - // EventPoolRestart triggered when pool restart is needed - EventPoolRestart -) - -type P int64 - -func (ev P) String() string { - switch ev { - case EventWorkerConstruct: - return "EventWorkerConstruct" - case EventWorkerDestruct: - return "EventWorkerDestruct" - case EventPoolError: - return "EventPoolError" - case EventSupervisorError: - return "EventSupervisorError" - case EventNoFreeWorkers: - return "EventNoFreeWorkers" - case EventMaxMemory: - return "EventMaxMemory" - case EventTTL: - return "EventTTL" - case EventIdleTTL: - return "EventIdleTTL" - case EventExecTTL: - return "EventExecTTL" - case EventPoolRestart: - return "EventPoolRestart" - } - return UnknownEventType -} - -// PoolEvent triggered by pool on different events. Pool as also trigger WorkerEvent in case of log. -type PoolEvent struct { - // Event type, see below. - Event P - - // Payload depends on event type, typically it's worker or error. - Payload interface{} -} diff --git a/pkg/events/worker_events.go b/pkg/events/worker_events.go deleted file mode 100644 index 39c38e57..00000000 --- a/pkg/events/worker_events.go +++ /dev/null @@ -1,36 +0,0 @@ -package events - -const ( - // EventWorkerError triggered after WorkerProcess. Except payload to be error. - EventWorkerError W = iota + 11000 - // EventWorkerLog triggered on every write to WorkerProcess StdErr pipe (batched). Except payload to be []byte string. - EventWorkerLog - // EventWorkerStderr is the worker standard error output - EventWorkerStderr -) - -type W int64 - -func (ev W) String() string { - switch ev { - case EventWorkerError: - return "EventWorkerError" - case EventWorkerLog: - return "EventWorkerLog" - case EventWorkerStderr: - return "EventWorkerStderr" - } - return UnknownEventType -} - -// WorkerEvent wraps worker events. -type WorkerEvent struct { - // Event id, see below. - Event W - - // Worker triggered the event. - Worker interface{} - - // Event specific payload. 
- Payload interface{} -} diff --git a/pkg/payload/payload.go b/pkg/payload/payload.go deleted file mode 100755 index e1e45ac1..00000000 --- a/pkg/payload/payload.go +++ /dev/null @@ -1,20 +0,0 @@ -package payload - -import ( - "github.com/spiral/roadrunner/v2/utils" -) - -// Payload carries binary header and body to stack and -// back to the server. -type Payload struct { - // Context represent payload context, might be omitted. - Context []byte - - // body contains binary payload to be processed by WorkerProcess. - Body []byte -} - -// String returns payload body as string -func (p *Payload) String() string { - return utils.AsString(p.Body) -} diff --git a/pkg/pool/config.go b/pkg/pool/config.go deleted file mode 100644 index 3a058956..00000000 --- a/pkg/pool/config.go +++ /dev/null @@ -1,75 +0,0 @@ -package pool - -import ( - "runtime" - "time" -) - -// Config .. Pool config Configures the pool behavior. -type Config struct { - // Debug flag creates new fresh worker before every request. - Debug bool - - // NumWorkers defines how many sub-processes can be run at once. This value - // might be doubled by Swapper while hot-swap. Defaults to number of CPU cores. - NumWorkers uint64 `mapstructure:"num_workers"` - - // MaxJobs defines how many executions is allowed for the worker until - // it's destruction. set 1 to create new process for each new task, 0 to let - // worker handle as many tasks as it can. - MaxJobs uint64 `mapstructure:"max_jobs"` - - // AllocateTimeout defines for how long pool will be waiting for a worker to - // be freed to handle the task. Defaults to 60s. - AllocateTimeout time.Duration `mapstructure:"allocate_timeout"` - - // DestroyTimeout defines for how long pool should be waiting for worker to - // properly destroy, if timeout reached worker will be killed. Defaults to 60s. - DestroyTimeout time.Duration `mapstructure:"destroy_timeout"` - - // Supervision config to limit worker and pool memory usage. - Supervisor *SupervisorConfig `mapstructure:"supervisor"` -} - -// InitDefaults enables default config values. -func (cfg *Config) InitDefaults() { - if cfg.NumWorkers == 0 { - cfg.NumWorkers = uint64(runtime.NumCPU()) - } - - if cfg.AllocateTimeout == 0 { - cfg.AllocateTimeout = time.Minute - } - - if cfg.DestroyTimeout == 0 { - cfg.DestroyTimeout = time.Minute - } - if cfg.Supervisor == nil { - return - } - cfg.Supervisor.InitDefaults() -} - -type SupervisorConfig struct { - // WatchTick defines how often to check the state of worker. - WatchTick time.Duration `mapstructure:"watch_tick"` - - // TTL defines maximum time worker is allowed to live. - TTL time.Duration `mapstructure:"ttl"` - - // IdleTTL defines maximum duration worker can spend in idle mode. Disabled when 0. - IdleTTL time.Duration `mapstructure:"idle_ttl"` - - // ExecTTL defines maximum lifetime per job. - ExecTTL time.Duration `mapstructure:"exec_ttl"` - - // MaxWorkerMemory limits memory per worker. - MaxWorkerMemory uint64 `mapstructure:"max_worker_memory"` -} - -// InitDefaults enables default config values. -func (cfg *SupervisorConfig) InitDefaults() { - if cfg.WatchTick == 0 { - cfg.WatchTick = time.Second - } -} diff --git a/pkg/pool/interface.go b/pkg/pool/interface.go deleted file mode 100644 index 4049122c..00000000 --- a/pkg/pool/interface.go +++ /dev/null @@ -1,53 +0,0 @@ -package pool - -import ( - "context" - - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -// Pool managed set of inner worker processes. 
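Zero values in the configuration above are filled in by InitDefaults; a sketch of a fully populated, supervised configuration (the concrete numbers are illustrative, mirroring the ones used by the supervisor tests later in this patch):

package example

import (
	"runtime"
	"time"

	"github.com/spiral/roadrunner/v2/pkg/pool"
)

func supervisedConfig() *pool.Config {
	cfg := &pool.Config{
		NumWorkers:      uint64(runtime.NumCPU()),
		MaxJobs:         0, // 0 = no per-worker execution limit
		AllocateTimeout: time.Minute,
		DestroyTimeout:  time.Minute,
		Supervisor: &pool.SupervisorConfig{
			WatchTick:       time.Second,       // how often the supervisor inspects workers
			TTL:             100 * time.Second, // hard lifetime of a worker
			IdleTTL:         100 * time.Second, // maximum time a worker may rest between jobs
			ExecTTL:         100 * time.Second, // maximum duration of a single execution
			MaxWorkerMemory: 128,               // MB; a worker above this limit is replaced
		},
	}
	// fills NumWorkers, the two timeouts and WatchTick when they are left at zero
	cfg.InitDefaults()
	return cfg
}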
-type Pool interface { - // GetConfig returns pool configuration. - GetConfig() interface{} - - // Exec executes task with payload - Exec(rqs *payload.Payload) (*payload.Payload, error) - - // Workers returns worker list associated with the pool. - Workers() (workers []worker.BaseProcess) - - // RemoveWorker removes worker from the pool. - RemoveWorker(worker worker.BaseProcess) error - - // Destroy all underlying stack (but let them to complete the task). - Destroy(ctx context.Context) - - // ExecWithContext executes task with context which is used with timeout - execWithTTL(ctx context.Context, rqs *payload.Payload) (*payload.Payload, error) -} - -// Watcher is an interface for the Sync workers lifecycle -type Watcher interface { - // Watch used to add workers to the container - Watch(workers []worker.BaseProcess) error - - // Take takes the first free worker - Take(ctx context.Context) (worker.BaseProcess, error) - - // Release releases the worker putting it back to the queue - Release(w worker.BaseProcess) - - // Allocate - allocates new worker and put it into the WorkerWatcher - Allocate() error - - // Destroy destroys the underlying container - Destroy(ctx context.Context) - - // List return all container w/o removing it from internal storage - List() []worker.BaseProcess - - // Remove will remove worker from the container - Remove(wb worker.BaseProcess) -} diff --git a/pkg/pool/static_pool.go b/pkg/pool/static_pool.go deleted file mode 100755 index 7e190846..00000000 --- a/pkg/pool/static_pool.go +++ /dev/null @@ -1,374 +0,0 @@ -package pool - -import ( - "context" - "os/exec" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/transport" - "github.com/spiral/roadrunner/v2/pkg/worker" - workerWatcher "github.com/spiral/roadrunner/v2/pkg/worker_watcher" - "github.com/spiral/roadrunner/v2/utils" -) - -// StopRequest can be sent by worker to indicate that restart is required. -const StopRequest = "{\"stop\":true}" - -// ErrorEncoder encode error or make a decision based on the error type -type ErrorEncoder func(err error, w worker.BaseProcess) (*payload.Payload, error) - -type Options func(p *StaticPool) - -type Command func() *exec.Cmd - -// StaticPool controls worker creation, destruction and task routing. Pool uses fixed amount of stack. -type StaticPool struct { - cfg *Config - - // worker command creator - cmd Command - - // creates and connects to stack - factory transport.Factory - - // distributes the events - events events.Handler - - // saved list of event listeners - listeners []events.Listener - - // manages worker states and TTLs - ww Watcher - - // allocate new worker - allocator worker.Allocator - - // errEncoder is the default Exec error encoder - errEncoder ErrorEncoder -} - -// Initialize creates new worker pool and task multiplexer. StaticPool will initiate with one worker. 
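Listeners registered through the AddListeners option below receive the typed PoolEvent/WorkerEvent values defined above; a sketch of a listener in the style used by the pool tests:

package example

import (
	"log"

	"github.com/spiral/roadrunner/v2/pkg/events"
)

// poolListener reacts to a couple of the typed events emitted by the pool.
func poolListener(event interface{}) {
	switch ev := event.(type) {
	case events.PoolEvent:
		if ev.Event == events.EventNoFreeWorkers {
			log.Println("no free workers within allocate_timeout:", ev.Payload)
		}
	case events.WorkerEvent:
		if ev.Event == events.EventWorkerError {
			log.Printf("worker %v reported an error: %v", ev.Worker, ev.Payload)
		}
	}
}

// Wired in at pool construction time:
//   p, err := pool.Initialize(ctx, cmd, factory, cfg, pool.AddListeners(poolListener))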
-func Initialize(ctx context.Context, cmd Command, factory transport.Factory, cfg *Config, options ...Options) (Pool, error) { - const op = errors.Op("static_pool_initialize") - if factory == nil { - return nil, errors.E(op, errors.Str("no factory initialized")) - } - cfg.InitDefaults() - - if cfg.Debug { - cfg.NumWorkers = 0 - cfg.MaxJobs = 1 - } - - p := &StaticPool{ - cfg: cfg, - cmd: cmd, - factory: factory, - events: events.NewEventsHandler(), - } - - // add pool options - for i := 0; i < len(options); i++ { - options[i](p) - } - - // set up workers allocator - p.allocator = p.newPoolAllocator(ctx, p.cfg.AllocateTimeout, factory, cmd) - // set up workers watcher - p.ww = workerWatcher.NewSyncWorkerWatcher(p.allocator, p.cfg.NumWorkers, p.events, p.cfg.AllocateTimeout) - - // allocate requested number of workers - workers, err := p.allocateWorkers(p.cfg.NumWorkers) - if err != nil { - return nil, errors.E(op, err) - } - - // add workers to the watcher - err = p.ww.Watch(workers) - if err != nil { - return nil, errors.E(op, err) - } - - p.errEncoder = defaultErrEncoder(p) - - // if supervised config not nil, guess, that pool wanted to be supervised - if cfg.Supervisor != nil { - sp := supervisorWrapper(p, p.events, p.cfg.Supervisor) - // start watcher timer - sp.Start() - return sp, nil - } - - return p, nil -} - -func AddListeners(listeners ...events.Listener) Options { - return func(p *StaticPool) { - p.listeners = listeners - for i := 0; i < len(listeners); i++ { - p.addListener(listeners[i]) - } - } -} - -// AddListener connects event listener to the pool. -func (sp *StaticPool) addListener(listener events.Listener) { - sp.events.AddListener(listener) -} - -// GetConfig returns associated pool configuration. Immutable. -func (sp *StaticPool) GetConfig() interface{} { - return sp.cfg -} - -// Workers returns worker list associated with the pool. 
-func (sp *StaticPool) Workers() (workers []worker.BaseProcess) { - return sp.ww.List() -} - -func (sp *StaticPool) RemoveWorker(wb worker.BaseProcess) error { - sp.ww.Remove(wb) - return nil -} - -// Exec executes provided payload on the worker -func (sp *StaticPool) Exec(p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("static_pool_exec") - if sp.cfg.Debug { - return sp.execDebug(p) - } - ctxGetFree, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout) - defer cancel() - w, err := sp.takeWorker(ctxGetFree, op) - if err != nil { - return nil, errors.E(op, err) - } - - rsp, err := w.(worker.SyncWorker).Exec(p) - if err != nil { - return sp.errEncoder(err, w) - } - - // worker want's to be terminated - if len(rsp.Body) == 0 && utils.AsString(rsp.Context) == StopRequest { - sp.stopWorker(w) - return sp.Exec(p) - } - - if sp.cfg.MaxJobs != 0 { - sp.checkMaxJobs(w) - return rsp, nil - } - // return worker back - sp.ww.Release(w) - return rsp, nil -} - -// Be careful, sync with pool.Exec method -func (sp *StaticPool) execWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("static_pool_exec_with_context") - if sp.cfg.Debug { - return sp.execDebugWithTTL(ctx, p) - } - - ctxAlloc, cancel := context.WithTimeout(context.Background(), sp.cfg.AllocateTimeout) - defer cancel() - w, err := sp.takeWorker(ctxAlloc, op) - if err != nil { - return nil, errors.E(op, err) - } - - rsp, err := w.(worker.SyncWorker).ExecWithTTL(ctx, p) - if err != nil { - return sp.errEncoder(err, w) - } - - // worker want's to be terminated - if len(rsp.Body) == 0 && utils.AsString(rsp.Context) == StopRequest { - sp.stopWorker(w) - return sp.execWithTTL(ctx, p) - } - - if sp.cfg.MaxJobs != 0 { - sp.checkMaxJobs(w) - return rsp, nil - } - - // return worker back - sp.ww.Release(w) - return rsp, nil -} - -func (sp *StaticPool) stopWorker(w worker.BaseProcess) { - const op = errors.Op("static_pool_stop_worker") - w.State().Set(worker.StateInvalid) - err := w.Stop() - if err != nil { - sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)}) - } -} - -// checkMaxJobs check for worker number of executions and kill workers if that number more than sp.cfg.MaxJobs -//go:inline -func (sp *StaticPool) checkMaxJobs(w worker.BaseProcess) { - if w.State().NumExecs() >= sp.cfg.MaxJobs { - w.State().Set(worker.StateMaxJobsReached) - sp.ww.Release(w) - return - } - - sp.ww.Release(w) -} - -func (sp *StaticPool) takeWorker(ctxGetFree context.Context, op errors.Op) (worker.BaseProcess, error) { - // Get function consumes context with timeout - w, err := sp.ww.Take(ctxGetFree) - if err != nil { - // if the error is of kind NoFreeWorkers, it means, that we can't get worker from the stack during the allocate timeout - if errors.Is(errors.NoFreeWorkers, err) { - sp.events.Push(events.PoolEvent{Event: events.EventNoFreeWorkers, Payload: errors.E(op, err)}) - return nil, errors.E(op, err) - } - // else if err not nil - return error - return nil, errors.E(op, err) - } - return w, nil -} - -// Destroy all underlying stack (but let them complete the task). 
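On the caller side, the different failure modes of Exec can be told apart through the error kinds from github.com/spiral/errors used above; a sketch (what to do per kind is illustrative, not prescribed by the pool):

package example

import (
	"log"

	"github.com/spiral/errors"
	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pool"
)

func execOnce(p pool.Pool, body []byte) (*payload.Payload, error) {
	rsp, err := p.Exec(&payload.Payload{Body: body})
	switch {
	case err == nil:
		return rsp, nil
	case errors.Is(errors.NoFreeWorkers, err):
		// all workers stayed busy for longer than allocate_timeout
		log.Println("pool saturated: consider raising num_workers or allocate_timeout")
		return nil, err
	case errors.Is(errors.SoftJob, err):
		// the PHP script reported an application error; the worker itself stays usable
		return nil, err
	default:
		return nil, err
	}
}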
-func (sp *StaticPool) Destroy(ctx context.Context) { - sp.ww.Destroy(ctx) -} - -func defaultErrEncoder(sp *StaticPool) ErrorEncoder { - return func(err error, w worker.BaseProcess) (*payload.Payload, error) { - const op = errors.Op("error_encoder") - // just push event if on any stage was timeout error - switch { - case errors.Is(errors.ExecTTL, err): - sp.events.Push(events.PoolEvent{Event: events.EventExecTTL, Payload: errors.E(op, err)}) - w.State().Set(worker.StateInvalid) - return nil, err - - case errors.Is(errors.SoftJob, err): - sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)}) - - // if max jobs exceed - if sp.cfg.MaxJobs != 0 && w.State().NumExecs() >= sp.cfg.MaxJobs { - // mark old as invalid and stop - w.State().Set(worker.StateInvalid) - errS := w.Stop() - if errS != nil { - return nil, errors.E(op, errors.SoftJob, errors.Errorf("err: %v\nerrStop: %v", err, errS)) - } - - return nil, err - } - - // soft jobs errors are allowed, just put the worker back - sp.ww.Release(w) - - return nil, err - case errors.Is(errors.Network, err): - // in case of network error, we can't stop the worker, we should kill it - w.State().Set(worker.StateInvalid) - sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: w, Payload: errors.E(op, err)}) - - // kill the worker instead of sending net packet to it - _ = w.Kill() - - return nil, err - default: - w.State().Set(worker.StateInvalid) - sp.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w}) - // stop the worker, worker here might be in the broken state (network) - errS := w.Stop() - if errS != nil { - return nil, errors.E(op, errors.Errorf("err: %v\nerrStop: %v", err, errS)) - } - - return nil, errors.E(op, err) - } - } -} - -func (sp *StaticPool) newPoolAllocator(ctx context.Context, timeout time.Duration, factory transport.Factory, cmd func() *exec.Cmd) worker.Allocator { - return func() (worker.SyncWorker, error) { - ctxT, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - w, err := factory.SpawnWorkerWithTimeout(ctxT, cmd(), sp.listeners...) 
- if err != nil { - return nil, err - } - - // wrap sync worker - sw := worker.From(w) - - sp.events.Push(events.PoolEvent{ - Event: events.EventWorkerConstruct, - Payload: sw, - }) - return sw, nil - } -} - -// execDebug used when debug mode was not set and exec_ttl is 0 -func (sp *StaticPool) execDebug(p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("static_pool_exec_debug") - sw, err := sp.allocator() - if err != nil { - return nil, err - } - - // redirect call to the workers' exec method (without ttl) - r, err := sw.Exec(p) - if err != nil { - return nil, errors.E(op, err) - } - - // destroy the worker - sw.State().Set(worker.StateDestroyed) - err = sw.Kill() - if err != nil { - sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: sw, Payload: err}) - return nil, errors.E(op, err) - } - - return r, nil -} - -// execDebugWithTTL used when user set debug mode and exec_ttl -func (sp *StaticPool) execDebugWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { - sw, err := sp.allocator() - if err != nil { - return nil, err - } - - // redirect call to the worker with TTL - r, err := sw.ExecWithTTL(ctx, p) - if stopErr := sw.Stop(); stopErr != nil { - sp.events.Push(events.WorkerEvent{Event: events.EventWorkerError, Worker: sw, Payload: err}) - } - - return r, err -} - -// allocate required number of stack -func (sp *StaticPool) allocateWorkers(numWorkers uint64) ([]worker.BaseProcess, error) { - const op = errors.Op("static_pool_allocate_workers") - workers := make([]worker.BaseProcess, 0, numWorkers) - - // constant number of stack simplify logic - for i := uint64(0); i < numWorkers; i++ { - w, err := sp.allocator() - if err != nil { - return nil, errors.E(op, errors.WorkerAllocate, err) - } - - workers = append(workers, w) - } - return workers, nil -} diff --git a/pkg/pool/static_pool_test.go b/pkg/pool/static_pool_test.go deleted file mode 100755 index cb6578a8..00000000 --- a/pkg/pool/static_pool_test.go +++ /dev/null @@ -1,721 +0,0 @@ -package pool - -import ( - "context" - "log" - "os/exec" - "runtime" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/transport/pipe" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/spiral/roadrunner/v2/utils" - "github.com/stretchr/testify/assert" -) - -var cfg = &Config{ - NumWorkers: uint64(runtime.NumCPU()), - AllocateTimeout: time.Second * 5, - DestroyTimeout: time.Second * 5, -} - -func Test_NewPool(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - assert.NoError(t, err) - - defer p.Destroy(ctx) - - assert.NotNil(t, p) -} - -func Test_StaticPool_Invalid(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "../../tests/invalid.php") }, - pipe.NewPipeFactory(), - cfg, - ) - - assert.Nil(t, p) - assert.Error(t, err) -} - -func Test_ConfigNoErrorInitDefaults(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.NotNil(t, p) - assert.NoError(t, err) -} - -func 
Test_StaticPool_Echo(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - assert.NoError(t, err) - - defer p.Destroy(ctx) - - assert.NotNil(t, p) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_StaticPool_Echo_NilContext(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - assert.NoError(t, err) - - defer p.Destroy(ctx) - - assert.NotNil(t, p) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello"), Context: nil}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_StaticPool_Echo_Context(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "head", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - assert.NoError(t, err) - - defer p.Destroy(ctx) - - assert.NotNil(t, p) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello"), Context: []byte("world")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.Empty(t, res.Body) - assert.NotNil(t, res.Context) - - assert.Equal(t, "world", string(res.Context)) -} - -func Test_StaticPool_JobError(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "error", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - time.Sleep(time.Second * 2) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - assert.Error(t, err) - assert.Nil(t, res) - - if errors.Is(errors.SoftJob, err) == false { - t.Fatal("error should be of type errors.Exec") - } - - assert.Contains(t, err.Error(), "hello") - p.Destroy(ctx) -} - -func Test_StaticPool_Broken_Replace(t *testing.T) { - ctx := context.Background() - block := make(chan struct{}, 10) - - listener := func(event interface{}) { - if wev, ok := event.(events.WorkerEvent); ok { - if wev.Event == events.EventWorkerStderr { - e := string(wev.Payload.([]byte)) - if strings.ContainsAny(e, "undefined_function()") { - block <- struct{}{} - return - } - } - } - } - - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "broken", "pipes") }, - pipe.NewPipeFactory(), - cfg, - AddListeners(listener), - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - time.Sleep(time.Second) - res, err := p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) - assert.Error(t, err) - assert.Nil(t, res) - - <-block - - p.Destroy(ctx) -} - -func Test_StaticPool_Broken_FromOutside(t *testing.T) { - ctx := context.Background() - // Run pool events - ev := make(chan struct{}, 1) - listener := func(event interface{}) { - if pe, ok := event.(events.PoolEvent); ok { - if pe.Event == events.EventWorkerConstruct { - ev <- struct{}{} - } - } - } - - var cfg2 = &Config{ - NumWorkers: 1, - AllocateTimeout: time.Second * 5, - DestroyTimeout: time.Second * 5, - } - - p, err := Initialize( - ctx, - func() *exec.Cmd { return 
exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - cfg2, - AddListeners(listener), - ) - assert.NoError(t, err) - assert.NotNil(t, p) - defer p.Destroy(ctx) - time.Sleep(time.Second) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) - assert.Equal(t, 1, len(p.Workers())) - - // first creation - <-ev - // killing random worker and expecting pool to replace it - err = p.Workers()[0].Kill() - if err != nil { - t.Errorf("error killing the process: error %v", err) - } - - // re-creation - <-ev - - list := p.Workers() - for _, w := range list { - assert.Equal(t, worker.StateReady, w.State().Value()) - } -} - -func Test_StaticPool_AllocateTimeout(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - AllocateTimeout: time.Nanosecond * 1, - DestroyTimeout: time.Second * 2, - }, - ) - assert.Error(t, err) - if !errors.Is(errors.WorkerAllocate, err) { - t.Fatal("error should be of type WorkerAllocate") - } - assert.Nil(t, p) -} - -func Test_StaticPool_Replace_Worker(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - MaxJobs: 1, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - defer p.Destroy(ctx) - // prevent process is not ready - time.Sleep(time.Second) - - var lastPID string - lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - - res, _ := p.Exec(&payload.Payload{Body: []byte("hello")}) - assert.Equal(t, lastPID, string(res.Body)) - - for i := 0; i < 10; i++ { - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.NotEqual(t, lastPID, string(res.Body)) - lastPID = string(res.Body) - } -} - -func Test_StaticPool_Debug_Worker(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - Debug: true, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - defer p.Destroy(ctx) - - // prevent process is not ready - time.Sleep(time.Second) - assert.Len(t, p.Workers(), 0) - - var lastPID string - res, _ := p.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NotEqual(t, lastPID, string(res.Body)) - - assert.Len(t, p.Workers(), 0) - - for i := 0; i < 10; i++ { - assert.Len(t, p.Workers(), 0) - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.NotEqual(t, lastPID, string(res.Body)) - lastPID = string(res.Body) - } -} - -// identical to replace but controlled on worker side -func Test_StaticPool_Stop_Worker(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - 
AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - defer p.Destroy(ctx) - time.Sleep(time.Second) - - var lastPID string - lastPID = strconv.Itoa(int(p.Workers()[0].Pid())) - - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, lastPID, string(res.Body)) - - for i := 0; i < 10; i++ { - res, err := p.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.NotEqual(t, lastPID, string(res.Body)) - lastPID = string(res.Body) - } -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_Destroy_And_Close(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.NotNil(t, p) - assert.NoError(t, err) - - p.Destroy(ctx) - _, err = p.Exec(&payload.Payload{Body: []byte("100")}) - assert.Error(t, err) -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.NotNil(t, p) - assert.NoError(t, err) - - go func() { - _, errP := p.Exec(&payload.Payload{Body: []byte("100")}) - if errP != nil { - t.Errorf("error executing payload: error %v", err) - } - }() - time.Sleep(time.Millisecond * 100) - - p.Destroy(ctx) - _, err = p.Exec(&payload.Payload{Body: []byte("100")}) - assert.Error(t, err) -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_Handle_Dead(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 5, - AllocateTimeout: time.Second * 100, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - time.Sleep(time.Second) - for i := range p.Workers() { - p.Workers()[i].State().Set(worker.StateErrored) - } - - _, err = p.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NoError(t, err) - p.Destroy(ctx) -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_Slow_Destroy(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 5, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - p.Destroy(context.Background()) -} - -func Test_StaticPool_NoFreeWorkers(t *testing.T) { - ctx := context.Background() - block := make(chan struct{}, 10) - - listener := func(event interface{}) { - if ev, ok := event.(events.PoolEvent); ok { - if ev.Event == events.EventNoFreeWorkers { - block <- struct{}{} - } - } - } - - p, err := Initialize( - ctx, - // sleep for the 3 seconds - func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", 
"pipes") }, - pipe.NewPipeFactory(), - &Config{ - Debug: false, - NumWorkers: 1, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: nil, - }, - AddListeners(listener), - ) - assert.NoError(t, err) - assert.NotNil(t, p) - - go func() { - _, _ = p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) - }() - - time.Sleep(time.Second) - res, err := p.execWithTTL(ctx, &payload.Payload{Body: []byte("hello")}) - assert.Error(t, err) - assert.Nil(t, res) - - <-block - - p.Destroy(ctx) -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_WrongCommand1(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("phg", "../../tests/slow-destroy.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 5, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.Error(t, err) - assert.Nil(t, p) -} - -// identical to replace but controlled on worker side -func Test_Static_Pool_WrongCommand2(t *testing.T) { - p, err := Initialize( - context.Background(), - func() *exec.Cmd { return exec.Command("php", "", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 5, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - - assert.Error(t, err) - assert.Nil(t, p) -} - -/* PTR: -Benchmark_Pool_Echo-32 49076 29926 ns/op 8016 B/op 20 allocs/op -Benchmark_Pool_Echo-32 47257 30779 ns/op 8047 B/op 20 allocs/op -Benchmark_Pool_Echo-32 46737 29440 ns/op 8065 B/op 20 allocs/op -Benchmark_Pool_Echo-32 51177 29074 ns/op 7981 B/op 20 allocs/op -Benchmark_Pool_Echo-32 51764 28319 ns/op 8012 B/op 20 allocs/op -Benchmark_Pool_Echo-32 54054 30714 ns/op 7987 B/op 20 allocs/op -Benchmark_Pool_Echo-32 54391 30689 ns/op 8055 B/op 20 allocs/op - -VAL: -Benchmark_Pool_Echo-32 47936 28679 ns/op 7942 B/op 19 allocs/op -Benchmark_Pool_Echo-32 49010 29830 ns/op 7970 B/op 19 allocs/op -Benchmark_Pool_Echo-32 46771 29031 ns/op 8014 B/op 19 allocs/op -Benchmark_Pool_Echo-32 47760 30517 ns/op 7955 B/op 19 allocs/op -Benchmark_Pool_Echo-32 48148 29816 ns/op 7950 B/op 19 allocs/op -Benchmark_Pool_Echo-32 52705 29809 ns/op 7979 B/op 19 allocs/op -Benchmark_Pool_Echo-32 54374 27776 ns/op 7947 B/op 19 allocs/op -*/ -func Benchmark_Pool_Echo(b *testing.B) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - cfg, - ) - if err != nil { - b.Fatal(err) - } - - bd := make([]byte, 1024) - c := make([]byte, 1024) - - pld := &payload.Payload{ - Context: c, - Body: bd, - } - - b.ResetTimer() - b.ReportAllocs() - for n := 0; n < b.N; n++ { - if _, err := p.Exec(pld); err != nil { - b.Fail() - } - } -} - -// Benchmark_Pool_Echo_Batched-32 366996 2873 ns/op 1233 B/op 24 allocs/op -// PTR -> Benchmark_Pool_Echo_Batched-32 406839 2900 ns/op 1059 B/op 23 allocs/op -// PTR -> Benchmark_Pool_Echo_Batched-32 413312 2904 ns/op 1067 B/op 23 allocs/op -func Benchmark_Pool_Echo_Batched(b *testing.B) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: uint64(runtime.NumCPU()), - AllocateTimeout: time.Second * 100, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(b, err) - defer p.Destroy(ctx) - - bd := make([]byte, 1024) - c := make([]byte, 1024) - - pld := &payload.Payload{ - Context: c, - Body: 
bd, - } - - b.ResetTimer() - b.ReportAllocs() - - var wg sync.WaitGroup - for i := 0; i < b.N; i++ { - wg.Add(1) - go func() { - defer wg.Done() - if _, err := p.Exec(pld); err != nil { - b.Fail() - log.Println(err) - } - }() - } - - wg.Wait() -} - -// Benchmark_Pool_Echo_Replaced-32 104/100 10900218 ns/op 52365 B/op 125 allocs/op -func Benchmark_Pool_Echo_Replaced(b *testing.B) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") }, - pipe.NewPipeFactory(), - &Config{ - NumWorkers: 1, - MaxJobs: 1, - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - }, - ) - assert.NoError(b, err) - defer p.Destroy(ctx) - b.ResetTimer() - b.ReportAllocs() - - for n := 0; n < b.N; n++ { - if _, err := p.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - log.Println(err) - } - } -} - -// BenchmarkToStringUnsafe-12 566317729 1.91 ns/op 0 B/op 0 allocs/op -// BenchmarkToStringUnsafe-32 1000000000 0.4434 ns/op 0 B/op 0 allocs/op -func BenchmarkToStringUnsafe(b *testing.B) { - testPayload := []byte("falsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtoj") - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - res := utils.AsString(testPayload) - _ = res - } -} - -// BenchmarkToStringSafe-32 8017846 182.5 ns/op 896 B/op 1 allocs/op -// inline BenchmarkToStringSafe-12 28926276 46.6 ns/op 128 B/op 1 allocs/op -func BenchmarkToStringSafe(b *testing.B) { - testPayload := []byte("falsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtojfalsflasjlifjwpoihejfoiwejow{}{}{}{}jelfjasjfhwaopiehjtopwhtgohrgouahsgkljasdlfjasl;fjals;jdflkndgouwhetopwqhjtoj") - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - res := toStringNotFun(testPayload) - _ = res - } -} - -func toStringNotFun(data []byte) string { - return string(data) -} diff --git a/pkg/pool/supervisor_pool.go b/pkg/pool/supervisor_pool.go deleted file mode 100755 index e6b2bd7c..00000000 --- a/pkg/pool/supervisor_pool.go +++ /dev/null @@ -1,230 +0,0 @@ -package pool - -import ( - "context" - "sync" - "time" - - "github.com/spiral/errors" - 
"github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/state/process" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -const MB = 1024 * 1024 - -// NSEC_IN_SEC nanoseconds in second -const NSEC_IN_SEC int64 = 1000000000 //nolint:stylecheck - -type Supervised interface { - Pool - // Start used to start watching process for all pool workers - Start() -} - -type supervised struct { - cfg *SupervisorConfig - events events.Handler - pool Pool - stopCh chan struct{} - mu *sync.RWMutex -} - -func supervisorWrapper(pool Pool, events events.Handler, cfg *SupervisorConfig) Supervised { - sp := &supervised{ - cfg: cfg, - events: events, - pool: pool, - mu: &sync.RWMutex{}, - stopCh: make(chan struct{}), - } - - return sp -} - -func (sp *supervised) execWithTTL(_ context.Context, _ *payload.Payload) (*payload.Payload, error) { - panic("used to satisfy pool interface") -} - -func (sp *supervised) Exec(rqs *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("supervised_exec_with_context") - if sp.cfg.ExecTTL == 0 { - return sp.pool.Exec(rqs) - } - - ctx, cancel := context.WithTimeout(context.Background(), sp.cfg.ExecTTL) - defer cancel() - - res, err := sp.pool.execWithTTL(ctx, rqs) - if err != nil { - return nil, errors.E(op, err) - } - - return res, nil -} - -func (sp *supervised) GetConfig() interface{} { - return sp.pool.GetConfig() -} - -func (sp *supervised) Workers() (workers []worker.BaseProcess) { - sp.mu.Lock() - defer sp.mu.Unlock() - return sp.pool.Workers() -} - -func (sp *supervised) RemoveWorker(worker worker.BaseProcess) error { - return sp.pool.RemoveWorker(worker) -} - -func (sp *supervised) Destroy(ctx context.Context) { - sp.pool.Destroy(ctx) -} - -func (sp *supervised) Start() { - go func() { - watchTout := time.NewTicker(sp.cfg.WatchTick) - for { - select { - case <-sp.stopCh: - watchTout.Stop() - return - // stop here - case <-watchTout.C: - sp.mu.Lock() - sp.control() - sp.mu.Unlock() - } - } - }() -} - -func (sp *supervised) Stop() { - sp.stopCh <- struct{}{} -} - -func (sp *supervised) control() { //nolint:gocognit - now := time.Now() - - // MIGHT BE OUTDATED - // It's a copy of the Workers pointers - workers := sp.pool.Workers() - - for i := 0; i < len(workers); i++ { - // if worker not in the Ready OR working state - // skip such worker - switch workers[i].State().Value() { - case - worker.StateInvalid, - worker.StateErrored, - worker.StateDestroyed, - worker.StateInactive, - worker.StateStopped, - worker.StateStopping, - worker.StateKilling: - continue - } - - s, err := process.WorkerProcessState(workers[i]) - if err != nil { - // worker not longer valid for supervision - continue - } - - if sp.cfg.TTL != 0 && now.Sub(workers[i].Created()).Seconds() >= sp.cfg.TTL.Seconds() { - /* - worker at this point might be in the middle of request execution: - - ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release - ^ - TTL Reached, state - invalid | - -----> Worker Stopped here - */ - - if workers[i].State().Value() != worker.StateWorking { - workers[i].State().Set(worker.StateInvalid) - _ = workers[i].Stop() - } - // just to double check - workers[i].State().Set(worker.StateInvalid) - sp.events.Push(events.PoolEvent{Event: events.EventTTL, Payload: workers[i]}) - continue - } - - if sp.cfg.MaxWorkerMemory != 0 && s.MemoryUsage >= sp.cfg.MaxWorkerMemory*MB { - /* 
- worker at this point might be in the middle of request execution: - - ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release - ^ - TTL Reached, state - invalid | - -----> Worker Stopped here - */ - - if workers[i].State().Value() != worker.StateWorking { - workers[i].State().Set(worker.StateInvalid) - _ = workers[i].Stop() - } - // just to double check - workers[i].State().Set(worker.StateInvalid) - sp.events.Push(events.PoolEvent{Event: events.EventMaxMemory, Payload: workers[i]}) - continue - } - - // firs we check maxWorker idle - if sp.cfg.IdleTTL != 0 { - // then check for the worker state - if workers[i].State().Value() != worker.StateReady { - continue - } - - /* - Calculate idle time - If worker in the StateReady, we read it LastUsed timestamp as UnixNano uint64 - 2. For example maxWorkerIdle is equal to 5sec, then, if (time.Now - LastUsed) > maxWorkerIdle - we are guessing that worker overlap idle time and has to be killed - */ - - // 1610530005534416045 lu - // lu - now = -7811150814 - nanoseconds - // 7.8 seconds - // get last used unix nano - lu := workers[i].State().LastUsed() - // worker not used, skip - if lu == 0 { - continue - } - - // convert last used to unixNano and sub time.now to seconds - // negative number, because lu always in the past, except for the `back to the future` :) - res := ((int64(lu) - now.UnixNano()) / NSEC_IN_SEC) * -1 - - // maxWorkerIdle more than diff between now and last used - // for example: - // After exec worker goes to the rest - // And resting for the 5 seconds - // IdleTTL is 1 second. - // After the control check, res will be 5, idle is 1 - // 5 - 1 = 4, more than 0, YOU ARE FIRED (removed). Done. 
- if int64(sp.cfg.IdleTTL.Seconds())-res <= 0 { - /* - worker at this point might be in the middle of request execution: - - ---> REQ ---> WORKER -----------------> RESP (at this point we should not set the Ready state) ------> | ----> Worker gets between supervisor checks and get killed in the ww.Release - ^ - TTL Reached, state - invalid | - -----> Worker Stopped here - */ - - if workers[i].State().Value() != worker.StateWorking { - workers[i].State().Set(worker.StateInvalid) - _ = workers[i].Stop() - } - // just to double-check - workers[i].State().Set(worker.StateInvalid) - sp.events.Push(events.PoolEvent{Event: events.EventIdleTTL, Payload: workers[i]}) - } - } - } -} diff --git a/pkg/pool/supervisor_test.go b/pkg/pool/supervisor_test.go deleted file mode 100644 index 14df513e..00000000 --- a/pkg/pool/supervisor_test.go +++ /dev/null @@ -1,413 +0,0 @@ -package pool - -import ( - "context" - "os" - "os/exec" - "testing" - "time" - - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/transport/pipe" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var cfgSupervised = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 100 * time.Second, - IdleTTL: 100 * time.Second, - ExecTTL: 100 * time.Second, - MaxWorkerMemory: 100, - }, -} - -func TestSupervisedPool_Exec(t *testing.T) { - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") }, - pipe.NewPipeFactory(), - cfgSupervised, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - time.Sleep(time.Second) - - pidBefore := p.Workers()[0].Pid() - - for i := 0; i < 100; i++ { - time.Sleep(time.Millisecond * 100) - _, err = p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - assert.NoError(t, err) - } - - assert.NotEqual(t, pidBefore, p.Workers()[0].Pid()) - - p.Destroy(context.Background()) -} - -// This test should finish without freezes -func TestSupervisedPool_ExecWithDebugMode(t *testing.T) { - var cfgSupervised = cfgSupervised - cfgSupervised.Debug = true - - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/supervised.php") }, - pipe.NewPipeFactory(), - cfgSupervised, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - time.Sleep(time.Second) - - for i := 0; i < 100; i++ { - time.Sleep(time.Millisecond * 500) - _, err = p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - assert.NoError(t, err) - } - - p.Destroy(context.Background()) -} - -func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 100 * time.Second, - IdleTTL: 100 * time.Second, - ExecTTL: 1 * time.Second, - MaxWorkerMemory: 100, - }, - } - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - defer p.Destroy(context.Background()) - - pid := p.Workers()[0].Pid() - - resp, err := p.Exec(&payload.Payload{ - Context: 
[]byte(""), - Body: []byte("foo"), - }) - - assert.Error(t, err) - assert.Empty(t, resp) - - time.Sleep(time.Second * 1) - // should be new worker with new pid - assert.NotEqual(t, pid, p.Workers()[0].Pid()) -} - -func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 5 * time.Second, - }, - } - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/sleep-ttl.php") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - pid := p.Workers()[0].Pid() - - resp, err := p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.NoError(t, err) - assert.Equal(t, string(resp.Body), "hello world") - assert.Empty(t, resp.Context) - - time.Sleep(time.Second) - assert.NotEqual(t, pid, p.Workers()[0].Pid()) - require.Equal(t, p.Workers()[0].State().Value(), worker.StateReady) - pid = p.Workers()[0].Pid() - - resp, err = p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.NoError(t, err) - assert.Equal(t, string(resp.Body), "hello world") - assert.Empty(t, resp.Context) - - time.Sleep(time.Second) - // should be new worker with new pid - assert.NotEqual(t, pid, p.Workers()[0].Pid()) - require.Equal(t, p.Workers()[0].State().Value(), worker.StateReady) - - p.Destroy(context.Background()) -} - -func TestSupervisedPool_Idle(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 100 * time.Second, - IdleTTL: 1 * time.Second, - ExecTTL: 100 * time.Second, - MaxWorkerMemory: 100, - }, - } - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/idle.php", "pipes") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - pid := p.Workers()[0].Pid() - - resp, err := p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.Nil(t, err) - assert.Empty(t, resp.Body) - assert.Empty(t, resp.Context) - - time.Sleep(time.Second * 5) - - // worker should be marked as invalid and reallocated - _, err = p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - assert.NoError(t, err) - // should be new worker with new pid - assert.NotEqual(t, pid, p.Workers()[0].Pid()) - p.Destroy(context.Background()) -} - -func TestSupervisedPool_IdleTTL_StateAfterTimeout(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 1 * time.Second, - IdleTTL: 1 * time.Second, - MaxWorkerMemory: 100, - }, - } - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/exec_ttl.php", "pipes") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - defer p.Destroy(context.Background()) - - pid := p.Workers()[0].Pid() - - time.Sleep(time.Millisecond * 100) - resp, err := p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.NoError(t, err) - assert.Empty(t, resp.Body) - assert.Empty(t, resp.Context) - - time.Sleep(time.Second * 2) - // should be destroyed, state should be 
Ready, not Invalid - assert.NotEqual(t, pid, p.Workers()[0].Pid()) - assert.Equal(t, int64(1), p.Workers()[0].State().Value()) -} - -func TestSupervisedPool_ExecTTL_OK(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 100 * time.Second, - IdleTTL: 100 * time.Second, - ExecTTL: 4 * time.Second, - MaxWorkerMemory: 100, - }, - } - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/exec_ttl.php", "pipes") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - defer p.Destroy(context.Background()) - - pid := p.Workers()[0].Pid() - - time.Sleep(time.Millisecond * 100) - resp, err := p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.NoError(t, err) - assert.Empty(t, resp.Body) - assert.Empty(t, resp.Context) - - time.Sleep(time.Second * 1) - // should be the same pid - assert.Equal(t, pid, p.Workers()[0].Pid()) -} - -func TestSupervisedPool_MaxMemoryReached(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(1), - AllocateTimeout: time.Second, - DestroyTimeout: time.Second, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 100 * time.Second, - IdleTTL: 100 * time.Second, - ExecTTL: 4 * time.Second, - MaxWorkerMemory: 1, - }, - } - - block := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.PoolEvent); ok { - if ev.Event == events.EventMaxMemory { - block <- struct{}{} - } - } - } - - // constructed - // max memory - // constructed - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") }, - pipe.NewPipeFactory(), - cfgExecTTL, - AddListeners(listener), - ) - - assert.NoError(t, err) - assert.NotNil(t, p) - - resp, err := p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - assert.NoError(t, err) - assert.Empty(t, resp.Body) - assert.Empty(t, resp.Context) - - <-block - p.Destroy(context.Background()) -} - -func TestSupervisedPool_AllocateFailedOK(t *testing.T) { - var cfgExecTTL = &Config{ - NumWorkers: uint64(2), - AllocateTimeout: time.Second * 15, - DestroyTimeout: time.Second * 5, - Supervisor: &SupervisorConfig{ - WatchTick: 1 * time.Second, - TTL: 5 * time.Second, - }, - } - - ctx := context.Background() - p, err := Initialize( - ctx, - func() *exec.Cmd { return exec.Command("php", "../../tests/allocate-failed.php") }, - pipe.NewPipeFactory(), - cfgExecTTL, - ) - - assert.NoError(t, err) - require.NotNil(t, p) - - time.Sleep(time.Second) - - // should be ok - _, err = p.Exec(&payload.Payload{ - Context: []byte(""), - Body: []byte("foo"), - }) - - require.NoError(t, err) - - // after creating this file, PHP will fail - file, err := os.Create("break") - require.NoError(t, err) - - time.Sleep(time.Second * 5) - assert.NoError(t, file.Close()) - assert.NoError(t, os.Remove("break")) - - defer func() { - if r := recover(); r != nil { - assert.Fail(t, "panic should not be fired!") - } else { - p.Destroy(context.Background()) - } - }() -} diff --git a/pkg/priority_queue/binary_heap.go b/pkg/priority_queue/binary_heap.go deleted file mode 100644 index fc043927..00000000 --- a/pkg/priority_queue/binary_heap.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -binary heap (min-heap) algorithm used as a core for the priority queue 
-*/ - -package priorityqueue - -import ( - "sync" - "sync/atomic" -) - -type BinHeap struct { - items []Item - // find a way to use pointer to the raw data - len uint64 - maxLen uint64 - cond sync.Cond -} - -func NewBinHeap(maxLen uint64) *BinHeap { - return &BinHeap{ - items: make([]Item, 0, 1000), - len: 0, - maxLen: maxLen, - cond: sync.Cond{L: &sync.Mutex{}}, - } -} - -func (bh *BinHeap) fixUp() { - k := bh.len - 1 - p := (k - 1) >> 1 // k-1 / 2 - - for k > 0 { - cur, par := (bh.items)[k], (bh.items)[p] - - if cur.Priority() < par.Priority() { - bh.swap(k, p) - k = p - p = (k - 1) >> 1 - } else { - return - } - } -} - -func (bh *BinHeap) swap(i, j uint64) { - (bh.items)[i], (bh.items)[j] = (bh.items)[j], (bh.items)[i] -} - -func (bh *BinHeap) fixDown(curr, end int) { - cOneIdx := (curr << 1) + 1 - for cOneIdx <= end { - cTwoIdx := -1 - if (curr<<1)+2 <= end { - cTwoIdx = (curr << 1) + 2 - } - - idxToSwap := cOneIdx - if cTwoIdx > -1 && (bh.items)[cTwoIdx].Priority() < (bh.items)[cOneIdx].Priority() { - idxToSwap = cTwoIdx - } - if (bh.items)[idxToSwap].Priority() < (bh.items)[curr].Priority() { - bh.swap(uint64(curr), uint64(idxToSwap)) - curr = idxToSwap - cOneIdx = (curr << 1) + 1 - } else { - return - } - } -} - -func (bh *BinHeap) Len() uint64 { - return atomic.LoadUint64(&bh.len) -} - -func (bh *BinHeap) Insert(item Item) { - bh.cond.L.Lock() - - // check the binary heap len before insertion - if bh.Len() > bh.maxLen { - // unlock the mutex to proceed to get-max - bh.cond.L.Unlock() - - // signal waiting goroutines - for bh.Len() > 0 { - // signal waiting goroutines - bh.cond.Signal() - } - // lock mutex to proceed inserting into the empty slice - bh.cond.L.Lock() - } - - bh.items = append(bh.items, item) - - // add len to the slice - atomic.AddUint64(&bh.len, 1) - - // fix binary heap up - bh.fixUp() - bh.cond.L.Unlock() - - // signal the goroutine on wait - bh.cond.Signal() -} - -func (bh *BinHeap) ExtractMin() Item { - bh.cond.L.Lock() - - // if len == 0, wait for the signal - for bh.Len() == 0 { - bh.cond.Wait() - } - - bh.swap(0, bh.len-1) - - item := (bh.items)[int(bh.len)-1] - bh.items = (bh).items[0 : int(bh.len)-1] - bh.fixDown(0, int(bh.len-2)) - - // reduce len - atomic.AddUint64(&bh.len, ^uint64(0)) - - bh.cond.L.Unlock() - return item -} diff --git a/pkg/priority_queue/binary_heap_test.go b/pkg/priority_queue/binary_heap_test.go deleted file mode 100644 index ab0f9266..00000000 --- a/pkg/priority_queue/binary_heap_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package priorityqueue - -import ( - "fmt" - "math/rand" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -type Test int - -func (t Test) Ack() error { - return nil -} - -func (t Test) Nack() error { - return nil -} - -func (t Test) Requeue(_ map[string][]string, _ int64) error { - return nil -} - -func (t Test) Body() []byte { - return nil -} - -func (t Test) Context() ([]byte, error) { - return nil, nil -} - -func (t Test) ID() string { - return "none" -} - -func (t Test) Priority() int64 { - return int64(t) -} - -func TestBinHeap_Init(t *testing.T) { - a := []Item{Test(2), Test(23), Test(33), Test(44), Test(1), Test(2), Test(2), Test(2), Test(4), Test(6), Test(99)} - - bh := NewBinHeap(12) - - for i := 0; i < len(a); i++ { - bh.Insert(a[i]) - } - - expected := []Item{Test(1), Test(2), Test(2), Test(2), Test(2), Test(4), Test(6), Test(23), Test(33), Test(44), Test(99)} - - res := make([]Item, 0, 12) - - for i := 0; i < 11; i++ { - item := bh.ExtractMin() - res = append(res, item) - 
} - - require.Equal(t, expected, res) -} - -func TestBinHeap_MaxLen(t *testing.T) { - a := []Item{Test(2), Test(23), Test(33), Test(44), Test(1), Test(2), Test(2), Test(2), Test(4), Test(6), Test(99)} - - bh := NewBinHeap(1) - - go func() { - expected := []Item{Test(2), Test(23), Test(33), Test(44), Test(1), Test(2), Test(2), Test(2), Test(4), Test(6), Test(99)} - - res := make([]Item, 0, 12) - - for i := 0; i < 11; i++ { - item := bh.ExtractMin() - res = append(res, item) - } - require.Equal(t, expected, res) - return - }() - - time.Sleep(time.Second) - for i := 0; i < len(a); i++ { - bh.Insert(a[i]) - } - - time.Sleep(time.Second) -} - -func TestNewPriorityQueue(t *testing.T) { - insertsPerSec := uint64(0) - getPerSec := uint64(0) - stopCh := make(chan struct{}, 1) - pq := NewBinHeap(1000) - - go func() { - tt3 := time.NewTicker(time.Millisecond * 10) - for { - select { - case <-tt3.C: - require.Less(t, pq.Len(), uint64(1002)) - case <-stopCh: - return - } - } - }() - - go func() { - tt := time.NewTicker(time.Second) - - for { - select { - case <-tt.C: - fmt.Println(fmt.Sprintf("Insert per second: %d", atomic.LoadUint64(&insertsPerSec))) - atomic.StoreUint64(&insertsPerSec, 0) - fmt.Println(fmt.Sprintf("ExtractMin per second: %d", atomic.LoadUint64(&getPerSec))) - atomic.StoreUint64(&getPerSec, 0) - case <-stopCh: - tt.Stop() - return - } - } - }() - - go func() { - for { - select { - case <-stopCh: - return - default: - pq.ExtractMin() - atomic.AddUint64(&getPerSec, 1) - } - } - }() - - go func() { - for { - select { - case <-stopCh: - return - default: - pq.Insert(Test(rand.Int())) //nolint:gosec - atomic.AddUint64(&insertsPerSec, 1) - } - } - }() - - time.Sleep(time.Second * 5) - stopCh <- struct{}{} - stopCh <- struct{}{} - stopCh <- struct{}{} - stopCh <- struct{}{} -} diff --git a/pkg/priority_queue/interface.go b/pkg/priority_queue/interface.go deleted file mode 100644 index 9efa4652..00000000 --- a/pkg/priority_queue/interface.go +++ /dev/null @@ -1,31 +0,0 @@ -package priorityqueue - -type Queue interface { - Insert(item Item) - ExtractMin() Item - Len() uint64 -} - -// Item represents binary heap item -type Item interface { - // ID is a unique item identifier - ID() string - - // Priority returns the Item's priority to sort - Priority() int64 - - // Body is the Item payload - Body() []byte - - // Context is the Item meta information - Context() ([]byte, error) - - // Ack - acknowledge the Item after processing - Ack() error - - // Nack - discard the Item - Nack() error - - // Requeue - put the message back to the queue with the optional delay - Requeue(headers map[string][]string, delay int64) error -} diff --git a/pkg/state/job/state.go b/pkg/state/job/state.go deleted file mode 100644 index 56050084..00000000 --- a/pkg/state/job/state.go +++ /dev/null @@ -1,19 +0,0 @@ -package job - -// State represents job's state -type State struct { - // Pipeline name - Pipeline string - // Driver name - Driver string - // Queue name (tube for the beanstalk) - Queue string - // Active jobs which are consumed from the driver but not handled by the PHP worker yet - Active int64 - // Delayed jobs - Delayed int64 - // Reserved jobs which are in the driver but not consumed yet - Reserved int64 - // Status - 1 Ready, 0 - Paused - Ready bool -} diff --git a/pkg/state/process/state.go b/pkg/state/process/state.go deleted file mode 100644 index bfc3a287..00000000 --- a/pkg/state/process/state.go +++ /dev/null @@ -1,76 +0,0 @@ -package process - -import ( - "github.com/shirou/gopsutil/process" - 
"github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -// State provides information about specific worker. -type State struct { - // Pid contains process id. - Pid int `json:"pid"` - - // Status of the worker. - Status string `json:"status"` - - // Number of worker executions. - NumJobs uint64 `json:"numExecs"` - - // Created is unix nano timestamp of worker creation time. - Created int64 `json:"created"` - - // MemoryUsage holds the information about worker memory usage in bytes. - // Values might vary for different operating systems and based on RSS. - MemoryUsage uint64 `json:"memoryUsage"` - - // CPU_Percent returns how many percent of the CPU time this process uses - CPUPercent float64 - - // Command used in the service plugin and shows a command for the particular service - Command string -} - -// WorkerProcessState creates new worker state definition. -func WorkerProcessState(w worker.BaseProcess) (*State, error) { - const op = errors.Op("worker_process_state") - p, _ := process.NewProcess(int32(w.Pid())) - i, err := p.MemoryInfo() - if err != nil { - return nil, errors.E(op, err) - } - - percent, err := p.CPUPercent() - if err != nil { - return nil, err - } - - return &State{ - CPUPercent: percent, - Pid: int(w.Pid()), - Status: w.State().String(), - NumJobs: w.State().NumExecs(), - Created: w.Created().UnixNano(), - MemoryUsage: i.RSS, - }, nil -} - -func GeneralProcessState(pid int, command string) (State, error) { - const op = errors.Op("process_state") - p, _ := process.NewProcess(int32(pid)) - i, err := p.MemoryInfo() - if err != nil { - return State{}, errors.E(op, err) - } - percent, err := p.CPUPercent() - if err != nil { - return State{}, err - } - - return State{ - CPUPercent: percent, - Pid: pid, - MemoryUsage: i.RSS, - Command: command, - }, nil -} diff --git a/pkg/transport/interface.go b/pkg/transport/interface.go deleted file mode 100644 index 1b072378..00000000 --- a/pkg/transport/interface.go +++ /dev/null @@ -1,21 +0,0 @@ -package transport - -import ( - "context" - "os/exec" - - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -// Factory is responsible for wrapping given command into tasks WorkerProcess. -type Factory interface { - // SpawnWorkerWithTimeout creates new WorkerProcess process based on given command with context. - // Process must not be started. - SpawnWorkerWithTimeout(context.Context, *exec.Cmd, ...events.Listener) (*worker.Process, error) - // SpawnWorker creates new WorkerProcess process based on given command. - // Process must not be started. - SpawnWorker(*exec.Cmd, ...events.Listener) (*worker.Process, error) - // Close the factory and underlying connections. - Close() error -} diff --git a/pkg/transport/pipe/pipe_factory.go b/pkg/transport/pipe/pipe_factory.go deleted file mode 100755 index 9433a510..00000000 --- a/pkg/transport/pipe/pipe_factory.go +++ /dev/null @@ -1,197 +0,0 @@ -package pipe - -import ( - "context" - "os/exec" - - "github.com/spiral/errors" - "github.com/spiral/goridge/v3/pkg/pipe" - "github.com/spiral/roadrunner/v2/internal" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/worker" - "go.uber.org/multierr" -) - -// Factory connects to stack using standard -// streams (STDIN, STDOUT pipes). 
-type Factory struct{} - -// NewPipeFactory returns new factory instance and starts -// listening -func NewPipeFactory() *Factory { - return &Factory{} -} - -type sr struct { - w *worker.Process - err error -} - -// SpawnWorkerWithTimeout creates new Process and connects it to goridge relay, -// method Wait() must be handled on level above. -func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { //nolint:gocognit - spCh := make(chan sr) - const op = errors.Op("factory_spawn_worker_with_timeout") - go func() { - w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) - if err != nil { - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - in, err := cmd.StdoutPipe() - if err != nil { - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - out, err := cmd.StdinPipe() - if err != nil { - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - // Init new PIPE relay - relay := pipe.NewPipeRelay(in, out) - w.AttachRelay(relay) - - // Start the worker - err = w.Start() - if err != nil { - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - pid, err := internal.FetchPID(relay) - if err != nil { - err = multierr.Combine( - err, - w.Kill(), - w.Wait(), - ) - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, err), - }: - return - default: - _ = w.Kill() - return - } - } - - if pid != w.Pid() { - select { - case spCh <- sr{ - w: nil, - err: errors.E(op, errors.Errorf("pid mismatches, get: %d, want: %d", pid, w.Pid())), - }: - return - default: - _ = w.Kill() - return - } - } - - select { - case - // return worker - spCh <- sr{ - w: w, - err: nil, - }: - // everything ok, set ready state - w.State().Set(worker.StateReady) - return - default: - _ = w.Kill() - return - } - }() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case res := <-spCh: - if res.err != nil { - return nil, res.err - } - return res.w, nil - } -} - -func (f *Factory) SpawnWorker(cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { - const op = errors.Op("factory_spawn_worker") - w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) - if err != nil { - return nil, errors.E(op, err) - } - - in, err := cmd.StdoutPipe() - if err != nil { - return nil, errors.E(op, err) - } - - out, err := cmd.StdinPipe() - if err != nil { - return nil, errors.E(op, err) - } - - // Init new PIPE relay - relay := pipe.NewPipeRelay(in, out) - w.AttachRelay(relay) - - // Start the worker - err = w.Start() - if err != nil { - return nil, errors.E(op, err) - } - - // errors bundle - if pid, err := internal.FetchPID(relay); pid != w.Pid() { - err = multierr.Combine( - err, - w.Kill(), - w.Wait(), - ) - return nil, errors.E(op, err) - } - - // everything ok, set ready state - w.State().Set(worker.StateReady) - return w, nil -} - -// Close the factory. 
-func (f *Factory) Close() error { - return nil -} diff --git a/pkg/transport/pipe/pipe_factory_spawn_test.go b/pkg/transport/pipe/pipe_factory_spawn_test.go deleted file mode 100644 index f5e9669b..00000000 --- a/pkg/transport/pipe/pipe_factory_spawn_test.go +++ /dev/null @@ -1,461 +0,0 @@ -package pipe - -import ( - "os/exec" - "strings" - "sync" - "testing" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/stretchr/testify/assert" -) - -func Test_GetState2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - assert.Equal(t, worker.StateStopped, w.State().Value()) - }() - - assert.NoError(t, err) - assert.NotNil(t, w) - - assert.Equal(t, worker.StateReady, w.State().Value()) - assert.NoError(t, w.Stop()) -} - -func Test_Kill2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorker(cmd) - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - assert.Error(t, w.Wait()) - assert.Equal(t, worker.StateErrored, w.State().Value()) - }() - - assert.NoError(t, err) - assert.NotNil(t, w) - - assert.Equal(t, worker.StateReady, w.State().Value()) - err = w.Kill() - if err != nil { - t.Errorf("error killing the Process: error %v", err) - } - wg.Wait() -} - -func Test_Pipe_Start2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorker(cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - assert.NoError(t, w.Stop()) -} - -func Test_Pipe_StartError2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - err := cmd.Start() - if err != nil { - t.Errorf("error running the command: error %v", err) - } - - w, err := NewPipeFactory().SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_PipeError3(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - _, err := cmd.StdinPipe() - if err != nil { - t.Errorf("error creating the STDIN pipe: error %v", err) - } - - w, err := NewPipeFactory().SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_PipeError4(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - _, err := cmd.StdinPipe() - if err != nil { - t.Errorf("error creating the STDIN pipe: error %v", err) - } - - w, err := NewPipeFactory().SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_Failboot2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/failboot.php") - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - w, err := NewPipeFactory().SpawnWorker(cmd, listener) - - assert.Nil(t, w) - assert.Error(t, err) - <-finish -} - -func Test_Pipe_Invalid2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/invalid.php") - w, err := NewPipeFactory().SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_Echo2(t *testing.T) { - cmd := exec.Command("php", 
"../../../tests/client.php", "echo", "pipes") - w, err := NewPipeFactory().SpawnWorker(cmd) - if err != nil { - t.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Pipe_Broken2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes") - w, err := NewPipeFactory().SpawnWorker(cmd) - if err != nil { - t.Fatal(err) - } - defer func() { - time.Sleep(time.Second) - err = w.Stop() - assert.Error(t, err) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Error(t, err) - assert.Nil(t, res) -} - -func Benchmark_Pipe_SpawnWorker_Stop2(b *testing.B) { - f := NewPipeFactory() - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - w, _ := f.SpawnWorker(cmd) - go func() { - if w.Wait() != nil { - b.Fail() - } - }() - - err := w.Stop() - if err != nil { - b.Errorf("error stopping the worker: error %v", err) - } - } -} - -func Benchmark_Pipe_Worker_ExecEcho2(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - sw := worker.From(w) - - b.ReportAllocs() - b.ResetTimer() - go func() { - err := w.Wait() - if err != nil { - b.Errorf("error waiting the worker: error %v", err) - } - }() - defer func() { - err := w.Stop() - if err != nil { - b.Errorf("error stopping the worker: error %v", err) - } - }() - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Pipe_Worker_ExecEcho4(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - w, err := NewPipeFactory().SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Pipe_Worker_ExecEchoWithoutContext2(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - w, err := NewPipeFactory().SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Test_Echo2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorker(cmd) - if err != nil { - t.Fatal(err) - } - - sw := worker.From(w) - - go func() { - assert.NoError(t, sw.Wait()) - }() - defer func() { - err = sw.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Nil(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_BadPayload2(t *testing.T) { - cmd := 
exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - - sw := worker.From(w) - - go func() { - assert.NoError(t, sw.Wait()) - }() - defer func() { - err := sw.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - res, err := sw.Exec(&payload.Payload{}) - - assert.Error(t, err) - assert.Nil(t, res) - - assert.Contains(t, err.Error(), "payload can not be empty") -} - -func Test_String2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - assert.Contains(t, w.String(), "php ../../../tests/client.php echo pipes") - assert.Contains(t, w.String(), "ready") - assert.Contains(t, w.String(), "numExecs: 0") -} - -func Test_Echo_Slow2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "pipes", "10", "10") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Nil(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Broken2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes") - data := "" - mu := &sync.Mutex{} - listener := func(event interface{}) { - if wev, ok := event.(events.WorkerEvent); ok { - mu.Lock() - data = string(wev.Payload.([]byte)) - mu.Unlock() - } - } - - w, err := NewPipeFactory().SpawnWorker(cmd, listener) - if err != nil { - t.Fatal(err) - } - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NotNil(t, err) - assert.Nil(t, res) - - time.Sleep(time.Second * 3) - mu.Lock() - if strings.ContainsAny(data, "undefined_function()") == false { - t.Fail() - } - mu.Unlock() - assert.Error(t, w.Stop()) -} - -func Test_Error2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "error", "pipes") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NotNil(t, err) - assert.Nil(t, res) - - if errors.Is(errors.SoftJob, err) == false { - t.Fatal("error should be of type errors.ErrSoftJob") - } - assert.Contains(t, err.Error(), "hello") -} - -func Test_NumExecs2(t *testing.T) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", err) - } - assert.Equal(t, uint64(1), w.State().NumExecs()) - - _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", 
err) - } - assert.Equal(t, uint64(2), w.State().NumExecs()) - - _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", err) - } - assert.Equal(t, uint64(3), w.State().NumExecs()) -} diff --git a/pkg/transport/pipe/pipe_factory_test.go b/pkg/transport/pipe/pipe_factory_test.go deleted file mode 100755 index e396fe57..00000000 --- a/pkg/transport/pipe/pipe_factory_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package pipe - -import ( - "context" - "os/exec" - "strings" - "sync" - "testing" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/stretchr/testify/assert" -) - -func Test_GetState(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - assert.Equal(t, worker.StateStopped, w.State().Value()) - }() - - assert.NoError(t, err) - assert.NotNil(t, w) - - assert.Equal(t, worker.StateReady, w.State().Value()) - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Kill(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - assert.Error(t, w.Wait()) - assert.Equal(t, worker.StateErrored, w.State().Value()) - }() - - assert.NoError(t, err) - assert.NotNil(t, w) - - assert.Equal(t, worker.StateReady, w.State().Value()) - err = w.Kill() - if err != nil { - t.Errorf("error killing the Process: error %v", err) - } - wg.Wait() -} - -func Test_Pipe_Start(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - assert.NoError(t, w.Stop()) -} - -func Test_Pipe_StartError(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - err := cmd.Start() - if err != nil { - t.Errorf("error running the command: error %v", err) - } - - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_PipeError(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - _, err := cmd.StdinPipe() - if err != nil { - t.Errorf("error creating the STDIN pipe: error %v", err) - } - - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_PipeError2(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - // error cause - _, err := cmd.StdinPipe() - if err != nil { - t.Errorf("error creating the STDIN pipe: error %v", err) - } - - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_Failboot(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/failboot.php") - ctx := 
context.Background() - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd, listener) - - assert.Nil(t, w) - assert.Error(t, err) - <-finish -} - -func Test_Pipe_Invalid(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/invalid.php") - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Pipe_Echo(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - t.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Pipe_Broken(t *testing.T) { - t.Parallel() - cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes") - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - t.Fatal(err) - } - defer func() { - time.Sleep(time.Second) - err = w.Stop() - assert.Error(t, err) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Error(t, err) - assert.Nil(t, res) -} - -func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) { - f := NewPipeFactory() - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - w, _ := f.SpawnWorkerWithTimeout(context.Background(), cmd) - go func() { - if w.Wait() != nil { - b.Fail() - } - }() - - err := w.Stop() - if err != nil { - b.Errorf("error stopping the worker: error %v", err) - } - } -} - -func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(context.Background(), cmd) - sw := worker.From(w) - - b.ReportAllocs() - b.ResetTimer() - go func() { - err := w.Wait() - if err != nil { - b.Errorf("error waiting the worker: error %v", err) - } - }() - defer func() { - err := w.Stop() - if err != nil { - b.Errorf("error stopping the worker: error %v", err) - } - }() - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - ctx := context.Background() - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - ctx := context.Background() - w, err 
:= NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Test_Echo(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - t.Fatal(err) - } - - sw := worker.From(w) - go func() { - assert.NoError(t, sw.Wait()) - }() - defer func() { - err = sw.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Nil(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_BadPayload(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - - sw := worker.From(w) - - go func() { - assert.NoError(t, sw.Wait()) - }() - defer func() { - err := sw.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - res, err := sw.Exec(&payload.Payload{}) - - assert.Error(t, err) - assert.Nil(t, res) - - assert.Contains(t, err.Error(), "payload can not be empty") -} - -func Test_String(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - assert.Contains(t, w.String(), "php ../../../tests/client.php echo pipes") - assert.Contains(t, w.String(), "ready") - assert.Contains(t, w.String(), "numExecs: 0") -} - -func Test_Echo_Slow(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "pipes", "10", "10") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Nil(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Broken(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes") - data := "" - mu := &sync.Mutex{} - listener := func(event interface{}) { - if wev, ok := event.(events.WorkerEvent); ok { - mu.Lock() - data = string(wev.Payload.([]byte)) - mu.Unlock() - } - } - - w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd, listener) - if err != nil { - t.Fatal(err) - } - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NotNil(t, err) - assert.Nil(t, res) - - time.Sleep(time.Second * 3) - mu.Lock() - if strings.ContainsAny(data, "undefined_function()") == false { - t.Fail() - } - 
mu.Unlock() - assert.Error(t, w.Stop()) -} - -func Test_Error(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "error", "pipes") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.NotNil(t, err) - assert.Nil(t, res) - - if errors.Is(errors.SoftJob, err) == false { - t.Fatal("error should be of type errors.ErrSoftJob") - } - assert.Contains(t, err.Error(), "hello") -} - -func Test_NumExecs(t *testing.T) { - t.Parallel() - ctx := context.Background() - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - - w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err := w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", err) - } - assert.Equal(t, uint64(1), w.State().NumExecs()) - - _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", err) - } - assert.Equal(t, uint64(2), w.State().NumExecs()) - - _, err = sw.Exec(&payload.Payload{Body: []byte("hello")}) - if err != nil { - t.Errorf("fail to execute payload: error %v", err) - } - assert.Equal(t, uint64(3), w.State().NumExecs()) -} diff --git a/pkg/transport/socket/socket_factory.go b/pkg/transport/socket/socket_factory.go deleted file mode 100755 index dc2b75cf..00000000 --- a/pkg/transport/socket/socket_factory.go +++ /dev/null @@ -1,255 +0,0 @@ -package socket - -import ( - "context" - "fmt" - "net" - "os/exec" - "sync" - "time" - - "github.com/shirou/gopsutil/process" - "github.com/spiral/errors" - "github.com/spiral/goridge/v3/pkg/relay" - "github.com/spiral/goridge/v3/pkg/socket" - "github.com/spiral/roadrunner/v2/internal" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/worker" - - "go.uber.org/multierr" - "golang.org/x/sync/errgroup" -) - -// Factory connects to external stack using socket server. -type Factory struct { - // listens for incoming connections from underlying processes - ls net.Listener - - // relay connection timeout - tout time.Duration - - // sockets which are waiting for process association - relays sync.Map -} - -// NewSocketServer returns Factory attached to a given socket listener. 
-// tout specifies for how long factory should serve for incoming relay connection -func NewSocketServer(ls net.Listener, tout time.Duration) *Factory { - f := &Factory{ - ls: ls, - tout: tout, - relays: sync.Map{}, - } - - // Be careful - // https://github.com/go101/go101/wiki/About-memory-ordering-guarantees-made-by-atomic-operations-in-Go - // https://github.com/golang/go/issues/5045 - go func() { - err := f.listen() - // there is no logger here, use fmt - if err != nil { - fmt.Printf("[WARN]: socket server listen, error: %v\n", err) - } - }() - - return f -} - -// blocking operation, returns an error -func (f *Factory) listen() error { - errGr := &errgroup.Group{} - errGr.Go(func() error { - for { - conn, err := f.ls.Accept() - if err != nil { - return err - } - - rl := socket.NewSocketRelay(conn) - pid, err := internal.FetchPID(rl) - if err != nil { - return err - } - - f.attachRelayToPid(pid, rl) - } - }) - - return errGr.Wait() -} - -type socketSpawn struct { - w *worker.Process - err error -} - -// SpawnWorkerWithTimeout creates Process and connects it to appropriate relay or return an error -func (f *Factory) SpawnWorkerWithTimeout(ctx context.Context, cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { - const op = errors.Op("factory_spawn_worker_with_timeout") - c := make(chan socketSpawn) - go func() { - ctxT, cancel := context.WithTimeout(ctx, f.tout) - defer cancel() - w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) - if err != nil { - select { - case c <- socketSpawn{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - err = w.Start() - if err != nil { - select { - case c <- socketSpawn{ - w: nil, - err: errors.E(op, err), - }: - return - default: - return - } - } - - rl, err := f.findRelayWithContext(ctxT, w) - if err != nil { - err = multierr.Combine( - err, - w.Kill(), - w.Wait(), - ) - - select { - // try to write result - case c <- socketSpawn{ - w: nil, - err: errors.E(op, err), - }: - return - // if no receivers - return - default: - return - } - } - - w.AttachRelay(rl) - w.State().Set(worker.StateReady) - - select { - case c <- socketSpawn{ - w: w, - err: nil, - }: - return - default: - _ = w.Kill() - return - } - }() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case res := <-c: - if res.err != nil { - return nil, res.err - } - - return res.w, nil - } -} - -func (f *Factory) SpawnWorker(cmd *exec.Cmd, listeners ...events.Listener) (*worker.Process, error) { - const op = errors.Op("factory_spawn_worker") - w, err := worker.InitBaseWorker(cmd, worker.AddListeners(listeners...)) - if err != nil { - return nil, err - } - - err = w.Start() - if err != nil { - return nil, errors.E(op, err) - } - - rl, err := f.findRelay(w) - if err != nil { - err = multierr.Combine( - err, - w.Kill(), - w.Wait(), - ) - return nil, err - } - - w.AttachRelay(rl) - - // errors bundle - if pid, err := internal.FetchPID(rl); pid != w.Pid() { - err = multierr.Combine( - err, - w.Kill(), - w.Wait(), - ) - return nil, errors.E(op, err) - } - - w.State().Set(worker.StateReady) - - return w, nil -} - -// Close socket factory and underlying socket connection. 
-func (f *Factory) Close() error { - return f.ls.Close() -} - -// waits for Process to connect over socket and returns associated relay of timeout -func (f *Factory) findRelayWithContext(ctx context.Context, w worker.BaseProcess) (*socket.Relay, error) { - ticker := time.NewTicker(time.Millisecond * 10) - for { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-ticker.C: - // check for the process exists - _, err := process.NewProcess(int32(w.Pid())) - if err != nil { - return nil, err - } - default: - tmp, ok := f.relays.LoadAndDelete(w.Pid()) - if !ok { - continue - } - return tmp.(*socket.Relay), nil - } - } -} - -func (f *Factory) findRelay(w worker.BaseProcess) (*socket.Relay, error) { - const op = errors.Op("factory_find_relay") - // poll every 1ms for the relay - pollDone := time.NewTimer(f.tout) - for { - select { - case <-pollDone.C: - return nil, errors.E(op, errors.Str("relay timeout")) - default: - tmp, ok := f.relays.Load(w.Pid()) - if !ok { - continue - } - return tmp.(*socket.Relay), nil - } - } -} - -// chan to store relay associated with specific pid -func (f *Factory) attachRelayToPid(pid int64, relay relay.Relay) { - f.relays.Store(pid, relay) -} diff --git a/pkg/transport/socket/socket_factory_spawn_test.go b/pkg/transport/socket/socket_factory_spawn_test.go deleted file mode 100644 index 905a3b6b..00000000 --- a/pkg/transport/socket/socket_factory_spawn_test.go +++ /dev/null @@ -1,533 +0,0 @@ -package socket - -import ( - "net" - "os/exec" - "strings" - "sync" - "syscall" - "testing" - "time" - - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/stretchr/testify/assert" -) - -func Test_Tcp_Start2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Tcp_StartCloseFactory2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - f := NewSocketServer(ls, time.Minute) - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - - w, err := f.SpawnWorker(cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Tcp_StartError2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - err = cmd.Start() - if err != nil { - t.Errorf("error executing the command: error %v", err) - } - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Tcp_Failboot2(t 
*testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err3 := ls.Close() - if err3 != nil { - t.Errorf("error closing the listener: error %v", err3) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/failboot.php") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - - w, err2 := NewSocketServer(ls, time.Second*5).SpawnWorker(cmd, listener) - assert.Nil(t, w) - assert.Error(t, err2) - <-finish -} - -func Test_Tcp_Invalid2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/invalid.php") - - w, err := NewSocketServer(ls, time.Second*1).SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Tcp_Broken2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "broken", "tcp") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "undefined_function()") { - finish <- struct{}{} - } - } - } - } - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd, listener) - if err != nil { - t.Fatal(err) - } - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - errW := w.Wait() - assert.Error(t, errW) - }() - - defer func() { - time.Sleep(time.Second) - err2 := w.Stop() - // write tcp 127.0.0.1:9007->127.0.0.1:34204: use of closed network connection - assert.Error(t, err2) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.Error(t, err) - assert.Nil(t, res) - wg.Wait() - <-finish -} - -func Test_Tcp_Echo2(t *testing.T) { - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, _ := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Unix_Start2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - err = ls.Close() - assert.NoError(t, err) - }() - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - 
assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Unix_Failboot2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - err = ls.Close() - assert.NoError(t, err) - }() - - cmd := exec.Command("php", "../../../tests/failboot.php") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - - w, err := NewSocketServer(ls, time.Second*5).SpawnWorker(cmd, listener) - assert.Nil(t, w) - assert.Error(t, err) - <-finish -} - -func Test_Unix_Timeout2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - err = ls.Close() - assert.NoError(t, err) - }() - - cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "unix", "200", "0") - - w, err := NewSocketServer(ls, time.Millisecond*100).SpawnWorker(cmd) - assert.Nil(t, w) - assert.Error(t, err) - assert.Contains(t, err.Error(), "relay timeout") -} - -func Test_Unix_Invalid2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - err = ls.Close() - assert.NoError(t, err) - }() - - cmd := exec.Command("php", "../../../tests/invalid.php") - - w, err := NewSocketServer(ls, time.Second*10).SpawnWorker(cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Unix_Broken2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - errC := ls.Close() - assert.NoError(t, errC) - }() - - cmd := exec.Command("php", "../../../tests/client.php", "broken", "unix") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "undefined_function()") { - finish <- struct{}{} - } - } - } - } - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd, listener) - if err != nil { - t.Fatal(err) - } - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - errW := w.Wait() - assert.Error(t, errW) - }() - - defer func() { - time.Sleep(time.Second) - err = w.Stop() - assert.Error(t, err) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Error(t, err) - assert.Nil(t, res) - wg.Wait() - <-finish -} - -func Test_Unix_Echo2(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(t, err) - defer func() { - err = ls.Close() - assert.NoError(t, err) - }() - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - if err != nil { - t.Fatal(err) - } - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Benchmark_Tcp_SpawnWorker_Stop2(b *testing.B) { - ls, err := net.Listen("unix", "sock.unix") - 
assert.NoError(b, err) - defer func() { - err = ls.Close() - assert.NoError(b, err) - }() - - f := NewSocketServer(ls, time.Minute) - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := f.SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - go func() { - assert.NoError(b, w.Wait()) - }() - - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - } -} - -func Benchmark_Tcp_Worker_ExecEcho2(b *testing.B) { - ls, err := net.Listen("unix", "sock.unix") - assert.NoError(b, err) - defer func() { - err = ls.Close() - assert.NoError(b, err) - }() - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Unix_SpawnWorker_Stop2(b *testing.B) { - defer func() { - _ = syscall.Unlink("sock.unix") - }() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - errC := ls.Close() - if errC != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - f := NewSocketServer(ls, time.Minute) - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := f.SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - } -} - -func Benchmark_Unix_Worker_ExecEcho2(b *testing.B) { - defer func() { - _ = syscall.Unlink("sock.unix") - }() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - errC := ls.Close() - if errC != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd) - if err != nil { - b.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} diff --git a/pkg/transport/socket/socket_factory_test.go b/pkg/transport/socket/socket_factory_test.go deleted file mode 100755 index 17437e2f..00000000 --- a/pkg/transport/socket/socket_factory_test.go +++ /dev/null @@ -1,622 +0,0 @@ -package socket - -import ( - "context" - "net" - "os/exec" - "strings" - "sync" - "testing" - "time" - - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/stretchr/testify/assert" -) - -func Test_Tcp_Start(t *testing.T) { - ctx := context.Background() - time.Sleep(time.Millisecond * 10) // to ensure free socket - - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := 
NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Tcp_StartCloseFactory(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - f := NewSocketServer(ls, time.Minute) - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - - w, err := f.SpawnWorkerWithTimeout(ctx, cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Tcp_StartError(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes") - err = cmd.Start() - if err != nil { - t.Errorf("error executing the command: error %v", err) - } - - serv := NewSocketServer(ls, time.Minute) - time.Sleep(time.Second * 2) - w, err := serv.SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Tcp_Failboot(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err3 := ls.Close() - if err3 != nil { - t.Errorf("error closing the listener: error %v", err3) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/failboot.php") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - - w, err2 := NewSocketServer(ls, time.Second*5).SpawnWorkerWithTimeout(ctx, cmd, listener) - assert.Nil(t, w) - assert.Error(t, err2) - <-finish -} - -func Test_Tcp_Timeout(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "tcp", "200", "0") - - w, err := NewSocketServer(ls, time.Millisecond*1).SpawnWorkerWithTimeout(ctx, cmd) - assert.Nil(t, w) - assert.Error(t, err) - assert.Contains(t, err.Error(), "context deadline exceeded") -} - -func Test_Tcp_Invalid(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error 
closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/invalid.php") - - w, err := NewSocketServer(ls, time.Second*1).SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Tcp_Broken(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "broken", "tcp") - - finish := make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "undefined_function()") { - finish <- struct{}{} - } - } - } - } - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd, listener) - if err != nil { - t.Fatal(err) - } - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - errW := w.Wait() - assert.Error(t, errW) - }() - - defer func() { - time.Sleep(time.Second) - err2 := w.Stop() - // write tcp 127.0.0.1:9007->127.0.0.1:34204: use of closed network connection - assert.Error(t, err2) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - assert.Error(t, err) - assert.Nil(t, res) - wg.Wait() - <-finish -} - -func Test_Tcp_Echo(t *testing.T) { - time.Sleep(time.Millisecond * 10) // to ensure free socket - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if assert.NoError(t, err) { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, _ := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", res.String()) -} - -func Test_Unix_Start(t *testing.T) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - assert.NoError(t, err) - assert.NotNil(t, w) - - go func() { - assert.NoError(t, w.Wait()) - }() - - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } -} - -func Test_Unix_Failboot(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - ctx := context.Background() - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/failboot.php") - - finish 
:= make(chan struct{}, 10) - listener := func(event interface{}) { - if ev, ok := event.(events.WorkerEvent); ok { - if ev.Event == events.EventWorkerStderr { - if strings.Contains(string(ev.Payload.([]byte)), "failboot") { - finish <- struct{}{} - } - } - } - } - - w, err := NewSocketServer(ls, time.Second*5).SpawnWorkerWithTimeout(ctx, cmd, listener) - assert.Nil(t, w) - assert.Error(t, err) - <-finish -} - -func Test_Unix_Timeout(t *testing.T) { - ls, err := net.Listen("unix", "sock.unix") - ctx := context.Background() - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "unix", "200", "0") - - w, err := NewSocketServer(ls, time.Millisecond*100).SpawnWorkerWithTimeout(ctx, cmd) - assert.Nil(t, w) - assert.Error(t, err) - assert.Contains(t, err.Error(), "context deadline exceeded") -} - -func Test_Unix_Invalid(t *testing.T) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/invalid.php") - - w, err := NewSocketServer(ls, time.Second*10).SpawnWorkerWithTimeout(ctx, cmd) - assert.Error(t, err) - assert.Nil(t, w) -} - -func Test_Unix_Broken(t *testing.T) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - errC := ls.Close() - if errC != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "broken", "unix") - - block := make(chan struct{}, 10) - listener := func(event interface{}) { - if wev, ok := event.(events.WorkerEvent); ok { - if wev.Event == events.EventWorkerStderr { - e := string(wev.Payload.([]byte)) - if strings.ContainsAny(e, "undefined_function()") { - block <- struct{}{} - return - } - } - } - } - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd, listener) - if err != nil { - t.Fatal(err) - } - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - errW := w.Wait() - assert.Error(t, errW) - }() - - defer func() { - time.Sleep(time.Second) - err = w.Stop() - assert.Error(t, err) - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Error(t, err) - assert.Nil(t, res) - <-block - wg.Wait() -} - -func Test_Unix_Echo(t *testing.T) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - t.Errorf("error closing the listener: error %v", err) - } - }() - } else { - t.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - t.Fatal(err) - } - go func() { - assert.NoError(t, w.Wait()) - }() - defer func() { - err = w.Stop() - if err != nil { - t.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.NoError(t, err) - assert.NotNil(t, res) - assert.NotNil(t, res.Body) - assert.Empty(t, res.Context) - - assert.Equal(t, "hello", 
res.String()) -} - -func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) { - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - f := NewSocketServer(ls, time.Minute) - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := f.SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - go func() { - assert.NoError(b, w.Wait()) - }() - - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - } -} - -func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) { - ctx := context.Background() - ls, err := net.Listen("tcp", "127.0.0.1:9007") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} - -func Benchmark_Unix_SpawnWorker_Stop(b *testing.B) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - f := NewSocketServer(ls, time.Minute) - for n := 0; n < b.N; n++ { - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := f.SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - } -} - -func Benchmark_Unix_Worker_ExecEcho(b *testing.B) { - ctx := context.Background() - ls, err := net.Listen("unix", "sock.unix") - if err == nil { - defer func() { - err = ls.Close() - if err != nil { - b.Errorf("error closing the listener: error %v", err) - } - }() - } else { - b.Skip("socket is busy") - } - - cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix") - - w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd) - if err != nil { - b.Fatal(err) - } - defer func() { - err = w.Stop() - if err != nil { - b.Errorf("error stopping the Process: error %v", err) - } - }() - - sw := worker.From(w) - - for n := 0; n < b.N; n++ { - if _, err := sw.Exec(&payload.Payload{Body: []byte("hello")}); err != nil { - b.Fail() - } - } -} diff --git a/pkg/worker/interface.go b/pkg/worker/interface.go deleted file mode 100644 index ed8704bb..00000000 --- a/pkg/worker/interface.go +++ /dev/null @@ -1,74 +0,0 @@ -package worker - -import ( - "context" - "fmt" - "time" - - "github.com/spiral/goridge/v3/pkg/relay" - "github.com/spiral/roadrunner/v2/pkg/payload" -) - -// State represents WorkerProcess status and updated time. 
-type State interface { - fmt.Stringer - // Value returns StateImpl value - Value() int64 - // Set sets the StateImpl - Set(value int64) - // NumExecs shows how many times WorkerProcess was invoked - NumExecs() uint64 - // IsActive returns true if WorkerProcess not Inactive or Stopped - IsActive() bool - // RegisterExec using to registering php executions - RegisterExec() - // SetLastUsed sets worker last used time - SetLastUsed(lu uint64) - // LastUsed return worker last used time - LastUsed() uint64 -} - -type BaseProcess interface { - fmt.Stringer - - // Pid returns worker pid. - Pid() int64 - - // Created returns time worker was created at. - Created() time.Time - - // State return receive-only WorkerProcess state object, state can be used to safely access - // WorkerProcess status, time when status changed and number of WorkerProcess executions. - State() State - - // Start used to run Cmd and immediately return - Start() error - - // Wait must be called once for each WorkerProcess, call will be released once WorkerProcess is - // complete and will return process error (if any), if stderr is presented it's value - // will be wrapped as WorkerError. Method will return error code if php process fails - // to find or Start the script. - Wait() error - - // Stop sends soft termination command to the WorkerProcess and waits for process completion. - Stop() error - - // Kill kills underlying process, make sure to call Wait() func to gather - // error log from the stderr. Does not waits for process completion! - Kill() error - - // Relay returns attached to worker goridge relay - Relay() relay.Relay - - // AttachRelay used to attach goridge relay to the worker process - AttachRelay(rl relay.Relay) -} - -type SyncWorker interface { - // BaseProcess provides basic functionality for the SyncWorker - BaseProcess - // Exec used to execute payload on the SyncWorker, there is no TIMEOUTS - Exec(rqs *payload.Payload) (*payload.Payload, error) - // ExecWithTTL used to handle Exec with TTL - ExecWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) -} diff --git a/pkg/worker/state.go b/pkg/worker/state.go deleted file mode 100755 index bf152e8b..00000000 --- a/pkg/worker/state.go +++ /dev/null @@ -1,111 +0,0 @@ -package worker - -import ( - "sync/atomic" -) - -// SYNC WITH worker_watcher.GET -const ( - // StateInactive - no associated process - StateInactive int64 = iota - - // StateReady - ready for job. - StateReady - - // StateWorking - working on given payload. - StateWorking - - // StateInvalid - indicates that WorkerProcess is being disabled and will be removed. - StateInvalid - - // StateStopping - process is being softly stopped. - StateStopping - - // StateKilling - process is being forcibly stopped - StateKilling - - // StateDestroyed State of worker, when no need to allocate new one - StateDestroyed - - // StateMaxJobsReached State of worker, when it reached executions limit - StateMaxJobsReached - - // StateStopped - process has been terminated. - StateStopped - - // StateErrored - error StateImpl (can't be used). - StateErrored -) - -type StateImpl struct { - value int64 - numExecs uint64 - // to be lightweight, use UnixNano - lastUsed uint64 -} - -// NewWorkerState initializes a state for the sync.Worker -func NewWorkerState(value int64) *StateImpl { - return &StateImpl{value: value} -} - -// String returns current StateImpl as string. 
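// A minimal sketch of driving the state machine declared above (hypothetical
// caller inside this package; assumes a "time" import; values in the comments
// are illustrative):
func exampleWorkerState() {
	st := NewWorkerState(StateInactive) // String() == "inactive", IsActive() == false
	st.Set(StateReady)                  // worker may now accept jobs
	st.RegisterExec()                   // NumExecs() == 1
	st.SetLastUsed(uint64(time.Now().UnixNano()))
	if st.IsActive() { // true only for StateReady / StateWorking
		st.Set(StateWorking)
	}
}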
-func (s *StateImpl) String() string { - switch s.Value() { - case StateInactive: - return "inactive" - case StateReady: - return "ready" - case StateWorking: - return "working" - case StateInvalid: - return "invalid" - case StateStopping: - return "stopping" - case StateStopped: - return "stopped" - case StateKilling: - return "killing" - case StateErrored: - return "errored" - case StateDestroyed: - return "destroyed" - } - - return "undefined" -} - -// NumExecs returns number of registered WorkerProcess execs. -func (s *StateImpl) NumExecs() uint64 { - return atomic.LoadUint64(&s.numExecs) -} - -// Value StateImpl returns StateImpl value -func (s *StateImpl) Value() int64 { - return atomic.LoadInt64(&s.value) -} - -// IsActive returns true if WorkerProcess not Inactive or Stopped -func (s *StateImpl) IsActive() bool { - val := s.Value() - return val == StateWorking || val == StateReady -} - -// Set change StateImpl value (status) -func (s *StateImpl) Set(value int64) { - atomic.StoreInt64(&s.value, value) -} - -// RegisterExec register new execution atomically -func (s *StateImpl) RegisterExec() { - atomic.AddUint64(&s.numExecs, 1) -} - -// SetLastUsed Update last used time -func (s *StateImpl) SetLastUsed(lu uint64) { - atomic.StoreUint64(&s.lastUsed, lu) -} - -func (s *StateImpl) LastUsed() uint64 { - return atomic.LoadUint64(&s.lastUsed) -} diff --git a/pkg/worker/state_test.go b/pkg/worker/state_test.go deleted file mode 100755 index c67182d6..00000000 --- a/pkg/worker/state_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package worker - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_NewState(t *testing.T) { - st := NewWorkerState(StateErrored) - - assert.Equal(t, "errored", st.String()) - - assert.Equal(t, "inactive", NewWorkerState(StateInactive).String()) - assert.Equal(t, "ready", NewWorkerState(StateReady).String()) - assert.Equal(t, "working", NewWorkerState(StateWorking).String()) - assert.Equal(t, "stopped", NewWorkerState(StateStopped).String()) - assert.Equal(t, "undefined", NewWorkerState(1000).String()) -} - -func Test_IsActive(t *testing.T) { - assert.False(t, NewWorkerState(StateInactive).IsActive()) - assert.True(t, NewWorkerState(StateReady).IsActive()) - assert.True(t, NewWorkerState(StateWorking).IsActive()) - assert.False(t, NewWorkerState(StateStopped).IsActive()) - assert.False(t, NewWorkerState(StateErrored).IsActive()) -} diff --git a/pkg/worker/sync_worker.go b/pkg/worker/sync_worker.go deleted file mode 100755 index 74e29b71..00000000 --- a/pkg/worker/sync_worker.go +++ /dev/null @@ -1,283 +0,0 @@ -package worker - -import ( - "bytes" - "context" - "sync" - "time" - - "github.com/spiral/errors" - "github.com/spiral/goridge/v3/pkg/frame" - "github.com/spiral/goridge/v3/pkg/relay" - "github.com/spiral/roadrunner/v2/pkg/payload" - "go.uber.org/multierr" -) - -// Allocator is responsible for worker allocation in the pool -type Allocator func() (SyncWorker, error) - -type SyncWorkerImpl struct { - process *Process - fPool sync.Pool - bPool sync.Pool -} - -// From creates SyncWorker from BaseProcess -func From(process *Process) *SyncWorkerImpl { - return &SyncWorkerImpl{ - process: process, - fPool: sync.Pool{New: func() interface{} { - return frame.NewFrame() - }}, - bPool: sync.Pool{New: func() interface{} { - return new(bytes.Buffer) - }}, - } -} - -// Exec payload without TTL timeout. 
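// A hypothetical caller of the sync worker produced by From() above (process
// construction and error handling elided; the payload body is only an example):
func exampleSyncExec(proc *Process) {
	sw := From(proc)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// plain Exec has no timeout; ExecWithTTL kills the worker and returns an
	// errors.ExecTTL-wrapped error once ctx expires
	rsp, err := sw.ExecWithTTL(ctx, &payload.Payload{Body: []byte("hello")})
	_, _ = rsp, err
}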
-func (tw *SyncWorkerImpl) Exec(p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("sync_worker_exec") - if len(p.Body) == 0 && len(p.Context) == 0 { - return nil, errors.E(op, errors.Str("payload can not be empty")) - } - - if tw.process.State().Value() != StateReady { - return nil, errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())) - } - - // set last used time - tw.process.State().SetLastUsed(uint64(time.Now().UnixNano())) - tw.process.State().Set(StateWorking) - - rsp, err := tw.execPayload(p) - if err != nil { - // just to be more verbose - if !errors.Is(errors.SoftJob, err) { - tw.process.State().Set(StateErrored) - tw.process.State().RegisterExec() - } - return nil, errors.E(op, err) - } - - // supervisor may set state of the worker during the work - // in this case we should not re-write the worker state - if tw.process.State().Value() != StateWorking { - tw.process.State().RegisterExec() - return rsp, nil - } - - tw.process.State().Set(StateReady) - tw.process.State().RegisterExec() - - return rsp, nil -} - -type wexec struct { - payload *payload.Payload - err error -} - -// ExecWithTTL executes payload without TTL timeout. -func (tw *SyncWorkerImpl) ExecWithTTL(ctx context.Context, p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("sync_worker_exec_worker_with_timeout") - c := make(chan wexec, 1) - - go func() { - if len(p.Body) == 0 && len(p.Context) == 0 { - c <- wexec{ - err: errors.E(op, errors.Str("payload can not be empty")), - } - return - } - - if tw.process.State().Value() != StateReady { - c <- wexec{ - err: errors.E(op, errors.Errorf("Process is not ready (%s)", tw.process.State().String())), - } - return - } - - // set last used time - tw.process.State().SetLastUsed(uint64(time.Now().UnixNano())) - tw.process.State().Set(StateWorking) - - rsp, err := tw.execPayload(p) - if err != nil { - // just to be more verbose - if errors.Is(errors.SoftJob, err) == false { //nolint:gosimple - tw.process.State().Set(StateErrored) - tw.process.State().RegisterExec() - } - c <- wexec{ - err: errors.E(op, err), - } - return - } - - if tw.process.State().Value() != StateWorking { - tw.process.State().RegisterExec() - c <- wexec{ - payload: rsp, - err: nil, - } - return - } - - tw.process.State().Set(StateReady) - tw.process.State().RegisterExec() - - c <- wexec{ - payload: rsp, - err: nil, - } - }() - - select { - // exec TTL reached - case <-ctx.Done(): - err := multierr.Combine(tw.Kill()) - if err != nil { - // append timeout error - err = multierr.Append(err, errors.E(op, errors.ExecTTL)) - return nil, multierr.Append(err, ctx.Err()) - } - return nil, errors.E(op, errors.ExecTTL, ctx.Err()) - case res := <-c: - if res.err != nil { - return nil, res.err - } - return res.payload, nil - } -} - -func (tw *SyncWorkerImpl) execPayload(p *payload.Payload) (*payload.Payload, error) { - const op = errors.Op("sync_worker_exec_payload") - - // get a frame - fr := tw.getFrame() - defer tw.putFrame(fr) - - // can be 0 here - fr.WriteVersion(fr.Header(), frame.VERSION_1) - - // obtain a buffer - buf := tw.get() - - buf.Write(p.Context) - buf.Write(p.Body) - - // Context offset - fr.WriteOptions(fr.HeaderPtr(), uint32(len(p.Context))) - fr.WritePayloadLen(fr.Header(), uint32(buf.Len())) - fr.WritePayload(buf.Bytes()) - - fr.WriteCRC(fr.Header()) - - // return buffer - tw.put(buf) - - err := tw.Relay().Send(fr) - if err != nil { - return nil, errors.E(op, errors.Network, err) - } - - frameR := tw.getFrame() - defer 
tw.putFrame(frameR) - - err = tw.process.Relay().Receive(frameR) - if err != nil { - return nil, errors.E(op, errors.Network, err) - } - if frameR == nil { - return nil, errors.E(op, errors.Network, errors.Str("nil fr received")) - } - - if !frameR.VerifyCRC(frameR.Header()) { - return nil, errors.E(op, errors.Network, errors.Str("failed to verify CRC")) - } - - flags := frameR.ReadFlags() - - if flags&frame.ERROR != byte(0) { - return nil, errors.E(op, errors.SoftJob, errors.Str(string(frameR.Payload()))) - } - - options := frameR.ReadOptions(frameR.Header()) - if len(options) != 1 { - return nil, errors.E(op, errors.Decode, errors.Str("options length should be equal 1 (body offset)")) - } - - pld := &payload.Payload{ - Body: make([]byte, len(frameR.Payload()[options[0]:])), - Context: make([]byte, len(frameR.Payload()[:options[0]])), - } - - // by copying we free frame's payload slice - // we do not hold the pointer from the smaller slice to the initial (which should be in the sync.Pool) - // https://blog.golang.org/slices-intro#TOC_6. - copy(pld.Body, frameR.Payload()[options[0]:]) - copy(pld.Context, frameR.Payload()[:options[0]]) - - return pld, nil -} - -func (tw *SyncWorkerImpl) String() string { - return tw.process.String() -} - -func (tw *SyncWorkerImpl) Pid() int64 { - return tw.process.Pid() -} - -func (tw *SyncWorkerImpl) Created() time.Time { - return tw.process.Created() -} - -func (tw *SyncWorkerImpl) State() State { - return tw.process.State() -} - -func (tw *SyncWorkerImpl) Start() error { - return tw.process.Start() -} - -func (tw *SyncWorkerImpl) Wait() error { - return tw.process.Wait() -} - -func (tw *SyncWorkerImpl) Stop() error { - return tw.process.Stop() -} - -func (tw *SyncWorkerImpl) Kill() error { - return tw.process.Kill() -} - -func (tw *SyncWorkerImpl) Relay() relay.Relay { - return tw.process.Relay() -} - -func (tw *SyncWorkerImpl) AttachRelay(rl relay.Relay) { - tw.process.AttachRelay(rl) -} - -// Private - -func (tw *SyncWorkerImpl) get() *bytes.Buffer { - return tw.bPool.Get().(*bytes.Buffer) -} - -func (tw *SyncWorkerImpl) put(b *bytes.Buffer) { - b.Reset() - tw.bPool.Put(b) -} - -func (tw *SyncWorkerImpl) getFrame() *frame.Frame { - return tw.fPool.Get().(*frame.Frame) -} - -func (tw *SyncWorkerImpl) putFrame(f *frame.Frame) { - f.Reset() - tw.fPool.Put(f) -} diff --git a/pkg/worker/sync_worker_test.go b/pkg/worker/sync_worker_test.go deleted file mode 100755 index 64580f9f..00000000 --- a/pkg/worker/sync_worker_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package worker - -import ( - "os/exec" - "testing" - - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/stretchr/testify/assert" -) - -func Test_NotStarted_String(t *testing.T) { - cmd := exec.Command("php", "tests/client.php", "echo", "pipes") - - w, _ := InitBaseWorker(cmd) - assert.Contains(t, w.String(), "php tests/client.php echo pipes") - assert.Contains(t, w.String(), "inactive") - assert.Contains(t, w.String(), "numExecs: 0") -} - -func Test_NotStarted_Exec(t *testing.T) { - cmd := exec.Command("php", "tests/client.php", "echo", "pipes") - - w, _ := InitBaseWorker(cmd) - - sw := From(w) - - res, err := sw.Exec(&payload.Payload{Body: []byte("hello")}) - - assert.Error(t, err) - assert.Nil(t, res) - - assert.Contains(t, err.Error(), "Process is not ready (inactive)") -} diff --git a/pkg/worker/worker.go b/pkg/worker/worker.go deleted file mode 100755 index fa74e7b5..00000000 --- a/pkg/worker/worker.go +++ /dev/null @@ -1,220 +0,0 @@ -package worker - -import ( - "fmt" - "os" - "os/exec" - 
"strconv" - "strings" - "time" - - "github.com/spiral/errors" - "github.com/spiral/goridge/v3/pkg/relay" - "github.com/spiral/roadrunner/v2/internal" - "github.com/spiral/roadrunner/v2/pkg/events" - "go.uber.org/multierr" -) - -type Options func(p *Process) - -// Process - supervised process with api over goridge.Relay. -type Process struct { - // created indicates at what time Process has been created. - created time.Time - - // updates parent supervisor or pool about Process events - events events.Handler - - // state holds information about current Process state, - // number of Process executions, buf status change time. - // publicly this object is receive-only and protected using Mutex - // and atomic counter. - state *StateImpl - - // underlying command with associated process, command must be - // provided to Process from outside in non-started form. CmdSource - // stdErr direction will be handled by Process to aggregate error message. - cmd *exec.Cmd - - // pid of the process, points to pid of underlying process and - // can be nil while process is not started. - pid int - - // communication bus with underlying process. - relay relay.Relay -} - -// InitBaseWorker creates new Process over given exec.cmd. -func InitBaseWorker(cmd *exec.Cmd, options ...Options) (*Process, error) { - if cmd.Process != nil { - return nil, fmt.Errorf("can't attach to running process") - } - w := &Process{ - created: time.Now(), - events: events.NewEventsHandler(), - cmd: cmd, - state: NewWorkerState(StateInactive), - } - - // set self as stderr implementation (Writer interface) - w.cmd.Stderr = w - - // add options - for i := 0; i < len(options); i++ { - options[i](w) - } - - return w, nil -} - -func AddListeners(listeners ...events.Listener) Options { - return func(p *Process) { - for i := 0; i < len(listeners); i++ { - p.addListener(listeners[i]) - } - } -} - -// Pid returns worker pid. -func (w *Process) Pid() int64 { - return int64(w.pid) -} - -// Created returns time worker was created at. -func (w *Process) Created() time.Time { - return w.created -} - -// AddListener registers new worker event listener. -func (w *Process) addListener(listener events.Listener) { - w.events.AddListener(listener) -} - -// State return receive-only Process state object, state can be used to safely access -// Process status, time when status changed and number of Process executions. -func (w *Process) State() State { - return w.state -} - -// AttachRelay attaches relay to the worker -func (w *Process) AttachRelay(rl relay.Relay) { - w.relay = rl -} - -// Relay returns relay attached to the worker -func (w *Process) Relay() relay.Relay { - return w.relay -} - -// String returns Process description. fmt.Stringer interface -func (w *Process) String() string { - st := w.state.String() - // we can safely compare pid to 0 - if w.pid != 0 { - st = st + ", pid:" + strconv.Itoa(w.pid) - } - - return fmt.Sprintf( - "(`%s` [%s], numExecs: %v)", - strings.Join(w.cmd.Args, " "), - st, - w.state.NumExecs(), - ) -} - -func (w *Process) Start() error { - err := w.cmd.Start() - if err != nil { - return err - } - w.pid = w.cmd.Process.Pid - return nil -} - -// Wait must be called once for each Process, call will be released once Process is -// complete and will return process error (if any), if stderr is presented it's value -// will be wrapped as WorkerError. Method will return error code if php process fails -// to find or Start the script. 
-func (w *Process) Wait() error { - const op = errors.Op("process_wait") - var err error - err = w.cmd.Wait() - - // If worker was destroyed, just exit - if w.State().Value() == StateDestroyed { - return nil - } - - // If state is different, and err is not nil, append it to the errors - if err != nil { - w.State().Set(StateErrored) - err = multierr.Combine(err, errors.E(op, err)) - } - - // closeRelay - // at this point according to the documentation (see cmd.Wait comment) - // if worker finishes with an error, message will be written to the stderr first - // and then process.cmd.Wait return an error - err2 := w.closeRelay() - if err2 != nil { - w.State().Set(StateErrored) - return multierr.Append(err, errors.E(op, err2)) - } - - if w.cmd.ProcessState.Success() { - w.State().Set(StateStopped) - return nil - } - - return err -} - -func (w *Process) closeRelay() error { - if w.relay != nil { - err := w.relay.Close() - if err != nil { - return err - } - } - return nil -} - -// Stop sends soft termination command to the Process and waits for process completion. -func (w *Process) Stop() error { - const op = errors.Op("process_stop") - w.state.Set(StateStopping) - err := internal.SendControl(w.relay, &internal.StopCommand{Stop: true}) - if err != nil { - w.state.Set(StateKilling) - _ = w.cmd.Process.Signal(os.Kill) - return errors.E(op, errors.Network, err) - } - w.state.Set(StateStopped) - return nil -} - -// Kill kills underlying process, make sure to call Wait() func to gather -// error log from the stderr. Does not wait for process completion! -func (w *Process) Kill() error { - if w.State().Value() == StateDestroyed { - err := w.cmd.Process.Signal(os.Kill) - if err != nil { - return err - } - return nil - } - - w.state.Set(StateKilling) - err := w.cmd.Process.Signal(os.Kill) - if err != nil { - return err - } - w.state.Set(StateStopped) - return nil -} - -// Worker stderr -func (w *Process) Write(p []byte) (n int, err error) { - w.events.Push(events.WorkerEvent{Event: events.EventWorkerStderr, Worker: w, Payload: p}) - return len(p), nil -} diff --git a/pkg/worker/worker_test.go b/pkg/worker/worker_test.go deleted file mode 100755 index 805f66b5..00000000 --- a/pkg/worker/worker_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package worker - -import ( - "os/exec" - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_OnStarted(t *testing.T) { - cmd := exec.Command("php", "tests/client.php", "broken", "pipes") - assert.Nil(t, cmd.Start()) - - w, err := InitBaseWorker(cmd) - assert.Nil(t, w) - assert.NotNil(t, err) - - assert.Equal(t, "can't attach to running process", err.Error()) -} diff --git a/pkg/worker_handler/constants.go b/pkg/worker_handler/constants.go deleted file mode 100644 index 3355d9c2..00000000 --- a/pkg/worker_handler/constants.go +++ /dev/null @@ -1,8 +0,0 @@ -package handler - -import "net/http" - -var http2pushHeaderKey = http.CanonicalHeaderKey("http2-push") - -// TrailerHeaderKey http header key -var TrailerHeaderKey = http.CanonicalHeaderKey("trailer") diff --git a/pkg/worker_handler/errors.go b/pkg/worker_handler/errors.go deleted file mode 100644 index c3352a52..00000000 --- a/pkg/worker_handler/errors.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows -// +build !windows - -package handler - -import ( - "errors" - "net" - "os" - "syscall" -) - -// Broken pipe -var errEPIPE = errors.New("EPIPE(32) -> connection reset by peer") - -// handleWriteError just check if error was caused by aborted connection on linux -func handleWriteError(err error) error { - 
if netErr, ok2 := err.(*net.OpError); ok2 { - if syscallErr, ok3 := netErr.Err.(*os.SyscallError); ok3 { - if errors.Is(syscallErr.Err, syscall.EPIPE) { - return errEPIPE - } - } - } - return err -} diff --git a/pkg/worker_handler/errors_windows.go b/pkg/worker_handler/errors_windows.go deleted file mode 100644 index 3c6c2186..00000000 --- a/pkg/worker_handler/errors_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows -// +build windows - -package handler - -import ( - "errors" - "net" - "os" - "syscall" -) - -//Software caused connection abort. -//An established connection was aborted by the software in your host computer, -//possibly due to a data transmission time-out or protocol error. -var errEPIPE = errors.New("WSAECONNABORTED (10053) -> an established connection was aborted by peer") - -// handleWriteError just check if error was caused by aborted connection on windows -func handleWriteError(err error) error { - if netErr, ok2 := err.(*net.OpError); ok2 { - if syscallErr, ok3 := netErr.Err.(*os.SyscallError); ok3 { - if syscallErr.Err == syscall.WSAECONNABORTED { - return errEPIPE - } - } - } - return err -} diff --git a/pkg/worker_handler/handler.go b/pkg/worker_handler/handler.go deleted file mode 100644 index fc03563b..00000000 --- a/pkg/worker_handler/handler.go +++ /dev/null @@ -1,246 +0,0 @@ -package handler - -import ( - "net" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/pool" - "github.com/spiral/roadrunner/v2/plugins/http/config" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -// MB is 1024 bytes -const MB uint64 = 1024 * 1024 - -// ErrorEvent represents singular http error event. -type ErrorEvent struct { - // Request contains client request, must not be stored. - Request *http.Request - - // Error - associated error, if any. - Error error - - // event timings - start time.Time - elapsed time.Duration -} - -// Elapsed returns duration of the invocation. -func (e *ErrorEvent) Elapsed() time.Duration { - return e.elapsed -} - -// ResponseEvent represents singular http response event. -type ResponseEvent struct { - // Request contains client request, must not be stored. - Request *Request - - // Response contains service response. - Response *Response - - // event timings - start time.Time - elapsed time.Duration -} - -// Elapsed returns duration of the invocation. -func (e *ResponseEvent) Elapsed() time.Duration { - return e.elapsed -} - -// Handler serves http connections to underlying PHP application using PSR-7 protocol. Context will include request headers, -// parsed files and query, payload will include parsed form dataTree (if any). -type Handler struct { - maxRequestSize uint64 - uploads config.Uploads - trusted config.Cidrs - log logger.Logger - pool pool.Pool - mul sync.Mutex - lsn []events.Listener - internalHTTPCode uint64 -} - -// NewHandler return handle interface implementation -func NewHandler(maxReqSize uint64, internalHTTPCode uint64, uploads config.Uploads, trusted config.Cidrs, pool pool.Pool) (*Handler, error) { - if pool == nil { - return nil, errors.E(errors.Str("pool should be initialized")) - } - return &Handler{ - maxRequestSize: maxReqSize * MB, - uploads: uploads, - pool: pool, - trusted: trusted, - internalHTTPCode: internalHTTPCode, - }, nil -} - -// AddListener attaches handler event controller. 
-func (h *Handler) AddListener(l ...events.Listener) { - h.mul.Lock() - defer h.mul.Unlock() - - h.lsn = l -} - -// mdwr serve using PSR-7 requests passed to underlying application. Attempts to serve static files first if enabled. -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - const op = errors.Op("serve_http") - start := time.Now() - - // validating request size - if h.maxRequestSize != 0 { - const op = errors.Op("http_handler_max_size") - if length := r.Header.Get("content-length"); length != "" { - // try to parse the value from the `content-length` header - size, err := strconv.ParseInt(length, 10, 64) - if err != nil { - // if got an error while parsing -> assign 500 code to the writer and return - http.Error(w, "", 500) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, errors.Str("error while parsing value from the `content-length` header")), start: start, elapsed: time.Since(start)}) - return - } - - if size > int64(h.maxRequestSize) { - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, errors.Str("request body max size is exceeded")), start: start, elapsed: time.Since(start)}) - http.Error(w, errors.E(op, errors.Str("request body max size is exceeded")).Error(), http.StatusBadRequest) - return - } - } - } - - req, err := NewRequest(r, h.uploads) - if err != nil { - // if pipe is broken, there is no sense to write the header - // in this case we just report about error - if err == errEPIPE { - h.sendEvent(ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)}) - return - } - - http.Error(w, errors.E(op, err).Error(), 500) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - return - } - - // proxy IP resolution - h.resolveIP(req) - - req.Open(h.log) - defer req.Close(h.log) - - p, err := req.Payload() - if err != nil { - h.handleError(w, r, start, err) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - return - } - - rsp, err := h.pool.Exec(p) - if err != nil { - h.handleError(w, r, start, err) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - return - } - - resp, err := NewResponse(rsp) - if err != nil { - h.handleError(w, r, start, err) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - return - } - - h.handleResponse(req, resp, start) - err = resp.Write(w) - if err != nil { - http.Error(w, errors.E(op, err).Error(), 500) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - } -} - -// handleError will handle internal RR errors and return 500 -func (h *Handler) handleError(w http.ResponseWriter, r *http.Request, start time.Time, err error) { - const op = errors.Op("handle_error") - // internal error types, user should not see them - if errors.Is(errors.SoftJob, err) || - errors.Is(errors.WatcherStopped, err) || - errors.Is(errors.WorkerAllocate, err) || - errors.Is(errors.NoFreeWorkers, err) || - errors.Is(errors.ExecTTL, err) || - errors.Is(errors.IdleTTL, err) || - errors.Is(errors.TTL, err) || - errors.Is(errors.Encode, err) || - errors.Is(errors.Decode, err) || - errors.Is(errors.Network, err) { - // write an internal server error - w.WriteHeader(int(h.internalHTTPCode)) - h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)}) - } -} - -// handleResponse triggers response event. 
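// A hypothetical wiring sketch for the Handler above (the pool, uploads config,
// trusted CIDRs and listen address are placeholders, not taken from this patch):
func exampleServeHTTP(p pool.Pool, uploads config.Uploads, trusted config.Cidrs) error {
	h, err := NewHandler(64, 500, uploads, trusted, p) // 64 MB body cap, HTTP 500 on internal errors
	if err != nil {
		return err
	}
	h.AddListener(func(ev interface{}) {
		// ErrorEvent / ResponseEvent values with Elapsed() timings arrive here
	})
	return (&http.Server{Addr: ":8080", Handler: h}).ListenAndServe()
}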
-func (h *Handler) handleResponse(req *Request, resp *Response, start time.Time) { - h.sendEvent(ResponseEvent{Request: req, Response: resp, start: start, elapsed: time.Since(start)}) -} - -// sendEvent invokes event handler if any. -func (h *Handler) sendEvent(event interface{}) { - if h.lsn != nil { - for i := range h.lsn { - // do not block the pipeline - // TODO not a good approach, redesign event bus - i := i - go func() { - h.lsn[i](event) - }() - } - } -} - -// get real ip passing multiple proxy -func (h *Handler) resolveIP(r *Request) { - if h.trusted.IsTrusted(r.RemoteAddr) == false { //nolint:gosimple - return - } - - if r.Header.Get("X-Forwarded-For") != "" { - ips := strings.Split(r.Header.Get("X-Forwarded-For"), ",") - ipCount := len(ips) - - for i := ipCount - 1; i >= 0; i-- { - addr := strings.TrimSpace(ips[i]) - if net.ParseIP(addr) != nil { - r.RemoteAddr = addr - return - } - } - - return - } - - // The logic here is the following: - // In general case, we only expect X-Real-Ip header. If it exist, we get the IP address from header and set request Remote address - // But, if there is no X-Real-Ip header, we also trying to check CloudFlare headers - // True-Client-IP is a general CF header in which copied information from X-Real-Ip in CF. - // CF-Connecting-IP is an Enterprise feature and we check it last in order. - // This operations are near O(1) because Headers struct are the map type -> type MIMEHeader map[string][]string - if r.Header.Get("X-Real-Ip") != "" { - r.RemoteAddr = FetchIP(r.Header.Get("X-Real-Ip")) - return - } - - if r.Header.Get("True-Client-IP") != "" { - r.RemoteAddr = FetchIP(r.Header.Get("True-Client-IP")) - return - } - - if r.Header.Get("CF-Connecting-IP") != "" { - r.RemoteAddr = FetchIP(r.Header.Get("CF-Connecting-IP")) - } -} diff --git a/pkg/worker_handler/parse.go b/pkg/worker_handler/parse.go deleted file mode 100644 index 2790da2a..00000000 --- a/pkg/worker_handler/parse.go +++ /dev/null @@ -1,149 +0,0 @@ -package handler - -import ( - "net/http" - - "github.com/spiral/roadrunner/v2/plugins/http/config" -) - -// MaxLevel defines maximum tree depth for incoming request data and files. -const MaxLevel = 127 - -type dataTree map[string]interface{} -type fileTree map[string]interface{} - -// parseData parses incoming request body into data tree. -func parseData(r *http.Request) dataTree { - data := make(dataTree) - if r.PostForm != nil { - for k, v := range r.PostForm { - data.push(k, v) - } - } - - if r.MultipartForm != nil { - for k, v := range r.MultipartForm.Value { - data.push(k, v) - } - } - - return data -} - -// pushes value into data tree. -func (d dataTree) push(k string, v []string) { - keys := FetchIndexes(k) - if len(keys) <= MaxLevel { - d.mount(keys, v) - } -} - -// mount mounts data tree recursively. 
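// Sketch of how the tree mounting above expands PHP-style bracket names
// (hypothetical form input; the resulting shape follows FetchIndexes):
func exampleDataTree() dataTree {
	d := make(dataTree)
	d.push("user[name]", []string{"Anna"})     // -> d["user"].(dataTree)["name"] == "Anna"
	d.push("user[tags][]", []string{"a", "b"}) // trailing [] keeps the whole slice
	return d // dataTree{"user": dataTree{"name": "Anna", "tags": []string{"a", "b"}}}
}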
-func (d dataTree) mount(i []string, v []string) { - if len(i) == 1 { - // single value context (last element) - d[i[0]] = v[len(v)-1] - return - } - - if len(i) == 2 && i[1] == "" { - // non associated array of elements - d[i[0]] = v - return - } - - if p, ok := d[i[0]]; ok { - p.(dataTree).mount(i[1:], v) - return - } - - d[i[0]] = make(dataTree) - d[i[0]].(dataTree).mount(i[1:], v) -} - -// parse incoming dataTree request into JSON (including contentMultipart form dataTree) -func parseUploads(r *http.Request, cfg config.Uploads) *Uploads { - u := &Uploads{ - cfg: cfg, - tree: make(fileTree), - list: make([]*FileUpload, 0), - } - - for k, v := range r.MultipartForm.File { - files := make([]*FileUpload, 0, len(v)) - for _, f := range v { - files = append(files, NewUpload(f)) - } - - u.list = append(u.list, files...) - u.tree.push(k, files) - } - - return u -} - -// pushes new file upload into it's proper place. -func (d fileTree) push(k string, v []*FileUpload) { - keys := FetchIndexes(k) - if len(keys) <= MaxLevel { - d.mount(keys, v) - } -} - -// mount mounts data tree recursively. -func (d fileTree) mount(i []string, v []*FileUpload) { - if len(i) == 1 { - // single value context - d[i[0]] = v[0] - return - } - - if len(i) == 2 && i[1] == "" { - // non associated array of elements - d[i[0]] = v - return - } - - if p, ok := d[i[0]]; ok { - p.(fileTree).mount(i[1:], v) - return - } - - d[i[0]] = make(fileTree) - d[i[0]].(fileTree).mount(i[1:], v) -} - -// FetchIndexes parses input name and splits it into separate indexes list. -func FetchIndexes(s string) []string { - var ( - pos int - ch string - keys = make([]string, 1) - ) - - for _, c := range s { - ch = string(c) - switch ch { - case " ": - // ignore all spaces - continue - case "[": - pos = 1 - continue - case "]": - if pos == 1 { - keys = append(keys, "") - } - pos = 2 - default: - if pos == 1 || pos == 2 { - keys = append(keys, "") - } - - keys[len(keys)-1] += ch - pos = 0 - } - } - - return keys -} diff --git a/pkg/worker_handler/request.go b/pkg/worker_handler/request.go deleted file mode 100644 index 3d60897b..00000000 --- a/pkg/worker_handler/request.go +++ /dev/null @@ -1,189 +0,0 @@ -package handler - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - - j "github.com/json-iterator/go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/payload" - "github.com/spiral/roadrunner/v2/plugins/http/attributes" - "github.com/spiral/roadrunner/v2/plugins/http/config" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -var json = j.ConfigCompatibleWithStandardLibrary - -const ( - defaultMaxMemory = 32 << 20 // 32 MB - contentNone = iota + 900 - contentStream - contentMultipart - contentFormData -) - -// Request maps net/http requests to PSR7 compatible structure and managed state of temporary uploaded files. -type Request struct { - // RemoteAddr contains ip address of client, make sure to check X-Real-Ip and X-Forwarded-For for real client address. - RemoteAddr string `json:"remoteAddr"` - - // Protocol includes HTTP protocol version. - Protocol string `json:"protocol"` - - // Method contains name of HTTP method used for the request. - Method string `json:"method"` - - // URI contains full request URI with scheme and query. - URI string `json:"uri"` - - // Header contains list of request headers. - Header http.Header `json:"headers"` - - // Cookies contains list of request cookies. 
- Cookies map[string]string `json:"cookies"` - - // RawQuery contains non parsed query string (to be parsed on php end). - RawQuery string `json:"rawQuery"` - - // Parsed indicates that request body has been parsed on RR end. - Parsed bool `json:"parsed"` - - // Uploads contains list of uploaded files, their names, sized and associations with temporary files. - Uploads *Uploads `json:"uploads"` - - // Attributes can be set by chained mdwr to safely pass value from Golang to PHP. See: GetAttribute, SetAttribute functions. - Attributes map[string]interface{} `json:"attributes"` - - // request body can be parsedData or []byte - body interface{} -} - -func FetchIP(pair string) string { - if !strings.ContainsRune(pair, ':') { - return pair - } - - addr, _, _ := net.SplitHostPort(pair) - return addr -} - -// NewRequest creates new PSR7 compatible request using net/http request. -func NewRequest(r *http.Request, cfg config.Uploads) (*Request, error) { - req := &Request{ - RemoteAddr: FetchIP(r.RemoteAddr), - Protocol: r.Proto, - Method: r.Method, - URI: URI(r), - Header: r.Header, - Cookies: make(map[string]string), - RawQuery: r.URL.RawQuery, - Attributes: attributes.All(r), - } - - for _, c := range r.Cookies() { - if v, err := url.QueryUnescape(c.Value); err == nil { - req.Cookies[c.Name] = v - } - } - - switch req.contentType() { - case contentNone: - return req, nil - - case contentStream: - var err error - req.body, err = ioutil.ReadAll(r.Body) - return req, err - - case contentMultipart: - if err := r.ParseMultipartForm(defaultMaxMemory); err != nil { - return nil, err - } - - req.Uploads = parseUploads(r, cfg) - fallthrough - case contentFormData: - if err := r.ParseForm(); err != nil { - return nil, err - } - - req.body = parseData(r) - } - - req.Parsed = true - return req, nil -} - -// Open moves all uploaded files to temporary directory so it can be given to php later. -func (r *Request) Open(log logger.Logger) { - if r.Uploads == nil { - return - } - - r.Uploads.Open(log) -} - -// Close clears all temp file uploads -func (r *Request) Close(log logger.Logger) { - if r.Uploads == nil { - return - } - - r.Uploads.Clear(log) -} - -// Payload request marshaled RoadRunner payload based on PSR7 data. values encode method is JSON. Make sure to open -// files prior to calling this method. -func (r *Request) Payload() (*payload.Payload, error) { - const op = errors.Op("marshal_payload") - p := &payload.Payload{} - - var err error - if p.Context, err = json.Marshal(r); err != nil { - return nil, errors.E(op, errors.Encode, err) - } - - if r.Parsed { - if p.Body, err = json.Marshal(r.body); err != nil { - return nil, errors.E(op, errors.Encode, err) - } - } else if r.body != nil { - p.Body = r.body.([]byte) - } - - return p, nil -} - -// contentType returns the payload content type. -func (r *Request) contentType() int { - if r.Method == "HEAD" || r.Method == "OPTIONS" { - return contentNone - } - - ct := r.Header.Get("content-type") - if strings.Contains(ct, "application/x-www-form-urlencoded") { - return contentFormData - } - - if strings.Contains(ct, "multipart/form-data") { - return contentMultipart - } - - return contentStream -} - -// URI fetches full uri from request in a form of string (including https scheme if TLS connection is enabled). 
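// Sketch of the context/body split produced by Payload() above (hypothetical
// caller; the JSON described in the comments is illustrative):
func examplePayload(r *http.Request, cfg config.Uploads) (*payload.Payload, error) {
	req, err := NewRequest(r, cfg)
	if err != nil {
		return nil, err
	}
	// Payload().Context carries the marshaled Request metadata (remoteAddr, method,
	// uri, headers, cookies, attributes); Payload().Body carries the parsed form
	// tree as JSON for form/multipart requests, or the raw body bytes otherwise.
	return req.Payload()
}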
-func URI(r *http.Request) string { - if r.URL.Host != "" { - return r.URL.String() - } - if r.TLS != nil { - return fmt.Sprintf("https://%s%s", r.Host, r.URL.String()) - } - - return fmt.Sprintf("http://%s%s", r.Host, r.URL.String()) -} diff --git a/pkg/worker_handler/response.go b/pkg/worker_handler/response.go deleted file mode 100644 index d22f09d4..00000000 --- a/pkg/worker_handler/response.go +++ /dev/null @@ -1,105 +0,0 @@ -package handler - -import ( - "io" - "net/http" - "strings" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/payload" -) - -// Response handles PSR7 response logic. -type Response struct { - // Status contains response status. - Status int `json:"status"` - - // Header contains list of response headers. - Headers map[string][]string `json:"headers"` - - // associated Body payload. - Body interface{} -} - -// NewResponse creates new response based on given pool payload. -func NewResponse(p *payload.Payload) (*Response, error) { - const op = errors.Op("http_response") - r := &Response{Body: p.Body} - if err := json.Unmarshal(p.Context, r); err != nil { - return nil, errors.E(op, errors.Decode, err) - } - - return r, nil -} - -// Write writes response headers, status and body into ResponseWriter. -func (r *Response) Write(w http.ResponseWriter) error { - // INFO map is the reference type in golang - p := handlePushHeaders(r.Headers) - if pusher, ok := w.(http.Pusher); ok { - for _, v := range p { - err := pusher.Push(v, nil) - if err != nil { - return err - } - } - } - - handleTrailers(r.Headers) - for n, h := range r.Headers { - for _, v := range h { - w.Header().Add(n, v) - } - } - - w.WriteHeader(r.Status) - - if data, ok := r.Body.([]byte); ok { - _, err := w.Write(data) - if err != nil { - return handleWriteError(err) - } - } - - if rc, ok := r.Body.(io.Reader); ok { - if _, err := io.Copy(w, rc); err != nil { - return err - } - } - - return nil -} - -func handlePushHeaders(h map[string][]string) []string { - var p []string - pushHeader, ok := h[http2pushHeaderKey] - if !ok { - return p - } - - p = append(p, pushHeader...) - - delete(h, http2pushHeaderKey) - - return p -} - -func handleTrailers(h map[string][]string) { - trailers, ok := h[TrailerHeaderKey] - if !ok { - return - } - - for _, tr := range trailers { - for _, n := range strings.Split(tr, ",") { - n = strings.Trim(n, "\t ") - if v, ok := h[n]; ok { - h["Trailer:"+n] = v - - delete(h, n) - } - } - } - - delete(h, TrailerHeaderKey) -} diff --git a/pkg/worker_handler/uploads.go b/pkg/worker_handler/uploads.go deleted file mode 100644 index e695000e..00000000 --- a/pkg/worker_handler/uploads.go +++ /dev/null @@ -1,159 +0,0 @@ -package handler - -import ( - "github.com/spiral/roadrunner/v2/plugins/http/config" - "github.com/spiral/roadrunner/v2/plugins/logger" - - "io" - "io/ioutil" - "mime/multipart" - "os" - "sync" -) - -const ( - // UploadErrorOK - no error, the file uploaded with success. - UploadErrorOK = 0 - - // UploadErrorNoFile - no file was uploaded. - UploadErrorNoFile = 4 - - // UploadErrorNoTmpDir - missing a temporary folder. - UploadErrorNoTmpDir = 6 - - // UploadErrorCantWrite - failed to write file to disk. - UploadErrorCantWrite = 7 - - // UploadErrorExtension - forbidden file extension. - UploadErrorExtension = 8 -) - -// Uploads tree manages uploaded files tree and temporary files. -type Uploads struct { - // associated temp directory and forbidden extensions. - cfg config.Uploads - - // pre processed data tree for Uploads. 
- tree fileTree - - // flat list of all file Uploads. - list []*FileUpload -} - -// MarshalJSON marshal tree tree into JSON. -func (u *Uploads) MarshalJSON() ([]byte, error) { - return json.Marshal(u.tree) -} - -// Open moves all uploaded files to temp directory, return error in case of issue with temp directory. File errors -// will be handled individually. -func (u *Uploads) Open(log logger.Logger) { - var wg sync.WaitGroup - for _, f := range u.list { - wg.Add(1) - go func(f *FileUpload) { - defer wg.Done() - err := f.Open(u.cfg) - if err != nil && log != nil { - log.Error("error opening the file", "err", err) - } - }(f) - } - - wg.Wait() -} - -// Clear deletes all temporary files. -func (u *Uploads) Clear(log logger.Logger) { - for _, f := range u.list { - if f.TempFilename != "" && exists(f.TempFilename) { - err := os.Remove(f.TempFilename) - if err != nil && log != nil { - log.Error("error removing the file", "err", err) - } - } - } -} - -// FileUpload represents singular file NewUpload. -type FileUpload struct { - // ID contains filename specified by the client. - Name string `json:"name"` - - // Mime contains mime-type provided by the client. - Mime string `json:"mime"` - - // Size of the uploaded file. - Size int64 `json:"size"` - - // Error indicates file upload error (if any). See http://php.net/manual/en/features.file-upload.errors.php - Error int `json:"error"` - - // TempFilename points to temporary file location. - TempFilename string `json:"tmpName"` - - // associated file header - header *multipart.FileHeader -} - -// NewUpload wraps net/http upload into PRS-7 compatible structure. -func NewUpload(f *multipart.FileHeader) *FileUpload { - return &FileUpload{ - Name: f.Filename, - Mime: f.Header.Get("Content-Type"), - Error: UploadErrorOK, - header: f, - } -} - -// Open moves file content into temporary file available for PHP. -// NOTE: -// There is 2 deferred functions, and in case of getting 2 errors from both functions -// error from close of temp file would be overwritten by error from the main file -// STACK -// DEFER FILE CLOSE (2) -// DEFER TMP CLOSE (1) -func (f *FileUpload) Open(cfg config.Uploads) (err error) { - if cfg.Forbids(f.Name) { - f.Error = UploadErrorExtension - return nil - } - - file, err := f.header.Open() - if err != nil { - f.Error = UploadErrorNoFile - return err - } - - defer func() { - // close the main file - err = file.Close() - }() - - tmp, err := ioutil.TempFile(cfg.TmpDir(), "upload") - if err != nil { - // most likely cause of this issue is missing tmp dir - f.Error = UploadErrorNoTmpDir - return err - } - - f.TempFilename = tmp.Name() - defer func() { - // close the temp file - err = tmp.Close() - }() - - if f.Size, err = io.Copy(tmp, file); err != nil { - f.Error = UploadErrorCantWrite - } - - return err -} - -// exists if file exists. 
-func exists(path string) bool { - if _, err := os.Stat(path); os.IsNotExist(err) { - return false - } - return true -} diff --git a/pkg/worker_watcher/container/channel/vec.go b/pkg/worker_watcher/container/channel/vec.go deleted file mode 100644 index 5605f1e0..00000000 --- a/pkg/worker_watcher/container/channel/vec.go +++ /dev/null @@ -1,107 +0,0 @@ -package channel - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -type Vec struct { - sync.RWMutex - // destroy signal - destroy uint64 - // channel with the workers - workers chan worker.BaseProcess -} - -func NewVector(len uint64) *Vec { - vec := &Vec{ - destroy: 0, - workers: make(chan worker.BaseProcess, len), - } - - return vec -} - -// Push is O(1) operation -// In case of TTL and full channel O(n) worst case, where n is len of the channel -func (v *Vec) Push(w worker.BaseProcess) { - // Non-blocking channel send - select { - case v.workers <- w: - // default select branch is only possible when dealing with TTL - // because in that case, workers in the v.workers channel can be TTL-ed and killed - // but presenting in the channel - default: - // Stop Pop operations - v.Lock() - defer v.Unlock() - - /* - we can be in the default branch by the following reasons: - 1. TTL is set with no requests during the TTL - 2. Violated Get <-> Release operation (how ??) - */ - for i := 0; i < len(v.workers); i++ { - /* - We need to drain vector until we found a worker in the Invalid/Killing/Killed/etc states. - */ - wrk := <-v.workers - switch wrk.State().Value() { - // skip good states, put worker back - case worker.StateWorking, worker.StateReady: - // put the worker back - // generally, while send and receive operations are concurrent (from the channel), channel behave - // like a FIFO, but when re-sending from the same goroutine it behaves like a FILO - v.workers <- wrk - continue - /* - Bad states are here. 
- */ - default: - // kill the current worker (just to be sure it's dead) - if wrk != nil { - _ = wrk.Kill() - } - // replace with the new one and return from the loop - // new worker can be ttl-ed at this moment, it's possible to replace TTL-ed worker with new TTL-ed worker - // But this case will be handled in the worker_watcher::Get - v.workers <- w - return - } - } - } -} - -func (v *Vec) Remove(_ int64) {} - -func (v *Vec) Pop(ctx context.Context) (worker.BaseProcess, error) { - /* - if *addr == old { - *addr = new - return true - } - */ - - if atomic.LoadUint64(&v.destroy) == 1 { - return nil, errors.E(errors.WatcherStopped) - } - - // used only for the TTL-ed workers - v.RLock() - defer v.RUnlock() - - select { - case w := <-v.workers: - return w, nil - case <-ctx.Done(): - return nil, errors.E(ctx.Err(), errors.NoFreeWorkers) - } -} - -func (v *Vec) Destroy() { - atomic.StoreUint64(&v.destroy, 1) -} diff --git a/pkg/worker_watcher/container/queue/queue.go b/pkg/worker_watcher/container/queue/queue.go deleted file mode 100644 index edf81d60..00000000 --- a/pkg/worker_watcher/container/queue/queue.go +++ /dev/null @@ -1,102 +0,0 @@ -package queue - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/spiral/roadrunner/v2/pkg/worker" -) - -const ( - initialSize = 1 - maxInitialSize = 8 - maxInternalSliceSize = 10 -) - -type Node struct { - w []worker.BaseProcess - // LL - n *Node -} - -type Queue struct { - mu sync.Mutex - - head *Node - tail *Node - - curr uint64 - len uint64 - - sliceSize uint64 -} - -func NewQueue() *Queue { - q := &Queue{ - mu: sync.Mutex{}, - head: nil, - tail: nil, - curr: 0, - len: 0, - sliceSize: 0, - } - - return q -} - -func (q *Queue) Push(w worker.BaseProcess) { - q.mu.Lock() - - if q.head == nil { - h := newNode(initialSize) - q.head = h - q.tail = h - q.sliceSize = maxInitialSize - } else if uint64(len(q.tail.w)) >= atomic.LoadUint64(&q.sliceSize) { - n := newNode(maxInternalSliceSize) - q.tail.n = n - q.tail = n - q.sliceSize = maxInternalSliceSize - } - - q.tail.w = append(q.tail.w, w) - - atomic.AddUint64(&q.len, 1) - - q.mu.Unlock() -} - -func (q *Queue) Pop(ctx context.Context) (worker.BaseProcess, error) { - q.mu.Lock() - - if q.head == nil { - return nil, nil - } - - w := q.head.w[q.curr] - q.head.w[q.curr] = nil - atomic.AddUint64(&q.len, ^uint64(0)) - atomic.AddUint64(&q.curr, 1) - - if atomic.LoadUint64(&q.curr) >= uint64(len(q.head.w)) { - n := q.head.n - q.head.n = nil - q.head = n - q.curr = 0 - } - - q.mu.Unlock() - - return w, nil -} - -func (q *Queue) Replace(oldPid int64, newWorker worker.BaseProcess) { - -} - -func (q *Queue) Destroy() {} - -func newNode(capacity int) *Node { - return &Node{w: make([]worker.BaseProcess, 0, capacity)} -} diff --git a/pkg/worker_watcher/worker_watcher.go b/pkg/worker_watcher/worker_watcher.go deleted file mode 100755 index 83f8e627..00000000 --- a/pkg/worker_watcher/worker_watcher.go +++ /dev/null @@ -1,318 +0,0 @@ -package worker_watcher //nolint:stylecheck - -import ( - "context" - "sync" - "sync/atomic" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/pkg/worker" - "github.com/spiral/roadrunner/v2/pkg/worker_watcher/container/channel" - "github.com/spiral/roadrunner/v2/utils" -) - -// Vector interface represents vector container -type Vector interface { - // Push used to put worker to the vector - Push(worker.BaseProcess) - // Pop used to get worker from the vector - Pop(ctx context.Context) (worker.BaseProcess, error) - 
// Remove worker with provided pid - Remove(pid int64) - // Destroy used to stop releasing the workers - Destroy() - - // TODO Add Replace method, and remove `Remove` method. Replace will do removal and allocation - // Replace(prevPid int64, newWorker worker.BaseProcess) -} - -type workerWatcher struct { - sync.RWMutex - container Vector - // used to control Destroy stage (that all workers are in the container) - numWorkers *uint64 - - workers []worker.BaseProcess - - allocator worker.Allocator - allocateTimeout time.Duration - events events.Handler -} - -// NewSyncWorkerWatcher is a constructor for the Watcher -func NewSyncWorkerWatcher(allocator worker.Allocator, numWorkers uint64, events events.Handler, allocateTimeout time.Duration) *workerWatcher { - ww := &workerWatcher{ - container: channel.NewVector(numWorkers), - - // pass a ptr to the number of workers to avoid blocking in the TTL loop - numWorkers: utils.Uint64(numWorkers), - allocateTimeout: allocateTimeout, - workers: make([]worker.BaseProcess, 0, numWorkers), - - allocator: allocator, - events: events, - } - - return ww -} - -func (ww *workerWatcher) Watch(workers []worker.BaseProcess) error { - for i := 0; i < len(workers); i++ { - ww.container.Push(workers[i]) - // add worker to watch slice - ww.workers = append(ww.workers, workers[i]) - - go func(swc worker.BaseProcess) { - ww.wait(swc) - }(workers[i]) - } - return nil -} - -// Take is not a thread safe operation -func (ww *workerWatcher) Take(ctx context.Context) (worker.BaseProcess, error) { - const op = errors.Op("worker_watcher_get_free_worker") - - // thread safe operation - w, err := ww.container.Pop(ctx) - if err != nil { - if errors.Is(errors.WatcherStopped, err) { - return nil, errors.E(op, errors.WatcherStopped) - } - - return nil, errors.E(op, err) - } - - // fast path, worker not nil and in the ReadyState - if w.State().Value() == worker.StateReady { - return w, nil - } - - // ========================================================= - // SLOW PATH - _ = w.Kill() - // no free workers in the container or worker not in the ReadyState (TTL-ed) - // try to continuously get free one - for { - w, err = ww.container.Pop(ctx) - if err != nil { - if errors.Is(errors.WatcherStopped, err) { - return nil, errors.E(op, errors.WatcherStopped) - } - return nil, errors.E(op, err) - } - - if err != nil { - return nil, errors.E(op, err) - } - - switch w.State().Value() { - // return only workers in the Ready state - // check first - case worker.StateReady: - return w, nil - case worker.StateWorking: // how?? 
- ww.container.Push(w) // put it back, let worker finish the work - continue - case - // all the possible wrong states - worker.StateInactive, - worker.StateDestroyed, - worker.StateErrored, - worker.StateStopped, - worker.StateInvalid, - worker.StateKilling, - worker.StateStopping: - // worker doing no work because it in the container - // so we can safely kill it (inconsistent state) - _ = w.Kill() - // try to get new worker - continue - } - } -} - -func (ww *workerWatcher) Allocate() error { - const op = errors.Op("worker_watcher_allocate_new") - - sw, err := ww.allocator() - if err != nil { - // log incident - ww.events.Push( - events.WorkerEvent{ - Event: events.EventWorkerError, - Payload: errors.E(op, errors.Errorf("can't allocate worker: %v", err)), - }) - - // if no timeout, return error immediately - if ww.allocateTimeout == 0 { - return errors.E(op, errors.WorkerAllocate, err) - } - - // every half of a second - allocateFreq := time.NewTicker(time.Millisecond * 500) - - tt := time.After(ww.allocateTimeout) - for { - select { - case <-tt: - // reduce number of workers - atomic.AddUint64(ww.numWorkers, ^uint64(0)) - allocateFreq.Stop() - // timeout exceed, worker can't be allocated - return errors.E(op, errors.WorkerAllocate, err) - - case <-allocateFreq.C: - sw, err = ww.allocator() - if err != nil { - // log incident - ww.events.Push( - events.WorkerEvent{ - Event: events.EventWorkerError, - Payload: errors.E(op, errors.Errorf("can't allocate worker, retry attempt failed: %v", err)), - }) - continue - } - - // reallocated - allocateFreq.Stop() - goto done - } - } - } - -done: - // add worker to Wait - ww.addToWatch(sw) - - ww.Lock() - // add new worker to the workers slice (to get information about workers in parallel) - ww.workers = append(ww.workers, sw) - ww.Unlock() - - // push the worker to the container - ww.Release(sw) - return nil -} - -// Remove worker -func (ww *workerWatcher) Remove(wb worker.BaseProcess) { - ww.Lock() - defer ww.Unlock() - - // set remove state - pid := wb.Pid() - - // worker will be removed on the Get operation - for i := 0; i < len(ww.workers); i++ { - if ww.workers[i].Pid() == pid { - ww.workers = append(ww.workers[:i], ww.workers[i+1:]...) 
- // kill worker, just to be sure it's dead - _ = wb.Kill() - return - } - } -} - -// Release O(1) operation -func (ww *workerWatcher) Release(w worker.BaseProcess) { - switch w.State().Value() { - case worker.StateReady: - ww.container.Push(w) - default: - _ = w.Kill() - } -} - -// Destroy all underlying container (but let them complete the task) -func (ww *workerWatcher) Destroy(_ context.Context) { - // destroy container, we don't use ww mutex here, since we should be able to push worker - ww.Lock() - // do not release new workers - ww.container.Destroy() - ww.Unlock() - - tt := time.NewTicker(time.Millisecond * 100) - defer tt.Stop() - for { //nolint:gosimple - select { - case <-tt.C: - ww.Lock() - // that might be one of the workers is working - if atomic.LoadUint64(ww.numWorkers) != uint64(len(ww.workers)) { - ww.Unlock() - continue - } - // All container at this moment are in the container - // Pop operation is blocked, push can't be done, since it's not possible to pop - for i := 0; i < len(ww.workers); i++ { - ww.workers[i].State().Set(worker.StateDestroyed) - // kill the worker - _ = ww.workers[i].Kill() - } - return - } - } -} - -// List - this is O(n) operation, and it will return copy of the actual workers -func (ww *workerWatcher) List() []worker.BaseProcess { - ww.RLock() - defer ww.RUnlock() - - if len(ww.workers) == 0 { - return nil - } - - base := make([]worker.BaseProcess, 0, len(ww.workers)) - for i := 0; i < len(ww.workers); i++ { - base = append(base, ww.workers[i]) - } - - return base -} - -func (ww *workerWatcher) wait(w worker.BaseProcess) { - const op = errors.Op("worker_watcher_wait") - err := w.Wait() - if err != nil { - ww.events.Push(events.WorkerEvent{ - Event: events.EventWorkerError, - Worker: w, - Payload: errors.E(op, err), - }) - } - - // remove worker - ww.Remove(w) - - if w.State().Value() == worker.StateDestroyed { - // worker was manually destroyed, no need to replace - ww.events.Push(events.PoolEvent{Event: events.EventWorkerDestruct, Payload: w}) - return - } - - // set state as stopped - w.State().Set(worker.StateStopped) - - err = ww.Allocate() - if err != nil { - ww.events.Push(events.PoolEvent{ - Event: events.EventPoolError, - Payload: errors.E(op, err), - }) - - // no workers at all, panic - if len(ww.workers) == 0 && atomic.LoadUint64(ww.numWorkers) == 0 { - panic(errors.E(op, errors.WorkerAllocate, errors.Errorf("can't allocate workers: %v", err))) - } - } -} - -func (ww *workerWatcher) addToWatch(wb worker.BaseProcess) { - go func() { - ww.wait(wb) - }() -} diff --git a/plugins/amqp/amqpjobs/config.go b/plugins/amqp/amqpjobs/config.go deleted file mode 100644 index ac2f6e53..00000000 --- a/plugins/amqp/amqpjobs/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package amqpjobs - -// pipeline rabbitmq info -const ( - exchangeKey string = "exchange" - exchangeType string = "exchange_type" - queue string = "queue" - routingKey string = "routing_key" - prefetch string = "prefetch" - exclusive string = "exclusive" - priority string = "priority" - multipleAsk string = "multiple_ask" - requeueOnFail string = "requeue_on_fail" - - dlx string = "x-dead-letter-exchange" - dlxRoutingKey string = "x-dead-letter-routing-key" - dlxTTL string = "x-message-ttl" - dlxExpires string = "x-expires" - - contentType string = "application/octet-stream" -) - -type GlobalCfg struct { - Addr string `mapstructure:"addr"` -} - -// Config is used to parse pipeline configuration -type Config struct { - Prefetch int `mapstructure:"prefetch"` - Queue string 
`mapstructure:"queue"` - Priority int64 `mapstructure:"priority"` - Exchange string `mapstructure:"exchange"` - ExchangeType string `mapstructure:"exchange_type"` - RoutingKey string `mapstructure:"routing_key"` - Exclusive bool `mapstructure:"exclusive"` - MultipleAck bool `mapstructure:"multiple_ask"` - RequeueOnFail bool `mapstructure:"requeue_on_fail"` -} - -func (c *Config) InitDefault() { - // all options should be in sync with the pipeline defaults in the FromPipeline method - if c.ExchangeType == "" { - c.ExchangeType = "direct" - } - - if c.Exchange == "" { - c.Exchange = "amqp.default" - } - - if c.Queue == "" { - c.Queue = "default" - } - - if c.Prefetch == 0 { - c.Prefetch = 10 - } - - if c.Priority == 0 { - c.Priority = 10 - } -} - -func (c *GlobalCfg) InitDefault() { - if c.Addr == "" { - c.Addr = "amqp://guest:guest@127.0.0.1:5672/" - } -} diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go deleted file mode 100644 index 2ff0a40a..00000000 --- a/plugins/amqp/amqpjobs/consumer.go +++ /dev/null @@ -1,524 +0,0 @@ -package amqpjobs - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/google/uuid" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - pluginName string = "amqp" -) - -type consumer struct { - sync.Mutex - log logger.Logger - pq priorityqueue.Queue - eh events.Handler - - pipeline atomic.Value - - // amqp connection - conn *amqp.Connection - consumeChan *amqp.Channel - publishChan chan *amqp.Channel - consumeID string - connStr string - - retryTimeout time.Duration - // - // prefetch QoS AMQP - // - prefetch int - // - // pipeline's priority - // - priority int64 - exchangeName string - queue string - exclusive bool - exchangeType string - routingKey string - multipleAck bool - requeueOnFail bool - - listeners uint32 - delayed *int64 - stopCh chan struct{} -} - -// NewAMQPConsumer initializes rabbitmq pipeline -func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_amqp_consumer") - // we need to obtain two parts of the amqp information here. 
- // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - // if no such key - error - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION START ------- - var pipeCfg Config - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - // PARSE CONFIGURATION END ------- - - jb := &consumer{ - log: log, - pq: pq, - eh: e, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - // TODO to config - retryTimeout: time.Minute * 5, - priority: pipeCfg.Priority, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeCfg.RoutingKey, - queue: pipeCfg.Queue, - exchangeType: pipeCfg.ExchangeType, - exchangeName: pipeCfg.Exchange, - prefetch: pipeCfg.Prefetch, - exclusive: pipeCfg.Exclusive, - multipleAck: pipeCfg.MultipleAck, - requeueOnFail: pipeCfg.RequeueOnFail, - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // run redialer and requeue listener for the connection - jb.redialer() - - return jb, nil -} - -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_amqp_consumer_from_pipeline") - // we need to obtain two parts of the amqp information here. 
- // firs part - address to connect, it is located in the global section under the amqp pluginName - // second part - queues and other pipeline information - - // only global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs")) - } - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - jb := &consumer{ - log: log, - eh: e, - pq: pq, - consumeID: uuid.NewString(), - stopCh: make(chan struct{}), - retryTimeout: time.Minute * 5, - delayed: utils.Int64(0), - - publishChan: make(chan *amqp.Channel, 1), - routingKey: pipeline.String(routingKey, ""), - queue: pipeline.String(queue, "default"), - exchangeType: pipeline.String(exchangeType, "direct"), - exchangeName: pipeline.String(exchangeKey, "amqp.default"), - prefetch: pipeline.Int(prefetch, 10), - priority: int64(pipeline.Int(priority, 10)), - exclusive: pipeline.Bool(exclusive, false), - multipleAck: pipeline.Bool(multipleAsk, false), - requeueOnFail: pipeline.Bool(requeueOnFail, false), - } - - jb.conn, err = amqp.Dial(globalCfg.Addr) - if err != nil { - return nil, errors.E(op, err) - } - - // save address - jb.connStr = globalCfg.Addr - - err = jb.initRabbitMQ() - if err != nil { - return nil, errors.E(op, err) - } - - pch, err := jb.conn.Channel() - if err != nil { - return nil, errors.E(op, err) - } - - jb.publishChan <- pch - - // register the pipeline - // error here is always nil - _ = jb.Register(context.Background(), pipeline) - - // run redialer for the connection - jb.redialer() - - return jb, nil -} - -func (c *consumer) Push(ctx context.Context, job *job.Job) error { - const op = errors.Op("rabbitmq_push") - // check if the pipeline registered - - // load atomic value - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != job.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name())) - } - - err := c.handleItem(ctx, fromJob(job)) - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (c *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - c.pipeline.Store(p) - return nil -} - -func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - start := time.Now() - const op = errors.Op("rabbit_run") - - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) - } - - // protect connection (redial) - c.Lock() - defer c.Unlock() - - var err error - c.consumeChan, err = c.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - err = c.consumeChan.Qos(c.prefetch, 0, false) - if err != nil { - return errors.E(op, err) - } - - // start reading messages from the channel - deliv, err := c.consumeChan.Consume( - c.queue, - c.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // run listener - c.listener(deliv) - - atomic.StoreUint32(&c.listeners, 1) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - - return nil -} - -func (c *consumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("amqp_driver_state") - select { - 
case pch := <-c.publishChan: - defer func() { - c.publishChan <- pch - }() - - q, err := pch.QueueInspect(c.queue) - if err != nil { - return nil, errors.E(op, err) - } - - pipe := c.pipeline.Load().(*pipeline.Pipeline) - - return &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: q.Name, - Active: int64(q.Messages), - Delayed: atomic.LoadInt64(c.delayed), - Ready: ready(atomic.LoadUint32(&c.listeners)), - }, nil - - case <-ctx.Done(): - return nil, errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func (c *consumer) Pause(_ context.Context, p string) { - start := time.Now() - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - c.log.Error("no such pipeline", "requested pause on: ", p) - } - - l := atomic.LoadUint32(&c.listeners) - // no active listeners - if l == 0 { - c.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&c.listeners, ^uint32(0)) - - // protect connection (redial) - c.Lock() - defer c.Unlock() - - err := c.consumeChan.Cancel(c.consumeID, true) - if err != nil { - c.log.Error("cancel publish channel, forcing close", "error", err) - errCl := c.consumeChan.Close() - if errCl != nil { - c.log.Error("force close failed", "error", err) - return - } - return - } - - c.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func (c *consumer) Resume(_ context.Context, p string) { - start := time.Now() - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - c.log.Error("no such pipeline", "requested resume on: ", p) - } - - // protect connection (redial) - c.Lock() - defer c.Unlock() - - l := atomic.LoadUint32(&c.listeners) - // no active listeners - if l == 1 { - c.log.Warn("amqp listener already in the active state") - return - } - - var err error - c.consumeChan, err = c.conn.Channel() - if err != nil { - c.log.Error("create channel on rabbitmq connection", "error", err) - return - } - - err = c.consumeChan.Qos(c.prefetch, 0, false) - if err != nil { - c.log.Error("qos set failed", "error", err) - return - } - - // start reading messages from the channel - deliv, err := c.consumeChan.Consume( - c.queue, - c.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - c.log.Error("consume operation failed", "error", err) - return - } - - // run listener - c.listener(deliv) - - // increase number of listeners - atomic.AddUint32(&c.listeners, 1) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func (c *consumer) Stop(context.Context) error { - start := time.Now() - c.stopCh <- struct{}{} - - pipe := c.pipeline.Load().(*pipeline.Pipeline) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - - return nil -} - -// handleItem -func (c *consumer) handleItem(ctx context.Context, msg *Item) error { - const op = errors.Op("rabbitmq_handle_item") - select { - case pch := <-c.publishChan: - // return the channel back - defer func() { - c.publishChan <- pch - }() - - // convert - table, err := pack(msg.ID(), msg) - if err != nil { - return errors.E(op, err) - } - - const op = errors.Op("rabbitmq_handle_item") - // handle timeouts - if msg.Options.DelayDuration() > 0 { - atomic.AddInt64(c.delayed, 1) - // TODO declare separate method for 
this if condition - // TODO dlx cache channel?? - delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000) - tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, c.exchangeName, c.queue) - _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{ - dlx: c.exchangeName, - dlxRoutingKey: c.routingKey, - dlxTTL: delayMs, - dlxExpires: delayMs * 2, - }) - if err != nil { - atomic.AddInt64(c.delayed, ^int64(0)) - return errors.E(op, err) - } - - err = pch.QueueBind(tmpQ, tmpQ, c.exchangeName, false, nil) - if err != nil { - atomic.AddInt64(c.delayed, ^int64(0)) - return errors.E(op, err) - } - - // insert to the local, limited pipeline - err = pch.Publish(c.exchangeName, tmpQ, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - atomic.AddInt64(c.delayed, ^int64(0)) - return errors.E(op, err) - } - - return nil - } - - // insert to the local, limited pipeline - err = pch.Publish(c.exchangeName, c.routingKey, false, false, amqp.Publishing{ - Headers: table, - ContentType: contentType, - Timestamp: time.Now(), - DeliveryMode: amqp.Persistent, - Body: msg.Body(), - }) - - if err != nil { - return errors.E(op, err) - } - - return nil - case <-ctx.Done(): - return errors.E(op, errors.TimeOut, ctx.Err()) - } -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/amqp/amqpjobs/item.go b/plugins/amqp/amqpjobs/item.go deleted file mode 100644 index b837ff86..00000000 --- a/plugins/amqp/amqpjobs/item.go +++ /dev/null @@ -1,250 +0,0 @@ -package amqpjobs - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - json "github.com/json-iterator/go" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. - Delay int64 `json:"delay,omitempty"` - - // private - // ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery - ack func(multiply bool) error - - // nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server. - // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel. - // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue. 
- // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time - nack func(multiply bool, requeue bool) error - - // requeueFn used as a pointer to the push function - requeueFn func(context.Context, *Item) error - // delayed jobs TODO(rustatian): figure out how to get stats from the DLX - delayed *int64 - multipleAsk bool - requeue bool -} - -// DelayDuration returns delay duration in a form of time.Duration. -func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the amqp, amqp.Table used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.ack(i.Options.multipleAsk) -} - -func (i *Item) Nack() error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - return i.Options.nack(false, i.Options.requeue) -} - -// Requeue with the provided delay, handled by the Nack -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - if i.Options.Delay > 0 { - atomic.AddInt64(i.Options.delayed, ^int64(0)) - } - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - errNack := i.Options.nack(false, true) - if errNack != nil { - return fmt.Errorf("requeue error: %v\nack error: %v", err, errNack) - } - - return err - } - - // ack the job - err = i.Options.ack(false) - if err != nil { - return err - } - - return nil -} - -// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ -func (c *consumer) fromDelivery(d amqp.Delivery) (*Item, error) { - const op = errors.Op("from_delivery_convert") - item, err := c.unpack(d) - if err != nil { - return nil, errors.E(op, err) - } - - i := &Item{ - Job: item.Job, - Ident: item.Ident, - Payload: item.Payload, - Headers: item.Headers, - Options: item.Options, - } - - item.Options.ack = d.Ack - item.Options.nack = d.Nack - item.Options.delayed = c.delayed - - // requeue func - item.Options.requeueFn = c.handleItem - return i, nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -// pack job metadata into headers -func pack(id string, j *Item) (amqp.Table, error) { - headers, err := json.Marshal(j.Headers) - if err != nil { - return nil, err - } - return amqp.Table{ - job.RRID: id, - job.RRJob: j.Job, - job.RRPipeline: j.Options.Pipeline, - job.RRHeaders: headers, - job.RRDelay: j.Options.Delay, - job.RRPriority: j.Options.Priority, - }, nil -} - -// unpack 
restores jobs.Options -func (c *consumer) unpack(d amqp.Delivery) (*Item, error) { - item := &Item{Payload: utils.AsString(d.Body), Options: &Options{ - multipleAsk: c.multipleAck, - requeue: c.requeueOnFail, - requeueFn: c.handleItem, - }} - - if _, ok := d.Headers[job.RRID].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID)) - } - - item.Ident = d.Headers[job.RRID].(string) - - if _, ok := d.Headers[job.RRJob].(string); !ok { - return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob)) - } - - item.Job = d.Headers[job.RRJob].(string) - - if _, ok := d.Headers[job.RRPipeline].(string); ok { - item.Options.Pipeline = d.Headers[job.RRPipeline].(string) - } - - if h, ok := d.Headers[job.RRHeaders].([]byte); ok { - err := json.Unmarshal(h, &item.Headers) - if err != nil { - return nil, err - } - } - - if t, ok := d.Headers[job.RRDelay]; ok { - switch t.(type) { - case int, int16, int32, int64: - item.Options.Delay = t.(int64) - default: - c.log.Warn("unknown delay type", "want:", "int, int16, int32, int64", "actual", t) - } - } - - if t, ok := d.Headers[job.RRPriority]; !ok { - // set pipe's priority - item.Options.Priority = c.priority - } else { - switch t.(type) { - case int, int16, int32, int64: - item.Options.Priority = t.(int64) - default: - c.log.Warn("unknown priority type", "want:", "int, int16, int32, int64", "actual", t) - } - } - - return item, nil -} diff --git a/plugins/amqp/amqpjobs/listener.go b/plugins/amqp/amqpjobs/listener.go deleted file mode 100644 index 75c61cad..00000000 --- a/plugins/amqp/amqpjobs/listener.go +++ /dev/null @@ -1,25 +0,0 @@ -package amqpjobs - -import amqp "github.com/rabbitmq/amqp091-go" - -func (c *consumer) listener(deliv <-chan amqp.Delivery) { - go func() { - for { //nolint:gosimple - select { - case msg, ok := <-deliv: - if !ok { - c.log.Info("delivery channel closed, leaving the rabbit listener") - return - } - - d, err := c.fromDelivery(msg) - if err != nil { - c.log.Error("amqp delivery convert", "error", err) - continue - } - // insert job into the main priority queue - c.pq.Insert(d) - } - } - }() -} diff --git a/plugins/amqp/amqpjobs/rabbit_init.go b/plugins/amqp/amqpjobs/rabbit_init.go deleted file mode 100644 index fb5f6911..00000000 --- a/plugins/amqp/amqpjobs/rabbit_init.go +++ /dev/null @@ -1,57 +0,0 @@ -package amqpjobs - -import ( - "github.com/spiral/errors" -) - -func (c *consumer) initRabbitMQ() error { - const op = errors.Op("jobs_plugin_rmq_init") - // Channel opens a unique, concurrent server channel to process the bulk of AMQP - // messages. Any error from methods on this receiver will render the receiver - // invalid and a new Channel should be opened. 
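// A minimal sketch of the rule stated in the comment above: any error returned by a
// channel method invalidates that channel, so it is discarded and a fresh one is
// opened for the next operation. Only the amqp091-go client already imported by this
// package is assumed; withChannel and declareQueue are illustrative helpers, not part
// of the plugin.
package amqpsketch

import (
	amqp "github.com/rabbitmq/amqp091-go"
)

// withChannel opens a fresh channel, runs fn on it and always closes it, so a
// failed call never leaves an invalidated channel around for reuse.
func withChannel(conn *amqp.Connection, fn func(ch *amqp.Channel) error) error {
	ch, err := conn.Channel()
	if err != nil {
		return err
	}
	defer func() { _ = ch.Close() }()
	return fn(ch)
}

// declareQueue declares a durable queue using a throwaway channel.
func declareQueue(conn *amqp.Connection, name string) error {
	return withChannel(conn, func(ch *amqp.Channel) error {
		_, err := ch.QueueDeclare(name, true, false, false, false, nil)
		return err
	})
}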
- channel, err := c.conn.Channel() - if err != nil { - return errors.E(op, err) - } - - // declare an exchange (idempotent operation) - err = channel.ExchangeDeclare( - c.exchangeName, - c.exchangeType, - true, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // verify or declare a queue - q, err := channel.QueueDeclare( - c.queue, - false, - false, - c.exclusive, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // bind queue to the exchange - err = channel.QueueBind( - q.Name, - c.routingKey, - c.exchangeName, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - return channel.Close() -} diff --git a/plugins/amqp/amqpjobs/redial.go b/plugins/amqp/amqpjobs/redial.go deleted file mode 100644 index 698a34a6..00000000 --- a/plugins/amqp/amqpjobs/redial.go +++ /dev/null @@ -1,138 +0,0 @@ -package amqpjobs - -import ( - "time" - - "github.com/cenkalti/backoff/v4" - amqp "github.com/rabbitmq/amqp091-go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" -) - -// redialer used to redial to the rabbitmq in case of the connection interrupts -func (c *consumer) redialer() { //nolint:gocognit - go func() { - const op = errors.Op("rabbitmq_redial") - - for { - select { - case err := <-c.conn.NotifyClose(make(chan *amqp.Error)): - if err == nil { - return - } - - c.Lock() - - // trash the broken publishing channel - <-c.publishChan - - t := time.Now().UTC() - pipe := c.pipeline.Load().(*pipeline.Pipeline) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeError, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Error: err, - Start: time.Now().UTC(), - }) - - expb := backoff.NewExponentialBackOff() - // set the retry timeout (minutes) - expb.MaxElapsedTime = c.retryTimeout - operation := func() error { - c.log.Warn("rabbitmq reconnecting, caused by", "error", err) - var dialErr error - c.conn, dialErr = amqp.Dial(c.connStr) - if dialErr != nil { - return errors.E(op, dialErr) - } - - c.log.Info("rabbitmq dial succeed. 
trying to redeclare queues and subscribers") - - // re-init connection - errInit := c.initRabbitMQ() - if errInit != nil { - c.log.Error("rabbitmq dial", "error", errInit) - return errInit - } - - // redeclare consume channel - var errConnCh error - c.consumeChan, errConnCh = c.conn.Channel() - if errConnCh != nil { - return errors.E(op, errConnCh) - } - - // redeclare publish channel - pch, errPubCh := c.conn.Channel() - if errPubCh != nil { - return errors.E(op, errPubCh) - } - - // start reading messages from the channel - deliv, err := c.consumeChan.Consume( - c.queue, - c.consumeID, - false, - false, - false, - false, - nil, - ) - if err != nil { - return errors.E(op, err) - } - - // put the fresh publishing channel - c.publishChan <- pch - // restart listener - c.listener(deliv) - - c.log.Info("queues and subscribers redeclared successfully") - - return nil - } - - retryErr := backoff.Retry(operation, expb) - if retryErr != nil { - c.Unlock() - c.log.Error("backoff failed", "error", retryErr) - return - } - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Start: t, - Elapsed: time.Since(t), - }) - - c.Unlock() - - case <-c.stopCh: - pch := <-c.publishChan - err := pch.Close() - if err != nil { - c.log.Error("publish channel close", "error", err) - } - - if c.consumeChan != nil { - err = c.consumeChan.Close() - if err != nil { - c.log.Error("consume channel close", "error", err) - } - } - - err = c.conn.Close() - if err != nil { - c.log.Error("amqp connection close", "error", err) - } - - return - } - } - }() -} diff --git a/plugins/amqp/plugin.go b/plugins/amqp/plugin.go deleted file mode 100644 index c4f5f1da..00000000 --- a/plugins/amqp/plugin.go +++ /dev/null @@ -1,41 +0,0 @@ -package amqp - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/amqp/amqpjobs" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - pluginName string = "amqp" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Name() string { - return pluginName -} - -func (p *Plugin) Available() {} - -func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return amqpjobs.NewAMQPConsumer(configKey, p.log, p.cfg, e, pq) -} - -// FromPipeline constructs AMQP driver from pipeline -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return amqpjobs.FromPipeline(pipe, p.log, p.cfg, e, pq) -} diff --git a/plugins/beanstalk/config.go b/plugins/beanstalk/config.go deleted file mode 100644 index a8069f5d..00000000 --- a/plugins/beanstalk/config.go +++ /dev/null @@ -1,53 +0,0 @@ -package beanstalk - -import ( - "time" - - "github.com/spiral/roadrunner/v2/utils" -) - -const ( - tubePriority string = "tube_priority" - tube string = "tube" - reserveTimeout string = "reserve_timeout" -) - -type GlobalCfg struct { - Addr string `mapstructure:"addr"` - Timeout time.Duration `mapstructure:"timeout"` -} - -func (c *GlobalCfg) InitDefault() { - if c.Addr == "" { - c.Addr = "tcp://127.0.0.1:11300" - } - 
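	// The default address above is a DSN in the form network://address, e.g.
	// tcp://127.0.0.1:11300 or unix://beanstalk.sock. A minimal sketch, assuming only
	// the standard library (strings, fmt), of how such a DSN splits into the network
	// and address arguments later handed to beanstalk.DialTimeout; splitDSN is an
	// illustrative helper, not part of the plugin:
	//
	//	func splitDSN(addr string) (network, address string, err error) {
	//		parts := strings.Split(addr, "://")
	//		if len(parts) != 2 {
	//			return "", "", fmt.Errorf("invalid DSN, want network://address, got: %s", addr)
	//		}
	//		return parts[0], parts[1], nil
	//	}
	//
	// For example, splitDSN("tcp://127.0.0.1:11300") yields ("tcp", "127.0.0.1:11300", nil).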
- if c.Timeout == 0 { - c.Timeout = time.Second * 30 - } -} - -type Config struct { - PipePriority int64 `mapstructure:"priority"` - TubePriority *uint32 `mapstructure:"tube_priority"` - Tube string `mapstructure:"tube"` - ReserveTimeout time.Duration `mapstructure:"reserve_timeout"` -} - -func (c *Config) InitDefault() { - if c.Tube == "" { - c.Tube = "default" - } - - if c.ReserveTimeout == 0 { - c.ReserveTimeout = time.Second * 1 - } - - if c.TubePriority == nil { - c.TubePriority = utils.Uint32(0) - } - - if c.PipePriority == 0 { - c.PipePriority = 10 - } -} diff --git a/plugins/beanstalk/connection.go b/plugins/beanstalk/connection.go deleted file mode 100644 index d3241b37..00000000 --- a/plugins/beanstalk/connection.go +++ /dev/null @@ -1,223 +0,0 @@ -package beanstalk - -import ( - "context" - "net" - "sync" - "time" - - "github.com/beanstalkd/go-beanstalk" - "github.com/cenkalti/backoff/v4" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -type ConnPool struct { - sync.RWMutex - - log logger.Logger - - conn *beanstalk.Conn - connT *beanstalk.Conn - ts *beanstalk.TubeSet - t *beanstalk.Tube - - network string - address string - tName string - tout time.Duration -} - -func NewConnPool(network, address, tName string, tout time.Duration, log logger.Logger) (*ConnPool, error) { - connT, err := beanstalk.DialTimeout(network, address, tout) - if err != nil { - return nil, err - } - - connTS, err := beanstalk.DialTimeout(network, address, tout) - if err != nil { - return nil, err - } - - tube := beanstalk.NewTube(connT, tName) - ts := beanstalk.NewTubeSet(connTS, tName) - - return &ConnPool{ - log: log, - network: network, - address: address, - tName: tName, - tout: tout, - conn: connTS, - connT: connT, - ts: ts, - t: tube, - }, nil -} - -// Put the payload -// TODO use the context ?? -func (cp *ConnPool) Put(_ context.Context, body []byte, pri uint32, delay, ttr time.Duration) (uint64, error) { - cp.RLock() - defer cp.RUnlock() - - // TODO(rustatian): redial based on the token - id, err := cp.t.Put(body, pri, delay, ttr) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return 0, errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry put only when we redialed - return cp.t.Put(body, pri, delay, ttr) - } - } - - return id, nil -} - -// Reserve reserves and returns a job from one of the tubes in t. If no -// job is available before time timeout has passed, Reserve returns a -// ConnError recording ErrTimeout. -// -// Typically, a client will reserve a job, perform some work, then delete -// the job with Conn.Delete. 
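// A minimal sketch of the reserve -> work -> delete cycle described above, assuming
// only the go-beanstalk client already imported by this file; the address, tube name
// and handle function are illustrative placeholders, not values used by the plugin.
package beanstalksketch

import (
	"time"

	"github.com/beanstalkd/go-beanstalk"
)

// consumeOnce reserves a single job, lets handle process it and deletes it on success.
func consumeOnce(addr, tube string, handle func(id uint64, body []byte) error) error {
	conn, err := beanstalk.DialTimeout("tcp", addr, 10*time.Second)
	if err != nil {
		return err
	}
	defer func() { _ = conn.Close() }()

	ts := beanstalk.NewTubeSet(conn, tube)

	// Reserve blocks until a job is ready or the timeout expires.
	id, body, err := ts.Reserve(5 * time.Second)
	if err != nil {
		// A reserve timeout is not fatal; a real consumer would simply poll again.
		if cerr, ok := err.(beanstalk.ConnError); ok && cerr.Err == beanstalk.ErrTimeout {
			return nil
		}
		return err
	}

	// Perform the work while the job is reserved (within its TTR).
	if err := handle(id, body); err != nil {
		// Do not delete on failure: the server hands the job out again once the TTR
		// expires or this connection closes.
		return err
	}

	// Delete acknowledges the job so it is never delivered again.
	return conn.Delete(id)
}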
-func (cp *ConnPool) Reserve(reserveTimeout time.Duration) (uint64, []byte, error) { - cp.RLock() - defer cp.RUnlock() - - id, body, err := cp.ts.Reserve(reserveTimeout) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return 0, nil, errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry Reserve only when we redialed - return cp.ts.Reserve(reserveTimeout) - } - } - - return id, body, nil -} - -func (cp *ConnPool) Delete(_ context.Context, id uint64) error { - cp.RLock() - defer cp.RUnlock() - - err := cp.conn.Delete(id) - if err != nil { - // errN contains both, err and internal checkAndRedial error - errN := cp.checkAndRedial(err) - if errN != nil { - return errors.Errorf("err: %s\nerr redial: %s", err, errN) - } else { - // retry Delete only when we redialed - return cp.conn.Delete(id) - } - } - return nil -} - -func (cp *ConnPool) Stats(_ context.Context) (map[string]string, error) { - cp.RLock() - defer cp.RUnlock() - - stat, err := cp.conn.Stats() - if err != nil { - errR := cp.checkAndRedial(err) - if errR != nil { - return nil, errors.Errorf("err: %s\nerr redial: %s", err, errR) - } else { - return cp.conn.Stats() - } - } - - return stat, nil -} - -func (cp *ConnPool) redial() error { - const op = errors.Op("connection_pool_redial") - - cp.Lock() - // backoff here - expb := backoff.NewExponentialBackOff() - // TODO(rustatian) set via config - expb.MaxElapsedTime = time.Minute - - operation := func() error { - connT, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) - if err != nil { - return err - } - if connT == nil { - return errors.E(op, errors.Str("connectionT is nil")) - } - - connTS, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout) - if err != nil { - return err - } - - if connTS == nil { - return errors.E(op, errors.Str("connectionTS is nil")) - } - - cp.t = beanstalk.NewTube(connT, cp.tName) - cp.ts = beanstalk.NewTubeSet(connTS, cp.tName) - cp.conn = connTS - cp.connT = connT - - cp.log.Info("beanstalk redial was successful") - return nil - } - - retryErr := backoff.Retry(operation, expb) - if retryErr != nil { - cp.Unlock() - return retryErr - } - cp.Unlock() - - return nil -} - -var connErrors = map[string]struct{}{"EOF": {}} - -func (cp *ConnPool) checkAndRedial(err error) error { - const op = errors.Op("connection_pool_check_redial") - switch et := err.(type) { //nolint:gocritic - // check if the error - case beanstalk.ConnError: - switch bErr := et.Err.(type) { - case *net.OpError: - cp.RUnlock() - errR := cp.redial() - cp.RLock() - // if redial failed - return - if errR != nil { - return errors.E(op, errors.Errorf("%v:%v", bErr, errR)) - } - - // if redial was successful -> continue listening - return nil - default: - if _, ok := connErrors[et.Err.Error()]; ok { - // if error is related to the broken connection - redial - cp.RUnlock() - errR := cp.redial() - cp.RLock() - // if redial failed - return - if errR != nil { - return errors.E(op, errors.Errorf("%v:%v", err, errR)) - } - // if redial was successful -> continue listening - return nil - } - } - } - - // return initial error - return err -} diff --git a/plugins/beanstalk/consumer.go b/plugins/beanstalk/consumer.go deleted file mode 100644 index 30807f03..00000000 --- a/plugins/beanstalk/consumer.go +++ /dev/null @@ -1,374 +0,0 @@ -package beanstalk - -import ( - "bytes" - "context" - "encoding/gob" - "strconv" - "strings" - "sync/atomic" - "time" - - "github.com/spiral/errors" - 
"github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" -) - -type consumer struct { - log logger.Logger - eh events.Handler - pq priorityqueue.Queue - - pipeline atomic.Value - listeners uint32 - - // beanstalk - pool *ConnPool - addr string - network string - reserveTimeout time.Duration - reconnectCh chan struct{} - tout time.Duration - // tube name - tName string - tubePriority *uint32 - priority int64 - - stopCh chan struct{} - requeueCh chan *Item -} - -func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_beanstalk_consumer") - - // PARSE CONFIGURATION ------- - var pipeCfg Config - var globalCfg GlobalCfg - - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) - } - - err := cfg.UnmarshalKey(configKey, &pipeCfg) - if err != nil { - return nil, errors.E(op, err) - } - - pipeCfg.InitDefault() - - err = cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - dsn := strings.Split(globalCfg.Addr, "://") - if len(dsn) != 2 { - return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) - } - - cPool, err := NewConnPool(dsn[0], dsn[1], pipeCfg.Tube, globalCfg.Timeout, log) - if err != nil { - return nil, errors.E(op, err) - } - - // initialize job consumer - jc := &consumer{ - pq: pq, - log: log, - eh: e, - pool: cPool, - network: dsn[0], - addr: dsn[1], - tout: globalCfg.Timeout, - tName: pipeCfg.Tube, - reserveTimeout: pipeCfg.ReserveTimeout, - tubePriority: pipeCfg.TubePriority, - priority: pipeCfg.PipePriority, - - // buffered with two because jobs root plugin can call Stop at the same time as Pause - stopCh: make(chan struct{}, 2), - requeueCh: make(chan *Item, 1000), - reconnectCh: make(chan struct{}, 2), - } - - return jc, nil -} - -func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("new_beanstalk_consumer") - - // PARSE CONFIGURATION ------- - var globalCfg GlobalCfg - - // if no global section - if !cfg.Has(pluginName) { - return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout")) - } - - err := cfg.UnmarshalKey(pluginName, &globalCfg) - if err != nil { - return nil, errors.E(op, err) - } - - globalCfg.InitDefault() - - // PARSE CONFIGURATION ------- - - dsn := strings.Split(globalCfg.Addr, "://") - if len(dsn) != 2 { - return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr)) - } - - cPool, err := NewConnPool(dsn[0], dsn[1], pipe.String(tube, "default"), globalCfg.Timeout, 
log) - if err != nil { - return nil, errors.E(op, err) - } - - // initialize job consumer - jc := &consumer{ - pq: pq, - log: log, - eh: e, - pool: cPool, - network: dsn[0], - addr: dsn[1], - tout: globalCfg.Timeout, - tName: pipe.String(tube, "default"), - reserveTimeout: time.Second * time.Duration(pipe.Int(reserveTimeout, 5)), - tubePriority: utils.Uint32(uint32(pipe.Int(tubePriority, 1))), - priority: pipe.Priority(), - - // buffered with two because jobs root plugin can call Stop at the same time as Pause - stopCh: make(chan struct{}, 2), - requeueCh: make(chan *Item, 1000), - reconnectCh: make(chan struct{}, 2), - } - - return jc, nil -} -func (j *consumer) Push(ctx context.Context, jb *job.Job) error { - const op = errors.Op("beanstalk_push") - // check if the pipeline registered - - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != jb.Options.Pipeline { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name())) - } - - err := j.handleItem(ctx, fromJob(jb)) - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (j *consumer) handleItem(ctx context.Context, item *Item) error { - const op = errors.Op("beanstalk_handle_item") - - bb := new(bytes.Buffer) - bb.Grow(64) - err := gob.NewEncoder(bb).Encode(item) - if err != nil { - return errors.E(op, err) - } - - body := make([]byte, bb.Len()) - copy(body, bb.Bytes()) - bb.Reset() - bb = nil - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L458 - // is an integer < 2**32. Jobs with smaller priority values will be - // scheduled before jobs with larger priorities. The most urgent priority is 0; - // the least urgent priority is 4,294,967,295. - // - // is an integer number of seconds to wait before putting the job in - // the ready queue. The job will be in the "delayed" state during this time. - // Maximum delay is 2**32-1. - // - // -- time to run -- is an integer number of seconds to allow a worker - // to run this job. This time is counted from the moment a worker reserves - // this job. If the worker does not delete, release, or bury the job within - // seconds, the job will time out and the server will release the job. - // The minimum ttr is 1. If the client sends 0, the server will silently - // increase the ttr to 1. Maximum ttr is 2**32-1. 
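// A minimal sketch of how the <pri>, <delay> and <ttr> values described above map
// onto a single go-beanstalk Put call; the tube name and the concrete numbers are
// illustrative only and are not the plugin's defaults.
package beanstalksketch

import (
	"time"

	"github.com/beanstalkd/go-beanstalk"
)

// putExample enqueues body with an explicit priority, delay and time-to-run.
func putExample(conn *beanstalk.Conn, body []byte) (uint64, error) {
	tube := beanstalk.NewTube(conn, "example")

	// pri 0 is the most urgent priority (4,294,967,295 is the least urgent);
	// delay keeps the job in the "delayed" state for 10s before it becomes ready;
	// ttr gives the reserving worker 30s to delete, release or bury the job.
	return tube.Put(body, 0, 10*time.Second, 30*time.Second)
}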
- id, err := j.pool.Put(ctx, body, *j.tubePriority, item.Options.DelayDuration(), j.tout) - if err != nil { - errD := j.pool.Delete(ctx, id) - if errD != nil { - return errors.E(op, errors.Errorf("%s:%s", err.Error(), errD.Error())) - } - return errors.E(op, err) - } - - return nil -} - -func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error { - // register the pipeline - j.pipeline.Store(p) - return nil -} - -// State https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L514 -func (j *consumer) State(ctx context.Context) (*jobState.State, error) { - const op = errors.Op("beanstalk_state") - stat, err := j.pool.Stats(ctx) - if err != nil { - return nil, errors.E(op, err) - } - - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - out := &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: j.tName, - Ready: ready(atomic.LoadUint32(&j.listeners)), - } - - // set stat, skip errors (replace with 0) - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L523 - if v, err := strconv.Atoi(stat["current-jobs-ready"]); err == nil { - out.Active = int64(v) - } - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L525 - if v, err := strconv.Atoi(stat["current-jobs-reserved"]); err == nil { - // this is not an error, reserved in beanstalk behaves like an active jobs - out.Reserved = int64(v) - } - - // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L528 - if v, err := strconv.Atoi(stat["current-jobs-delayed"]); err == nil { - out.Delayed = int64(v) - } - - return out, nil -} - -func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("beanstalk_run") - start := time.Now() - - // load atomic value - // check if the pipeline registered - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", p.Name(), pipe.Name())) - } - - atomic.AddUint32(&j.listeners, 1) - - go j.listen() - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - - return nil -} - -func (j *consumer) Stop(context.Context) error { - start := time.Now() - pipe := j.pipeline.Load().(*pipeline.Pipeline) - - if atomic.LoadUint32(&j.listeners) == 1 { - j.stopCh <- struct{}{} - } - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - - return nil -} - -func (j *consumer) Pause(_ context.Context, p string) { - start := time.Now() - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 0 { - j.log.Warn("no active listeners, nothing to pause") - return - } - - atomic.AddUint32(&j.listeners, ^uint32(0)) - - j.stopCh <- struct{}{} - - j.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func (j *consumer) Resume(_ context.Context, p string) { - start := time.Now() - // load atomic value - pipe := j.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name()) - return - } - - l := 
atomic.LoadUint32(&j.listeners) - // no active listeners - if l == 1 { - j.log.Warn("sqs listener already in the active state") - return - } - - // start listener - go j.listen() - - // increase num of listeners - atomic.AddUint32(&j.listeners, 1) - - j.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func ready(r uint32) bool { - return r > 0 -} diff --git a/plugins/beanstalk/encode_test.go b/plugins/beanstalk/encode_test.go deleted file mode 100644 index e43207eb..00000000 --- a/plugins/beanstalk/encode_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package beanstalk - -import ( - "bytes" - "crypto/rand" - "encoding/gob" - "testing" - - json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/utils" -) - -func BenchmarkEncodeGob(b *testing.B) { - tb := make([]byte, 1024*10) - _, err := rand.Read(tb) - if err != nil { - b.Fatal(err) - } - - item := &Item{ - Job: "/super/test/php/class/loooooong", - Ident: "12341234-asdfasdfa-1234234-asdfasdfas", - Payload: utils.AsString(tb), - Headers: map[string][]string{"Test": {"test1", "test2"}}, - Options: &Options{ - Priority: 10, - Pipeline: "test-local-pipe", - Delay: 10, - }, - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - bb := new(bytes.Buffer) - err := gob.NewEncoder(bb).Encode(item) - if err != nil { - b.Fatal(err) - } - _ = bb.Bytes() - bb.Reset() - } -} - -func BenchmarkEncodeJsonIter(b *testing.B) { - tb := make([]byte, 1024*10) - _, err := rand.Read(tb) - if err != nil { - b.Fatal(err) - } - - item := &Item{ - Job: "/super/test/php/class/loooooong", - Ident: "12341234-asdfasdfa-1234234-asdfasdfas", - Payload: utils.AsString(tb), - Headers: map[string][]string{"Test": {"test1", "test2"}}, - Options: &Options{ - Priority: 10, - Pipeline: "test-local-pipe", - Delay: 10, - }, - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - bb, err := json.Marshal(item) - if err != nil { - b.Fatal(err) - } - _ = bb - } -} diff --git a/plugins/beanstalk/item.go b/plugins/beanstalk/item.go deleted file mode 100644 index 03060994..00000000 --- a/plugins/beanstalk/item.go +++ /dev/null @@ -1,138 +0,0 @@ -package beanstalk - -import ( - "bytes" - "context" - "encoding/gob" - "time" - - "github.com/beanstalkd/go-beanstalk" - json "github.com/json-iterator/go" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. 
- Delay int64 `json:"delay,omitempty"` - - // Private ================ - id uint64 - conn *beanstalk.Conn - requeueFn func(context.Context, *Item) error -} - -// DelayDuration returns delay duration in a form of time.Duration. -func (o *Options) DelayDuration() time.Duration { - return time.Second * time.Duration(o.Delay) -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -// Body packs job payload into binary payload. -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -// Context packs job context (job, id) into binary payload. -// Not used in the sqs, MessageAttributes used instead -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - return i.Options.conn.Delete(i.Options.id) -} - -func (i *Item) Nack() error { - return i.Options.conn.Delete(i.Options.id) -} - -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - // overwrite the delay - i.Options.Delay = delay - i.Headers = headers - - err := i.Options.requeueFn(context.Background(), i) - if err != nil { - return err - } - - // delete old job - err = i.Options.conn.Delete(i.Options.id) - if err != nil { - return err - } - - return nil -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} - -func (j *consumer) unpack(id uint64, data []byte, out *Item) error { - err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out) - if err != nil { - return err - } - out.Options.conn = j.pool.conn - out.Options.id = id - out.Options.requeueFn = j.handleItem - - return nil -} diff --git a/plugins/beanstalk/listen.go b/plugins/beanstalk/listen.go deleted file mode 100644 index 6bb159ea..00000000 --- a/plugins/beanstalk/listen.go +++ /dev/null @@ -1,39 +0,0 @@ -package beanstalk - -import ( - "github.com/beanstalkd/go-beanstalk" -) - -func (j *consumer) listen() { - for { - select { - case <-j.stopCh: - j.log.Warn("beanstalk listener stopped") - return - default: - id, body, err := j.pool.Reserve(j.reserveTimeout) - if err != nil { - if errB, ok := err.(beanstalk.ConnError); ok { - switch errB.Err { //nolint:gocritic - case beanstalk.ErrTimeout: - j.log.Info("beanstalk reserve timeout", "warn", errB.Op) - continue - } - } - // in case of other error - continue - j.log.Error("beanstalk reserve", "error", err) - continue - } - - item := &Item{} - err = j.unpack(id, body, item) - if err != nil { - j.log.Error("beanstalk unpack item", "error", err) - continue - } - - // insert job into the priority queue - j.pq.Insert(item) - } - } -} diff --git a/plugins/beanstalk/plugin.go b/plugins/beanstalk/plugin.go deleted file mode 100644 index 529d1474..00000000 --- a/plugins/beanstalk/plugin.go +++ /dev/null @@ -1,47 +0,0 @@ -package beanstalk - -import ( - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/config" - 
"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - pluginName string = "beanstalk" -) - -type Plugin struct { - log logger.Logger - cfg config.Configurer -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -func (p *Plugin) Serve() chan error { - return make(chan error) -} - -func (p *Plugin) Stop() error { - return nil -} - -func (p *Plugin) Name() string { - return pluginName -} - -func (p *Plugin) Available() {} - -func (p *Plugin) JobsConstruct(configKey string, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return NewBeanstalkConsumer(configKey, p.log, p.cfg, eh, pq) -} - -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) { - return FromPipeline(pipe, p.log, p.cfg, eh, pq) -} diff --git a/plugins/boltdb/boltjobs/config.go b/plugins/boltdb/boltjobs/config.go deleted file mode 100644 index 8cc098c1..00000000 --- a/plugins/boltdb/boltjobs/config.go +++ /dev/null @@ -1,39 +0,0 @@ -package boltjobs - -const ( - file string = "file" - priority string = "priority" - prefetch string = "prefetch" -) - -type GlobalCfg struct { - // db file permissions - Permissions int `mapstructure:"permissions"` - // consume timeout -} - -func (c *GlobalCfg) InitDefaults() { - if c.Permissions == 0 { - c.Permissions = 0777 - } -} - -type Config struct { - File string `mapstructure:"file"` - Priority int `mapstructure:"priority"` - Prefetch int `mapstructure:"prefetch"` -} - -func (c *Config) InitDefaults() { - if c.File == "" { - c.File = "rr.db" - } - - if c.Priority == 0 { - c.Priority = 10 - } - - if c.Prefetch == 0 { - c.Prefetch = 1000 - } -} diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go deleted file mode 100644 index 62045d3b..00000000 --- a/plugins/boltdb/boltjobs/consumer.go +++ /dev/null @@ -1,430 +0,0 @@ -package boltjobs - -import ( - "bytes" - "context" - "encoding/gob" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - jobState "github.com/spiral/roadrunner/v2/pkg/state/job" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" - "github.com/spiral/roadrunner/v2/utils" - bolt "go.etcd.io/bbolt" -) - -const ( - PluginName string = "boltdb" - rrDB string = "rr.db" - - PushBucket string = "push" - InQueueBucket string = "processing" - DelayBucket string = "delayed" -) - -type consumer struct { - file string - permissions int - priority int - prefetch int - - db *bolt.DB - - bPool sync.Pool - log logger.Logger - eh events.Handler - pq priorityqueue.Queue - pipeline atomic.Value - cond *sync.Cond - - listeners uint32 - active *uint64 - delayed *uint64 - - stopCh chan struct{} -} - -func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("init_boltdb_jobs") - - if !cfg.Has(configKey) { - return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey)) - } - - // if no global section - if !cfg.Has(PluginName) { - return nil, errors.E(op, errors.Str("no global boltdb configuration")) - } - - conf := 
&GlobalCfg{} - err := cfg.UnmarshalKey(PluginName, conf) - if err != nil { - return nil, errors.E(op, err) - } - - localCfg := &Config{} - err = cfg.UnmarshalKey(configKey, localCfg) - if err != nil { - return nil, errors.E(op, err) - } - - localCfg.InitDefaults() - conf.InitDefaults() - - db, err := bolt.Open(localCfg.File, os.FileMode(conf.Permissions), &bolt.Options{ - Timeout: time.Second * 20, - NoGrowSync: false, - NoFreelistSync: false, - ReadOnly: false, - NoSync: false, - }) - - if err != nil { - return nil, errors.E(op, err) - } - - // create bucket if it does not exist - // tx.Commit invokes via the db.Update - err = db.Update(func(tx *bolt.Tx) error { - const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket)) - if err != nil { - return errors.E(op, upOp) - } - - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) - if err != nil { - return errors.E(op, upOp) - } - - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) - if err != nil { - return errors.E(op, upOp) - } - - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - cursor := inQb.Cursor() - - pushB := tx.Bucket(utils.AsBytes(PushBucket)) - - // get all items, which are in the InQueueBucket and put them into the PushBucket - for k, v := cursor.First(); k != nil; k, v = cursor.Next() { - err = pushB.Put(k, v) - if err != nil { - return errors.E(op, err) - } - } - return nil - }) - - if err != nil { - return nil, errors.E(op, err) - } - - return &consumer{ - permissions: conf.Permissions, - file: localCfg.File, - priority: localCfg.Priority, - prefetch: localCfg.Prefetch, - - bPool: sync.Pool{New: func() interface{} { - return new(bytes.Buffer) - }}, - cond: sync.NewCond(&sync.Mutex{}), - - delayed: utils.Uint64(0), - active: utils.Uint64(0), - - db: db, - log: log, - eh: e, - pq: pq, - stopCh: make(chan struct{}, 2), - }, nil -} - -func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) { - const op = errors.Op("init_boltdb_jobs") - - // if no global section - if !cfg.Has(PluginName) { - return nil, errors.E(op, errors.Str("no global boltdb configuration")) - } - - conf := &GlobalCfg{} - err := cfg.UnmarshalKey(PluginName, conf) - if err != nil { - return nil, errors.E(op, err) - } - - // add default values - conf.InitDefaults() - - db, err := bolt.Open(pipeline.String(file, rrDB), os.FileMode(conf.Permissions), &bolt.Options{ - Timeout: time.Second * 20, - NoGrowSync: false, - NoFreelistSync: false, - ReadOnly: false, - NoSync: false, - }) - - if err != nil { - return nil, errors.E(op, err) - } - - // create bucket if it does not exist - // tx.Commit invokes via the db.Update - err = db.Update(func(tx *bolt.Tx) error { - const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket)) - if err != nil { - return errors.E(op, upOp) - } - - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket)) - if err != nil { - return errors.E(op, upOp) - } - - _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket)) - if err != nil { - return errors.E(op, upOp) - } - - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - cursor := inQb.Cursor() - - pushB := tx.Bucket(utils.AsBytes(PushBucket)) - - // get all items, which are in the InQueueBucket and put them into the PushBucket - for k, v := cursor.First(); k != nil; k, v = cursor.Next() { - err = pushB.Put(k, v) - if err != nil { - return errors.E(op, err) 
- } - } - - return nil - }) - - if err != nil { - return nil, errors.E(op, err) - } - - return &consumer{ - file: pipeline.String(file, rrDB), - priority: pipeline.Int(priority, 10), - prefetch: pipeline.Int(prefetch, 1000), - permissions: conf.Permissions, - - bPool: sync.Pool{New: func() interface{} { - return new(bytes.Buffer) - }}, - cond: sync.NewCond(&sync.Mutex{}), - - delayed: utils.Uint64(0), - active: utils.Uint64(0), - - db: db, - log: log, - eh: e, - pq: pq, - stopCh: make(chan struct{}, 2), - }, nil -} - -func (c *consumer) Push(_ context.Context, job *job.Job) error { - const op = errors.Op("boltdb_jobs_push") - err := c.db.Update(func(tx *bolt.Tx) error { - item := fromJob(job) - // pool with buffers - buf := c.get() - // encode the job - enc := gob.NewEncoder(buf) - err := enc.Encode(item) - if err != nil { - c.put(buf) - return errors.E(op, err) - } - - value := make([]byte, buf.Len()) - copy(value, buf.Bytes()) - c.put(buf) - - // handle delay - if item.Options.Delay > 0 { - b := tx.Bucket(utils.AsBytes(DelayBucket)) - tKey := time.Now().UTC().Add(time.Second * time.Duration(item.Options.Delay)).Format(time.RFC3339) - - err = b.Put(utils.AsBytes(tKey), value) - if err != nil { - return errors.E(op, err) - } - - atomic.AddUint64(c.delayed, 1) - - return nil - } - - b := tx.Bucket(utils.AsBytes(PushBucket)) - err = b.Put(utils.AsBytes(item.ID()), value) - if err != nil { - return errors.E(op, err) - } - - // increment active counter - atomic.AddUint64(c.active, 1) - - return nil - }) - - if err != nil { - return errors.E(op, err) - } - - return nil -} - -func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error { - c.pipeline.Store(pipeline) - return nil -} - -func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error { - const op = errors.Op("boltdb_run") - start := time.Now() - - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p.Name() { - return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name())) - } - - // run listener - go c.listener() - go c.delayedJobsListener() - - // increase number of listeners - atomic.AddUint32(&c.listeners, 1) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - - return nil -} - -func (c *consumer) Stop(_ context.Context) error { - start := time.Now() - if atomic.LoadUint32(&c.listeners) > 0 { - c.stopCh <- struct{}{} - c.stopCh <- struct{}{} - } - - pipe := c.pipeline.Load().(*pipeline.Pipeline) - c.eh.Push(events.JobEvent{ - Event: events.EventPipeStopped, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) - return nil -} - -func (c *consumer) Pause(_ context.Context, p string) { - start := time.Now() - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - c.log.Error("no such pipeline", "requested pause on: ", p) - } - - l := atomic.LoadUint32(&c.listeners) - // no active listeners - if l == 0 { - c.log.Warn("no active listeners, nothing to pause") - return - } - - c.stopCh <- struct{}{} - c.stopCh <- struct{}{} - - atomic.AddUint32(&c.listeners, ^uint32(0)) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipePaused, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func (c *consumer) Resume(_ context.Context, p string) { - start := time.Now() - pipe := c.pipeline.Load().(*pipeline.Pipeline) - if pipe.Name() != p { - 
c.log.Error("no such pipeline", "requested resume on: ", p) - } - - l := atomic.LoadUint32(&c.listeners) - // no active listeners - if l == 1 { - c.log.Warn("amqp listener already in the active state") - return - } - - // run listener - go c.listener() - go c.delayedJobsListener() - - // increase number of listeners - atomic.AddUint32(&c.listeners, 1) - - c.eh.Push(events.JobEvent{ - Event: events.EventPipeActive, - Driver: pipe.Driver(), - Pipeline: pipe.Name(), - Start: start, - Elapsed: time.Since(start), - }) -} - -func (c *consumer) State(_ context.Context) (*jobState.State, error) { - pipe := c.pipeline.Load().(*pipeline.Pipeline) - - return &jobState.State{ - Pipeline: pipe.Name(), - Driver: pipe.Driver(), - Queue: PushBucket, - Active: int64(atomic.LoadUint64(c.active)), - Delayed: int64(atomic.LoadUint64(c.delayed)), - Ready: toBool(atomic.LoadUint32(&c.listeners)), - }, nil -} - -// Private - -func (c *consumer) get() *bytes.Buffer { - return c.bPool.Get().(*bytes.Buffer) -} - -func (c *consumer) put(b *bytes.Buffer) { - b.Reset() - c.bPool.Put(b) -} - -func toBool(r uint32) bool { - return r > 0 -} diff --git a/plugins/boltdb/boltjobs/item.go b/plugins/boltdb/boltjobs/item.go deleted file mode 100644 index 837f8c63..00000000 --- a/plugins/boltdb/boltjobs/item.go +++ /dev/null @@ -1,229 +0,0 @@ -package boltjobs - -import ( - "bytes" - "encoding/gob" - "sync/atomic" - "time" - - json "github.com/json-iterator/go" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/jobs/job" - "github.com/spiral/roadrunner/v2/utils" - "go.etcd.io/bbolt" -) - -type Item struct { - // Job contains pluginName of job broker (usually PHP class). - Job string `json:"job"` - - // Ident is unique identifier of the job, should be provided from outside - Ident string `json:"id"` - - // Payload is string data (usually JSON) passed to Job broker. - Payload string `json:"payload"` - - // Headers with key-values pairs - Headers map[string][]string `json:"headers"` - - // Options contains set of PipelineOptions specific to job execution. Can be empty. - Options *Options `json:"options,omitempty"` -} - -// Options carry information about how to handle given job. -type Options struct { - // Priority is job priority, default - 10 - // pointer to distinguish 0 as a priority and nil as priority not set - Priority int64 `json:"priority"` - - // Pipeline manually specified pipeline. - Pipeline string `json:"pipeline,omitempty"` - - // Delay defines time duration to delay execution for. Defaults to none. 
- Delay int64 `json:"delay,omitempty"` - - // private - db *bbolt.DB - active *uint64 - delayed *uint64 -} - -func (i *Item) ID() string { - return i.Ident -} - -func (i *Item) Priority() int64 { - return i.Options.Priority -} - -func (i *Item) Body() []byte { - return utils.AsBytes(i.Payload) -} - -func (i *Item) Context() ([]byte, error) { - ctx, err := json.Marshal( - struct { - ID string `json:"id"` - Job string `json:"job"` - Headers map[string][]string `json:"headers"` - Pipeline string `json:"pipeline"` - }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline}, - ) - - if err != nil { - return nil, err - } - - return ctx, nil -} - -func (i *Item) Ack() error { - const op = errors.Op("boltdb_item_ack") - tx, err := i.Options.db.Begin(true) - if err != nil { - _ = tx.Rollback() - return errors.E(op, err) - } - - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - err = inQb.Delete(utils.AsBytes(i.ID())) - if err != nil { - _ = tx.Rollback() - return errors.E(op, err) - } - - if i.Options.Delay > 0 { - atomic.AddUint64(i.Options.delayed, ^uint64(0)) - } else { - atomic.AddUint64(i.Options.active, ^uint64(0)) - } - - return tx.Commit() -} - -func (i *Item) Nack() error { - const op = errors.Op("boltdb_item_ack") - /* - steps: - 1. begin tx - 2. get item by ID from the InQueueBucket (previously put in the listener) - 3. put it back to the PushBucket - 4. Delete it from the InQueueBucket - */ - tx, err := i.Options.db.Begin(true) - if err != nil { - _ = tx.Rollback() - return errors.E(op, err) - } - - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - v := inQb.Get(utils.AsBytes(i.ID())) - - pushB := tx.Bucket(utils.AsBytes(PushBucket)) - - err = pushB.Put(utils.AsBytes(i.ID()), v) - if err != nil { - _ = tx.Rollback() - return errors.E(op, err) - } - - err = inQb.Delete(utils.AsBytes(i.ID())) - if err != nil { - _ = tx.Rollback() - return errors.E(op, err) - } - - return tx.Commit() -} - -/* -Requeue algorithm: -1. Rewrite item headers and delay. -2. Begin writable transaction on attached to the item db. -3. Delete item from the InQueueBucket -4. Handle items with the delay: - 4.1. Get DelayBucket - 4.2. Make a key by adding the delay to the time.Now() in RFC3339 format - 4.3. Put this key with value to the DelayBucket -5. 
W/o delay, put the key with value to the PushBucket (requeue) -*/ -func (i *Item) Requeue(headers map[string][]string, delay int64) error { - const op = errors.Op("boltdb_item_requeue") - i.Headers = headers - i.Options.Delay = delay - - tx, err := i.Options.db.Begin(true) - if err != nil { - return errors.E(op, err) - } - - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - err = inQb.Delete(utils.AsBytes(i.ID())) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - // encode the item - buf := new(bytes.Buffer) - enc := gob.NewEncoder(buf) - err = enc.Encode(i) - val := make([]byte, buf.Len()) - copy(val, buf.Bytes()) - buf.Reset() - - if delay > 0 { - delayB := tx.Bucket(utils.AsBytes(DelayBucket)) - tKey := time.Now().UTC().Add(time.Second * time.Duration(delay)).Format(time.RFC3339) - - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - err = delayB.Put(utils.AsBytes(tKey), val) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - return tx.Commit() - } - - pushB := tx.Bucket(utils.AsBytes(PushBucket)) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - err = pushB.Put(utils.AsBytes(i.ID()), val) - if err != nil { - return errors.E(op, i.rollback(err, tx)) - } - - return tx.Commit() -} - -func (i *Item) attachDB(db *bbolt.DB, active, delayed *uint64) { - i.Options.db = db - i.Options.active = active - i.Options.delayed = delayed -} - -func (i *Item) rollback(err error, tx *bbolt.Tx) error { - errR := tx.Rollback() - if errR != nil { - return errors.Errorf("transaction commit error: %v, rollback failed: %v", err, errR) - } - return errors.Errorf("transaction commit error: %v", err) -} - -func fromJob(job *job.Job) *Item { - return &Item{ - Job: job.Job, - Ident: job.Ident, - Payload: job.Payload, - Headers: job.Headers, - Options: &Options{ - Priority: job.Options.Priority, - Pipeline: job.Options.Pipeline, - Delay: job.Options.Delay, - }, - } -} diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go deleted file mode 100644 index 081d3f57..00000000 --- a/plugins/boltdb/boltjobs/listener.go +++ /dev/null @@ -1,156 +0,0 @@ -package boltjobs - -import ( - "bytes" - "encoding/gob" - "sync/atomic" - "time" - - "github.com/spiral/roadrunner/v2/utils" - bolt "go.etcd.io/bbolt" -) - -func (c *consumer) listener() { - tt := time.NewTicker(time.Millisecond) - defer tt.Stop() - for { - select { - case <-c.stopCh: - c.log.Info("boltdb listener stopped") - return - case <-tt.C: - if atomic.LoadUint64(c.active) > uint64(c.prefetch) { - time.Sleep(time.Second) - continue - } - tx, err := c.db.Begin(true) - if err != nil { - c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err) - continue - } - - b := tx.Bucket(utils.AsBytes(PushBucket)) - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - - // get first item - k, v := b.Cursor().First() - if k == nil && v == nil { - _ = tx.Commit() - continue - } - - buf := bytes.NewReader(v) - dec := gob.NewDecoder(buf) - - item := &Item{} - err = dec.Decode(item) - if err != nil { - c.rollback(err, tx) - continue - } - - err = inQb.Put(utils.AsBytes(item.ID()), v) - if err != nil { - c.rollback(err, tx) - continue - } - - // delete key from the PushBucket - err = b.Delete(k) - if err != nil { - c.rollback(err, tx) - continue - } - - err = tx.Commit() - if err != nil { - c.rollback(err, tx) - continue - } - - // attach pointer to the DB - item.attachDB(c.db, c.active, c.delayed) - // as the last step, after commit, put the 
item into the PQ - c.pq.Insert(item) - } - } -} - -func (c *consumer) delayedJobsListener() { - tt := time.NewTicker(time.Second) - defer tt.Stop() - - // just some 90's - loc, err := time.LoadLocation("UTC") - if err != nil { - c.log.Error("failed to load location, delayed jobs won't work", "error", err) - return - } - - var startDate = utils.AsBytes(time.Date(1990, 1, 1, 0, 0, 0, 0, loc).Format(time.RFC3339)) - - for { - select { - case <-c.stopCh: - c.log.Info("boltdb listener stopped") - return - case <-tt.C: - tx, err := c.db.Begin(true) - if err != nil { - c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err) - continue - } - - delayB := tx.Bucket(utils.AsBytes(DelayBucket)) - inQb := tx.Bucket(utils.AsBytes(InQueueBucket)) - - cursor := delayB.Cursor() - endDate := utils.AsBytes(time.Now().UTC().Format(time.RFC3339)) - - for k, v := cursor.Seek(startDate); k != nil && bytes.Compare(k, endDate) <= 0; k, v = cursor.Next() { - buf := bytes.NewReader(v) - dec := gob.NewDecoder(buf) - - item := &Item{} - err = dec.Decode(item) - if err != nil { - c.rollback(err, tx) - continue - } - - err = inQb.Put(utils.AsBytes(item.ID()), v) - if err != nil { - c.rollback(err, tx) - continue - } - - // delete key from the PushBucket - err = delayB.Delete(k) - if err != nil { - c.rollback(err, tx) - continue - } - - // attach pointer to the DB - item.attachDB(c.db, c.active, c.delayed) - // as the last step, after commit, put the item into the PQ - c.pq.Insert(item) - } - - err = tx.Commit() - if err != nil { - c.rollback(err, tx) - continue - } - } - } -} - -func (c *consumer) rollback(err error, tx *bolt.Tx) { - errR := tx.Rollback() - if errR != nil { - c.log.Error("transaction commit error, rollback failed", "error", err, "rollback error", errR) - } - - c.log.Error("transaction commit error, rollback succeed", "error", err) -} diff --git a/plugins/boltdb/boltkv/config.go b/plugins/boltdb/boltkv/config.go deleted file mode 100644 index 56d00674..00000000 --- a/plugins/boltdb/boltkv/config.go +++ /dev/null @@ -1,30 +0,0 @@ -package boltkv - -type Config struct { - // File is boltDB file. 
No need to create it by your own, - // boltdb driver is able to create the file, or read existing - File string - // Bucket to store data in boltDB - bucket string - // db file permissions - Permissions int - // timeout - Interval int `mapstructure:"interval"` -} - -// InitDefaults initializes default values for the boltdb -func (s *Config) InitDefaults() { - s.bucket = "default" - - if s.File == "" { - s.File = "rr.db" // default file name - } - - if s.Permissions == 0 { - s.Permissions = 0777 // free for all - } - - if s.Interval == 0 { - s.Interval = 60 // default is 60 seconds timeout - } -} diff --git a/plugins/boltdb/boltkv/driver.go b/plugins/boltdb/boltkv/driver.go deleted file mode 100644 index 656d572e..00000000 --- a/plugins/boltdb/boltkv/driver.go +++ /dev/null @@ -1,472 +0,0 @@ -package boltkv - -import ( - "bytes" - "encoding/gob" - "os" - "strings" - "sync" - "time" - - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" - kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta" - "github.com/spiral/roadrunner/v2/utils" - bolt "go.etcd.io/bbolt" -) - -const ( - RootPluginName string = "kv" -) - -type Driver struct { - clearMu sync.RWMutex - // db instance - DB *bolt.DB - // name should be UTF-8 - bucket []byte - log logger.Logger - cfg *Config - - // gc contains keys with timeouts - gc sync.Map - // default timeout for cache cleanup is 1 minute - timeout time.Duration - - // stop is used to stop keys GC and close boltdb connection - stop chan struct{} -} - -func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) { - const op = errors.Op("new_boltdb_driver") - - if !cfgPlugin.Has(RootPluginName) { - return nil, errors.E(op, errors.Str("no kv section in the configuration")) - } - - d := &Driver{ - log: log, - stop: make(chan struct{}), - } - - err := cfgPlugin.UnmarshalKey(key, &d.cfg) - if err != nil { - return nil, errors.E(op, err) - } - - // add default values - d.cfg.InitDefaults() - - d.bucket = []byte(d.cfg.bucket) - d.timeout = time.Duration(d.cfg.Interval) * time.Second - d.gc = sync.Map{} - - db, err := bolt.Open(d.cfg.File, os.FileMode(d.cfg.Permissions), &bolt.Options{ - Timeout: time.Second * 20, - NoGrowSync: false, - NoFreelistSync: false, - ReadOnly: false, - NoSync: false, - }) - - if err != nil { - return nil, errors.E(op, err) - } - - d.DB = db - - // create bucket if it does not exist - // tx.Commit invokes via the db.Update - err = db.Update(func(tx *bolt.Tx) error { - const upOp = errors.Op("boltdb_plugin_update") - _, err = tx.CreateBucketIfNotExists([]byte(d.cfg.bucket)) - if err != nil { - return errors.E(op, upOp) - } - return nil - }) - - if err != nil { - return nil, errors.E(op, err) - } - - go d.startGCLoop() - - return d, nil -} - -func (d *Driver) Has(keys ...string) (map[string]bool, error) { - const op = errors.Op("boltdb_driver_has") - d.log.Debug("boltdb HAS method called", "args", keys) - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - m := make(map[string]bool, len(keys)) - - // this is readable transaction - err := d.DB.View(func(tx *bolt.Tx) error { - // Get retrieves the value for a key in the bucket. - // Returns a nil value if the key does not exist or if the key is a nested bucket. - // The returned value is only valid for the life of the transaction. 
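// --- Illustrative sketch (editorial, not part of the original patch) ---
// The comment above states a real bbolt constraint: the slice returned by
// Bucket.Get points into the memory-mapped DB file and is only valid while
// the transaction is open, which is why Get/MGet below decode or copy the
// value before returning it. A minimal example of that copy-out pattern;
// the helper name readCopy is hypothetical, bolt refers to "go.etcd.io/bbolt"
// (already imported by this file):
func readCopy(db *bolt.DB, bucket, key []byte) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil // unknown bucket, nothing to read
		}
		if v := b.Get(key); v != nil {
			// copy while the read transaction is still open
			out = make([]byte, len(v))
			copy(out, v)
		}
		return nil
	})
	return out, err // out stays valid after View returns
}
// --- end of sketch ---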
- for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return errors.E(op, errors.EmptyKey) - } - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - exist := b.Get([]byte(keys[i])) - if exist != nil { - m[keys[i]] = true - } - } - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - d.log.Debug("boltdb HAS method finished") - return m, nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (d *Driver) Get(key string) ([]byte, error) { - const op = errors.Op("boltdb_driver_get") - // to get cases like " " - keyTrimmed := strings.TrimSpace(key) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - - var val []byte - err := d.DB.View(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - val = b.Get([]byte(key)) - - // try to decode values - if val != nil { - buf := bytes.NewReader(val) - decoder := gob.NewDecoder(buf) - - var i string - err := decoder.Decode(&i) - if err != nil { - // unsafe (w/o runes) convert - return errors.E(op, err) - } - - // set the value - val = utils.AsBytes(i) - } - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - return val, nil -} - -func (d *Driver) MGet(keys ...string) (map[string][]byte, error) { - const op = errors.Op("boltdb_driver_mget") - // defense - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - } - - m := make(map[string][]byte, len(keys)) - - err := d.DB.View(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - - buf := new(bytes.Buffer) - var out []byte - buf.Grow(100) - for i := range keys { - value := b.Get([]byte(keys[i])) - buf.Write(value) - // allocate enough space - dec := gob.NewDecoder(buf) - if value != nil { - err := dec.Decode(&out) - if err != nil { - return errors.E(op, err) - } - m[keys[i]] = out - buf.Reset() - out = nil - } - } - - return nil - }) - if err != nil { - return nil, errors.E(op, err) - } - - return m, nil -} - -// Set puts the K/V to the bolt -func (d *Driver) Set(items ...*kvv1.Item) error { - const op = errors.Op("boltdb_driver_set") - if items == nil { - return errors.E(op, errors.NoKeys) - } - - // start writable transaction - tx, err := d.DB.Begin(true) - if err != nil { - return errors.E(op, err) - } - defer func() { - err = tx.Commit() - if err != nil { - errRb := tx.Rollback() - if errRb != nil { - d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) - } - } - }() - - b := tx.Bucket(d.bucket) - // use access by index to avoid copying - for i := range items { - // performance note: pass a prepared bytes slice with initial cap - // we can't move buf and gob out of loop, because we need to clear both from data - // but gob will contain (w/o re-init) the past data - buf := new(bytes.Buffer) - encoder := gob.NewEncoder(buf) - if errors.Is(errors.EmptyItem, err) { - return errors.E(op, errors.EmptyItem) - } - - // Encode value - err = encoder.Encode(&items[i].Value) - if err != nil { - return errors.E(op, err) - } - // buf.Bytes will copy the underlying 
slice. Take a look in case of performance problems - err = b.Put([]byte(items[i].Key), buf.Bytes()) - if err != nil { - return errors.E(op, err) - } - - // if there are no errors, and TTL > 0, we put the key with timeout to the hashmap, for future check - // we do not need mutex here, since we use sync.Map - if items[i].Timeout != "" { - // check correctness of provided TTL - _, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return errors.E(op, err) - } - // Store key TTL in the separate map - d.gc.Store(items[i].Key, items[i].Timeout) - } - - buf.Reset() - } - - return nil -} - -// Delete all keys from DB -func (d *Driver) Delete(keys ...string) error { - const op = errors.Op("boltdb_driver_delete") - if keys == nil { - return errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for _, key := range keys { - keyTrimmed := strings.TrimSpace(key) - if keyTrimmed == "" { - return errors.E(op, errors.EmptyKey) - } - } - - // start writable transaction - tx, err := d.DB.Begin(true) - if err != nil { - return errors.E(op, err) - } - - defer func() { - err = tx.Commit() - if err != nil { - errRb := tx.Rollback() - if errRb != nil { - d.log.Error("during the commit, Rollback error occurred", "commit error", err, "rollback error", errRb) - } - } - }() - - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - - for _, key := range keys { - err = b.Delete([]byte(key)) - if err != nil { - return errors.E(op, err) - } - } - - return nil -} - -// MExpire sets the expiration time to the key -// If key already has the expiration time, it will be overwritten -func (d *Driver) MExpire(items ...*kvv1.Item) error { - const op = errors.Op("boltdb_driver_mexpire") - for i := range items { - if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" { - return errors.E(op, errors.Str("should set timeout and at least one key")) - } - - // verify provided TTL - _, err := time.Parse(time.RFC3339, items[i].Timeout) - if err != nil { - return errors.E(op, err) - } - - d.gc.Store(items[i].Key, items[i].Timeout) - } - return nil -} - -func (d *Driver) TTL(keys ...string) (map[string]string, error) { - const op = errors.Op("boltdb_driver_ttl") - if keys == nil { - return nil, errors.E(op, errors.NoKeys) - } - - // should not be empty keys - for i := range keys { - keyTrimmed := strings.TrimSpace(keys[i]) - if keyTrimmed == "" { - return nil, errors.E(op, errors.EmptyKey) - } - } - - m := make(map[string]string, len(keys)) - - for i := range keys { - if item, ok := d.gc.Load(keys[i]); ok { - // a little bit dangerous operation, but user can't store value other that kv.Item.TTL --> int64 - m[keys[i]] = item.(string) - } - } - return m, nil -} - -func (d *Driver) Clear() error { - err := d.DB.Update(func(tx *bolt.Tx) error { - err := tx.DeleteBucket(d.bucket) - if err != nil { - d.log.Error("boltdb delete bucket", "error", err) - return err - } - - _, err = tx.CreateBucket(d.bucket) - if err != nil { - d.log.Error("boltdb create bucket", "error", err) - return err - } - - return nil - }) - - if err != nil { - d.log.Error("clear transaction failed", "error", err) - return err - } - - d.clearMu.Lock() - d.gc = sync.Map{} - d.clearMu.Unlock() - - return nil -} - -func (d *Driver) Stop() { - d.stop <- struct{}{} -} - -// ========================= PRIVATE ================================= - -func (d *Driver) startGCLoop() { //nolint:gocognit - go func() { - t := time.NewTicker(d.timeout) - defer t.Stop() - for { - select { - case <-t.C: - 
d.clearMu.RLock() - - // calculate current time before loop started to be fair - now := time.Now() - d.gc.Range(func(key, value interface{}) bool { - const op = errors.Op("boltdb_plugin_gc") - k := key.(string) - v, err := time.Parse(time.RFC3339, value.(string)) - if err != nil { - return false - } - - if now.After(v) { - // time expired - d.gc.Delete(k) - d.log.Debug("key deleted", "key", k) - err := d.DB.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(d.bucket) - if b == nil { - return errors.E(op, errors.NoSuchBucket) - } - err := b.Delete(utils.AsBytes(k)) - if err != nil { - return errors.E(op, err) - } - return nil - }) - if err != nil { - d.log.Error("error during the gc phase of update", "error", err) - return false - } - } - return true - }) - - d.clearMu.RUnlock() - case <-d.stop: - err := d.DB.Close() - if err != nil { - d.log.Error("error closing the boltdb connection", "error", err) - } - return - } - } - }() -} diff --git a/plugins/boltdb/doc/boltjobs.drawio b/plugins/boltdb/doc/boltjobs.drawio deleted file mode 100644 index 7d1f3531..00000000 --- a/plugins/boltdb/doc/boltjobs.drawio +++ /dev/null @@ -1 +0,0 @@ -[single-line base64-encoded draw.io diagram of the boltdb jobs flow omitted]
\ No newline at end of file diff --git a/plugins/boltdb/doc/job_lifecycle.md b/plugins/boltdb/doc/job_lifecycle.md deleted file mode 100644 index 1424e586..00000000 --- a/plugins/boltdb/doc/job_lifecycle.md +++ /dev/null @@ -1,9 +0,0 @@ -### Job lifecycle - -There are several boltdb buckets: - -1. `PushBucket` - used for jobs pushed via RPC. -2. `InQueueBucket` - when a job is consumed from the `PushBucket`, it is copied into the priority queue and -moved into the `InQueueBucket` within the same transaction, where it waits for acknowledgement (see the sketch below). -3. `DelayBucket` - used for delayed jobs. An RFC3339 timestamp is used to track delay expiration. - diff --git a/plugins/boltdb/plugin.go b/plugins/boltdb/plugin.go deleted file mode 100644 index ad98cf3c..00000000 --- a/plugins/boltdb/plugin.go +++ /dev/null @@ -1,68 +0,0 @@ -package boltdb - -import ( - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/common/jobs" - "github.com/spiral/roadrunner/v2/common/kv" - "github.com/spiral/roadrunner/v2/pkg/events" - priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue" - "github.com/spiral/roadrunner/v2/plugins/boltdb/boltjobs" - "github.com/spiral/roadrunner/v2/plugins/boltdb/boltkv" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - PluginName string = "boltdb" -) - -// Plugin BoltDB K/V storage.
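// --- Illustrative sketch (editorial, not part of the original patch) ---
// It shows the bucket hand-off described in doc/job_lifecycle.md above:
// a pushed job is moved from the "push" bucket (PushBucket) into the
// "processing" bucket (InQueueBucket) inside a single bbolt transaction,
// which is what boltjobs/listener.go does before inserting the item into
// the priority queue. The helper name moveOneJob is hypothetical; the
// bucket names are the constants from boltjobs/consumer.go, and bolt
// refers to "go.etcd.io/bbolt".
func moveOneJob(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		push := tx.Bucket([]byte("push"))          // PushBucket
		inQueue := tx.Bucket([]byte("processing")) // InQueueBucket
		if push == nil || inQueue == nil {
			return nil // buckets are created when the consumer starts
		}
		k, v := push.Cursor().First()
		if k == nil {
			return nil // nothing to consume
		}
		// both writes share one transaction, so the job is either still
		// waiting in "push" or tracked in "processing", never lost
		if err := inQueue.Put(k, v); err != nil {
			return err
		}
		return push.Delete(k)
	})
}
// --- end of sketch ---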
-type Plugin struct { - cfg config.Configurer - // logger - log logger.Logger -} - -func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error { - p.log = log - p.cfg = cfg - return nil -} - -// Serve is noop here -func (p *Plugin) Serve() chan error { - return make(chan error, 1) -} - -func (p *Plugin) Stop() error { - return nil -} - -// Name returns plugin name -func (p *Plugin) Name() string { - return PluginName -} - -// Available interface implementation -func (p *Plugin) Available() {} - -func (p *Plugin) KVConstruct(key string) (kv.Storage, error) { - const op = errors.Op("boltdb_plugin_provide") - st, err := boltkv.NewBoltDBDriver(p.log, key, p.cfg) - if err != nil { - return nil, errors.E(op, err) - } - - return st, nil -} - -// JOBS bbolt implementation - -func (p *Plugin) JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) { - return boltjobs.NewBoltDBJobs(configKey, p.log, p.cfg, e, queue) -} - -func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) { - return boltjobs.FromPipeline(pipe, p.log, p.cfg, e, queue) -} diff --git a/plugins/broadcast/config.go b/plugins/broadcast/config.go deleted file mode 100644 index 9531025b..00000000 --- a/plugins/broadcast/config.go +++ /dev/null @@ -1,27 +0,0 @@ -package broadcast - -/* - -# Global redis config (priority - 2) -default: - # redis configuration here - -websockets: # <----- one of possible subscribers - path: /ws - broker: default # <------ broadcast broker to use --------------- | - | match -broadcast: # <-------- broadcast entry point plugin | - default: # <----------------------------------------------------- | - driver: redis - # local redis config (priority - 1) - test: - driver: memory - - -priority local -> global -*/ - -// Config ... 
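// --- Illustrative sketch (editorial, not part of the original patch) ---
// It spells out the "priority local -> global" rule from the comment above
// the way GetDriver (later in plugin.go) applies it: for a subscriber named
// "default" the local key "broadcast.default.config" is checked first, and
// only then the global section named after the subscriber itself. The helper
// name and the "default" subscriber are illustrative; PluginName and conf are
// the constants declared in plugin.go, and fmt is the standard library package.
func resolveConfigKeys(name string) (local, global string) {
	// e.g. name == "default" -> local == "broadcast.default.config"
	local = fmt.Sprintf("%s.%s.%s", PluginName, name, conf)
	// fallback: the global section is keyed by the subscriber name itself
	global = name
	return local, global
}
// --- end of sketch ---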
-type Config struct { - Data map[string]interface{} `mapstructure:"broadcast"` -} diff --git a/plugins/broadcast/doc/broadcast_arch.drawio b/plugins/broadcast/doc/broadcast_arch.drawio deleted file mode 100644 index fd5ff1f9..00000000 --- a/plugins/broadcast/doc/broadcast_arch.drawio +++ /dev/null @@ -1 +0,0 @@ -[single-line base64-encoded draw.io diagram of the broadcast architecture omitted]
\ No newline at end of file diff --git a/plugins/broadcast/interface.go b/plugins/broadcast/interface.go deleted file mode 100644 index eda3572f..00000000 --- a/plugins/broadcast/interface.go +++ /dev/null @@ -1,7 +0,0 @@ -package broadcast - -import "github.com/spiral/roadrunner/v2/common/pubsub" - -type Broadcaster interface { - GetDriver(key string) (pubsub.SubReader, error) -} diff --git a/plugins/broadcast/plugin.go b/plugins/broadcast/plugin.go deleted file mode 100644 index 40263eaa..00000000 --- a/plugins/broadcast/plugin.go +++ /dev/null @@ -1,192 +0,0 @@ -package broadcast - -import ( - "fmt" - "sync" - - endure "github.com/spiral/endure/pkg/container" - "github.com/spiral/errors" - "github.com/spiral/roadrunner/v2/common/pubsub" - "github.com/spiral/roadrunner/v2/plugins/config" - "github.com/spiral/roadrunner/v2/plugins/logger" -) - -const ( - PluginName string = "broadcast" - // driver is the mandatory field which should be present in every storage - driver string = "driver" - - // every driver should have a config section for the local configuration - conf string = "config" -) - -type Plugin struct { - sync.RWMutex - - cfg *Config - cfgPlugin config.Configurer - log logger.Logger - // publishers implement the Publisher interface - // and are able to receive a payload - publishers map[string]pubsub.PubSub - constructors map[string]pubsub.Constructor -} - -func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error { - const op = errors.Op("broadcast_plugin_init") - if !cfg.Has(PluginName) { - return errors.E(op, errors.Disabled) - } - p.cfg = &Config{} - // unmarshal config section - err := cfg.UnmarshalKey(PluginName, &p.cfg.Data) - if err != nil { - return errors.E(op, err) - } - - p.publishers = make(map[string]pubsub.PubSub) - p.constructors = make(map[string]pubsub.Constructor) - - p.log = log - p.cfgPlugin = cfg - return nil -} - -func (p *Plugin) Serve() chan error { - return make(chan error, 1) -} - -func (p *Plugin) Stop() error { - return nil -} - -func (p *Plugin) Collects() []interface{} { - return []interface{}{ - p.CollectPublishers, - } -} - -// CollectPublishers collects all
plugins who implement pubsub.Publisher interface -func (p *Plugin) CollectPublishers(name endure.Named, constructor pubsub.Constructor) { - // key redis, value - interface - p.constructors[name.Name()] = constructor -} - -// Publish is an entry point to the websocket PUBSUB -func (p *Plugin) Publish(m *pubsub.Message) error { - p.Lock() - defer p.Unlock() - - const op = errors.Op("broadcast_plugin_publish") - - // check if any publisher registered - if len(p.publishers) > 0 { - for j := range p.publishers { - err := p.publishers[j].Publish(m) - if err != nil { - return errors.E(op, err) - } - } - return nil - } else { - p.log.Warn("no publishers registered") - } - - return nil -} - -func (p *Plugin) PublishAsync(m *pubsub.Message) { - // TODO(rustatian) channel here? - go func() { - p.Lock() - defer p.Unlock() - // check if any publisher registered - if len(p.publishers) > 0 { - for j := range p.publishers { - err := p.publishers[j].Publish(m) - if err != nil { - p.log.Error("publishAsync", "error", err) - // continue publishing to the other registered publishers - continue - } - } - } else { - p.log.Warn("no publishers registered") - } - }() -} - -func (p *Plugin) GetDriver(key string) (pubsub.SubReader, error) { - const op = errors.Op("broadcast_plugin_get_driver") - - // choose a driver - if val, ok := p.cfg.Data[key]; ok { - // check type of the v - // should be a map[string]interface{} - switch t := val.(type) { - // correct type - case map[string]interface{}: - if _, ok := t[driver]; !ok { - panic(errors.E(op, errors.Errorf("could not find mandatory driver field in the %s storage", val))) - } - default: - return nil, errors.E(op, errors.Str("wrong type detected in the configuration, please, check yaml indentation")) - } - - // config key for the particular sub-driver broadcast.memcached.config - configKey := fmt.Sprintf("%s.%s.%s", PluginName, key, conf) - - drName := val.(map[string]interface{})[driver] - - // driver name should be a string - if drStr, ok := drName.(string); ok { - if _, ok := p.constructors[drStr]; !ok { - return nil, errors.E(op, errors.Errorf("no drivers with the requested name registered, registered: %s, requested: %s", p.publishers, drStr)) - } - - switch { - // try local config first - case p.cfgPlugin.Has(configKey): - // we found a local configuration - ps, err := p.constructors[drStr].PSConstruct(configKey) - if err != nil { - return nil, errors.E(op, err) - } - - // save the initialized publisher channel - // for the in-memory, register new publishers - p.publishers[configKey] = ps - - return ps, nil - case p.cfgPlugin.Has(key): - // try global driver section after local - ps, err := p.constructors[drStr].PSConstruct(key) - if err != nil { - return nil, errors.E(op, err) - } - - // save the initialized publisher channel - // for the in-memory, register new publishers - p.publishers[configKey] = ps - - return ps, nil - default: - p.log.Error("can't find local or global configuration, this section will be skipped", "local: ", configKey, "global: ", key) - } - } - } - return nil, errors.E(op, errors.Str("could not find driver by provided key")) -} - -func (p *Plugin) RPC() interface{} { - return &rpc{ - plugin: p, - log: p.log, - } -} - -func (p *Plugin) Name() string { - return PluginName -} - -func (p *Plugin) Available() {} diff --git a/plugins/broadcast/rpc.go b/plugins/broadcast/rpc.go deleted file mode 100644 index 475076a0..00000000 --- a/plugins/broadcast/rpc.go +++ /dev/null @@ -1,87 +0,0 @@ -package broadcast - -import ( - "github.com/spiral/errors" 
- "github.com/spiral/roadrunner/v2/common/pubsub" - "github.com/spiral/roadrunner/v2/plugins/logger" - websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta" -) - -// rpc collectors struct -type rpc struct { - plugin *Plugin - log logger.Logger -} - -// Publish ... msg is a proto decoded payload -// see: root/proto -func (r *rpc) Publish(in *websocketsv1.Request, out *websocketsv1.Response) error { - const op = errors.Op("broadcast_publish") - - // just return in case of nil message - if in == nil { - out.Ok = false - return nil - } - - r.log.Debug("message published", "msg", in.String()) - msgLen := len(in.GetMessages()) - - for i := 0; i < msgLen; i++ { - for j := 0; j < len(in.GetMessages()[i].GetTopics()); j++ { - if in.GetMessages()[i].GetTopics()[j] == "" { - r.log.Warn("message with empty topic, skipping") - // skip empty topics - continue - } - - tmp := &pubsub.Message{ - Topic: in.GetMessages()[i].GetTopics()[j], - Payload: in.GetMessages()[i].GetPayload(), - } - - err := r.plugin.Publish(tmp) - if err != nil { - out.Ok = false - return errors.E(op, err) - } - } - } - - out.Ok = true - return nil -} - -// PublishAsync ... -// see: root/proto -func (r *rpc) PublishAsync(in *websocketsv1.Request, out *websocketsv1.Response) error { - // just return in case of nil message - if in == nil { - out.Ok = false - return nil - } - - r.log.Debug("message published", "msg", in.GetMessages()) - - msgLen := len(in.GetMessages()) - - for i := 0; i < msgLen; i++ { - for j := 0; j < len(in.GetMessages()[i].GetTopics()); j++ { - if in.GetMessages()[i].GetTopics()[j] == "" { - r.log.Warn("message with empty topic, skipping") - // skip empty topics - continue - } - - tmp := &pubsub.Message{ - Topic: in.GetMessages()[i].GetTopics()[j], - Payload: in.GetMessages()[i].GetPayload(), - } - - r.plugin.PublishAsync(tmp) - } - } - - out.Ok = true - return nil -} diff --git a/plugins/config/config.go b/plugins/config/config.go deleted file mode 100644 index b5807921..00000000 --- a/plugins/config/config.go +++ /dev/null @@ -1,10 +0,0 @@ -package config - -import "time" - -// General is the part of the config plugin which contains general for the whole RR2 parameters -// For example - http timeouts, headers sizes etc and also graceful shutdown timeout should be the same across whole application -type General struct { - // GracefulTimeout for the temporal and http - GracefulTimeout time.Duration -} diff --git a/plugins/config/interface.go b/plugins/config/interface.go deleted file mode 100644 index b3854e09..00000000 --- a/plugins/config/interface.go +++ /dev/null @@ -1,29 +0,0 @@ -package config - -type Configurer interface { - // UnmarshalKey takes a single key and unmarshals it into a Struct. - // - // func (h *HttpService) Init(cp config.Configurer) error { - // h.config := &HttpConfig{} - // if err := configProvider.UnmarshalKey("http", h.config); err != nil { - // return err - // } - // } - UnmarshalKey(name string, out interface{}) error - - // Unmarshal unmarshal the config into a Struct. Make sure that the tags - // on the fields of the structure are properly set. - Unmarshal(out interface{}) error - - // Get used to get config section - Get(name string) interface{} - - // Overwrite used to overwrite particular values in the unmarshalled config - Overwrite(values map[string]interface{}) error - - // Has checks if config section exists. - Has(name string) bool - - // GetCommonConfig returns General section. 
Read-only - GetCommonConfig() *General -} diff --git a/plugins/config/plugin.go b/plugins/config/plugin.go deleted file mode 100755 index 918381c4..00000000 --- a/plugins/config/plugin.go +++ /dev/null @@ -1,174 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "os" - "strings" - - "github.com/spf13/viper" - "github.com/spiral/errors" -) - -const PluginName string = "config" - -type Viper struct { - viper *viper.Viper - Path string - Prefix string - Type string - ReadInCfg []byte - // user defined Flags in the form of