author     Valery Piashchynski <[email protected]>  2021-09-16 21:46:50 +0300
committer  GitHub <[email protected]>  2021-09-16 21:46:50 +0300
commit     3581b45f237a3f7aa29591ceb2bf6f4a4642a2f5 (patch)
tree       e723b19ec1ac16b7ccc7b3c2da69d4a416d63d81
parent     337d292dd2d6ff0a555098b1970d8194d8df8bc2 (diff)
parent     823d831b57b75f70c7c3bbbee355f2016633bb3b (diff)

[#803]: feat(plugins): move plugins to a separate repository (tag: v2.5.0-alpha.2)
-rw-r--r--.github/workflows/linux.yml13
-rw-r--r--.vscode/settings.json2
-rw-r--r--Makefile90
-rw-r--r--bst/bst.go (renamed from pkg/bst/bst.go)0
-rw-r--r--bst/bst_test.go (renamed from pkg/bst/bst_test.go)0
-rw-r--r--bst/doc.go (renamed from pkg/bst/doc.go)0
-rw-r--r--bst/interface.go (renamed from pkg/bst/interface.go)0
-rw-r--r--codecov.yml37
-rw-r--r--common/doc.go9
-rw-r--r--common/jobs/interface.go31
-rw-r--r--common/kv/interface.go42
-rw-r--r--common/pubsub/interface.go56
-rw-r--r--common/pubsub/psmessage.go15
-rw-r--r--doc/README.md (renamed from pkg/doc/README.md)0
-rw-r--r--doc/pool_workflow.drawio (renamed from pkg/doc/pool_workflow.drawio)0
-rw-r--r--doc/pool_workflow.svg (renamed from pkg/doc/pool_workflow.svg)0
-rwxr-xr-xevents/general.go (renamed from pkg/events/general.go)0
-rw-r--r--events/grpc_event.go (renamed from pkg/events/grpc_event.go)0
-rw-r--r--events/interface.go (renamed from pkg/events/interface.go)0
-rw-r--r--events/jobs_events.go (renamed from pkg/events/jobs_events.go)0
-rw-r--r--events/pool_events.go (renamed from pkg/events/pool_events.go)11
-rw-r--r--events/worker_events.go (renamed from pkg/events/worker_events.go)0
-rw-r--r--go.mod79
-rw-r--r--go.sum659
-rwxr-xr-xpayload/payload.go (renamed from pkg/payload/payload.go)0
-rw-r--r--pkg/worker_handler/constants.go8
-rw-r--r--pkg/worker_handler/errors.go26
-rw-r--r--pkg/worker_handler/errors_windows.go28
-rw-r--r--pkg/worker_handler/handler.go246
-rw-r--r--pkg/worker_handler/parse.go149
-rw-r--r--pkg/worker_handler/request.go189
-rw-r--r--pkg/worker_handler/response.go105
-rw-r--r--pkg/worker_handler/uploads.go159
-rw-r--r--plugins/amqp/amqpjobs/config.go67
-rw-r--r--plugins/amqp/amqpjobs/consumer.go524
-rw-r--r--plugins/amqp/amqpjobs/item.go250
-rw-r--r--plugins/amqp/amqpjobs/listener.go25
-rw-r--r--plugins/amqp/amqpjobs/rabbit_init.go57
-rw-r--r--plugins/amqp/amqpjobs/redial.go138
-rw-r--r--plugins/amqp/plugin.go41
-rw-r--r--plugins/beanstalk/config.go53
-rw-r--r--plugins/beanstalk/connection.go223
-rw-r--r--plugins/beanstalk/consumer.go374
-rw-r--r--plugins/beanstalk/encode_test.go75
-rw-r--r--plugins/beanstalk/item.go138
-rw-r--r--plugins/beanstalk/listen.go39
-rw-r--r--plugins/beanstalk/plugin.go47
-rw-r--r--plugins/boltdb/boltjobs/config.go39
-rw-r--r--plugins/boltdb/boltjobs/consumer.go430
-rw-r--r--plugins/boltdb/boltjobs/item.go229
-rw-r--r--plugins/boltdb/boltjobs/listener.go156
-rw-r--r--plugins/boltdb/boltkv/config.go30
-rw-r--r--plugins/boltdb/boltkv/driver.go472
-rw-r--r--plugins/boltdb/doc/boltjobs.drawio1
-rw-r--r--plugins/boltdb/doc/job_lifecycle.md9
-rw-r--r--plugins/boltdb/plugin.go68
-rw-r--r--plugins/broadcast/config.go27
-rw-r--r--plugins/broadcast/doc/broadcast_arch.drawio1
-rw-r--r--plugins/broadcast/interface.go7
-rw-r--r--plugins/broadcast/plugin.go192
-rw-r--r--plugins/broadcast/rpc.go87
-rw-r--r--plugins/config/config.go10
-rw-r--r--plugins/config/interface.go29
-rwxr-xr-xplugins/config/plugin.go174
-rw-r--r--plugins/grpc/codec/codec.go44
-rw-r--r--plugins/grpc/codec/codec_test.go79
-rw-r--r--plugins/grpc/config.go128
-rw-r--r--plugins/grpc/parser/message.proto7
-rw-r--r--plugins/grpc/parser/parse.go114
-rw-r--r--plugins/grpc/parser/parse_test.go71
-rw-r--r--plugins/grpc/parser/pong.proto10
-rw-r--r--plugins/grpc/parser/test.proto20
-rw-r--r--plugins/grpc/parser/test_import.proto12
-rw-r--r--plugins/grpc/parser/test_nested/message.proto7
-rw-r--r--plugins/grpc/parser/test_nested/pong.proto10
-rw-r--r--plugins/grpc/parser/test_nested/test_import.proto12
-rw-r--r--plugins/grpc/plugin.go195
-rw-r--r--plugins/grpc/protoc_plugins/protoc-gen-php-grpc/main.go68
-rw-r--r--plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/generate.go57
-rw-r--r--plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/keywords.go139
-rw-r--r--plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/ns.go103
-rw-r--r--plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/template.go103
-rw-r--r--plugins/grpc/proxy/proxy.go219
-rw-r--r--plugins/grpc/proxy/proxy_test.go134
-rw-r--r--plugins/grpc/server.go154
-rw-r--r--plugins/gzip/plugin.go28
-rw-r--r--plugins/headers/config.go36
-rw-r--r--plugins/headers/plugin.go127
-rw-r--r--plugins/http/attributes/attributes.go89
-rw-r--r--plugins/http/config/fcgi.go7
-rw-r--r--plugins/http/config/http.go187
-rw-r--r--plugins/http/config/http2.go28
-rw-r--r--plugins/http/config/ip.go26
-rw-r--r--plugins/http/config/ssl.go84
-rw-r--r--plugins/http/config/ssl_config_test.go116
-rw-r--r--plugins/http/config/uploads_config.go46
-rw-r--r--plugins/http/metrics.go92
-rw-r--r--plugins/http/plugin.go412
-rw-r--r--plugins/http/serve.go254
-rw-r--r--plugins/informer/interface.go34
-rw-r--r--plugins/informer/plugin.go89
-rw-r--r--plugins/informer/rpc.go59
-rw-r--r--plugins/jobs/config.go62
-rw-r--r--plugins/jobs/doc/jobs_arch.drawio1
-rw-r--r--plugins/jobs/doc/response_protocol.md54
-rw-r--r--plugins/jobs/job/job.go51
-rw-r--r--plugins/jobs/job/job_test.go18
-rw-r--r--plugins/jobs/metrics.go92
-rw-r--r--plugins/jobs/pipeline/pipeline.go98
-rw-r--r--plugins/jobs/pipeline/pipeline_test.go21
-rw-r--r--plugins/jobs/plugin.go719
-rw-r--r--plugins/jobs/protocol.go78
-rw-r--r--plugins/jobs/rpc.go160
-rw-r--r--plugins/kv/config.go6
-rw-r--r--plugins/kv/doc/kv.drawio1
-rw-r--r--plugins/kv/plugin.go159
-rw-r--r--plugins/kv/rpc.go180
-rw-r--r--plugins/logger/config.go212
-rw-r--r--plugins/logger/encoder.go66
-rw-r--r--plugins/logger/enums.go12
-rw-r--r--plugins/logger/interface.go14
-rw-r--r--plugins/logger/plugin.go86
-rw-r--r--plugins/logger/std_log_adapter.go26
-rw-r--r--plugins/logger/zap_adapter.go79
-rw-r--r--plugins/memcached/memcachedkv/config.go12
-rw-r--r--plugins/memcached/memcachedkv/driver.go254
-rw-r--r--plugins/memcached/plugin.go49
-rw-r--r--plugins/memory/memoryjobs/consumer.go296
-rw-r--r--plugins/memory/memoryjobs/item.go134
-rw-r--r--plugins/memory/memorykv/config.go14
-rw-r--r--plugins/memory/memorykv/kv.go257
-rw-r--r--plugins/memory/memorypubsub/pubsub.go92
-rw-r--r--plugins/memory/plugin.go68
-rw-r--r--plugins/metrics/config.go140
-rw-r--r--plugins/metrics/config_test.go89
-rw-r--r--plugins/metrics/doc.go1
-rw-r--r--plugins/metrics/interface.go7
-rw-r--r--plugins/metrics/plugin.go242
-rw-r--r--plugins/metrics/rpc.go294
-rw-r--r--plugins/redis/config.go34
-rw-r--r--plugins/redis/kv/config.go36
-rw-r--r--plugins/redis/kv/kv.go255
-rw-r--r--plugins/redis/plugin.go77
-rw-r--r--plugins/redis/pubsub/channel.go97
-rw-r--r--plugins/redis/pubsub/config.go34
-rw-r--r--plugins/redis/pubsub/pubsub.go187
-rw-r--r--plugins/reload/config.go62
-rw-r--r--plugins/reload/plugin.go167
-rw-r--r--plugins/reload/watcher.go372
-rw-r--r--plugins/resetter/interface.go7
-rw-r--r--plugins/resetter/plugin.go55
-rw-r--r--plugins/resetter/rpc.go29
-rw-r--r--plugins/rpc/config.go46
-rw-r--r--plugins/rpc/doc/plugin_arch.drawio1
-rw-r--r--plugins/rpc/interface.go7
-rw-r--r--plugins/rpc/plugin.go155
-rw-r--r--plugins/server/command.go33
-rw-r--r--plugins/server/command_test.go43
-rw-r--r--plugins/server/config.go60
-rw-r--r--plugins/server/interface.go23
-rw-r--r--plugins/server/plugin.go268
-rw-r--r--plugins/service/config.go34
-rw-r--r--plugins/service/plugin.go110
-rw-r--r--plugins/service/process.go147
-rw-r--r--plugins/sqs/config.go114
-rw-r--r--plugins/sqs/consumer.go421
-rw-r--r--plugins/sqs/item.go250
-rw-r--r--plugins/sqs/listener.go87
-rw-r--r--plugins/sqs/plugin.go39
-rw-r--r--plugins/static/config.go55
-rw-r--r--plugins/static/etag.go72
-rw-r--r--plugins/static/plugin.go188
-rw-r--r--plugins/status/config.go18
-rw-r--r--plugins/status/interface.go18
-rw-r--r--plugins/status/plugin.go214
-rw-r--r--plugins/status/rpc.go43
-rw-r--r--plugins/websockets/commands/enums.go9
-rw-r--r--plugins/websockets/config.go83
-rw-r--r--plugins/websockets/connection/connection.go67
-rw-r--r--plugins/websockets/doc/broadcast.drawio1
-rw-r--r--plugins/websockets/doc/doc.go27
-rw-r--r--plugins/websockets/executor/executor.go214
-rw-r--r--plugins/websockets/origin.go28
-rw-r--r--plugins/websockets/origin_test.go73
-rw-r--r--plugins/websockets/plugin.go370
-rw-r--r--plugins/websockets/pool/workers_pool.go135
-rw-r--r--plugins/websockets/validator/access_validator.go81
-rw-r--r--plugins/websockets/wildcard.go12
-rw-r--r--pool/config.go (renamed from pkg/pool/config.go)0
-rw-r--r--pool/interface.go (renamed from pkg/pool/interface.go)4
-rwxr-xr-xpool/static_pool.go (renamed from pkg/pool/static_pool.go)14
-rwxr-xr-xpool/static_pool_test.go (renamed from pkg/pool/static_pool_test.go)52
-rwxr-xr-xpool/supervisor_pool.go (renamed from pkg/pool/supervisor_pool.go)8
-rw-r--r--pool/supervisor_test.go (renamed from pkg/pool/supervisor_test.go)26
-rw-r--r--priority_queue/binary_heap.go (renamed from pkg/priority_queue/binary_heap.go)0
-rw-r--r--priority_queue/binary_heap_test.go (renamed from pkg/priority_queue/binary_heap_test.go)0
-rw-r--r--priority_queue/interface.go (renamed from pkg/priority_queue/interface.go)0
-rw-r--r--proto/jobs/v1beta/jobs.pb.go840
-rw-r--r--proto/jobs/v1beta/jobs.proto60
-rw-r--r--proto/kv/v1beta/kv.pb.go301
-rw-r--r--proto/kv/v1beta/kv.proto22
-rw-r--r--proto/websockets/v1beta/websockets.pb.go291
-rw-r--r--proto/websockets/v1beta/websockets.proto20
-rw-r--r--state/job/state.go (renamed from pkg/state/job/state.go)0
-rw-r--r--state/process/state.go (renamed from pkg/state/process/state.go)2
-rw-r--r--tests/env/Dockerfile-beanstalkd.yaml14
-rw-r--r--tests/env/Dockerfile-elastic-mq.yaml9
-rw-r--r--tests/env/custom.conf8
-rw-r--r--tests/env/docker-compose.yaml44
-rw-r--r--tests/mocks/mock_log.go146
-rw-r--r--tests/plugins/broadcast/broadcast_plugin_test.go513
-rw-r--r--tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml32
-rw-r--r--tests/plugins/broadcast/configs/.rr-broadcast-global.yaml50
-rw-r--r--tests/plugins/broadcast/configs/.rr-broadcast-init.yaml36
-rw-r--r--tests/plugins/broadcast/configs/.rr-broadcast-no-config.yaml29
-rw-r--r--tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml48
-rw-r--r--tests/plugins/broadcast/plugins/plugin1.go73
-rw-r--r--tests/plugins/broadcast/plugins/plugin2.go74
-rw-r--r--tests/plugins/broadcast/plugins/plugin3.go74
-rw-r--r--tests/plugins/broadcast/plugins/plugin4.go74
-rw-r--r--tests/plugins/broadcast/plugins/plugin5.go74
-rw-r--r--tests/plugins/broadcast/plugins/plugin6.go74
-rwxr-xr-xtests/plugins/config/config_test.go272
-rwxr-xr-xtests/plugins/config/configs/.rr-env.yaml24
-rwxr-xr-xtests/plugins/config/configs/.rr.yaml24
-rwxr-xr-xtests/plugins/config/plugin1.go96
-rwxr-xr-xtests/plugins/config/plugin2.go50
-rwxr-xr-xtests/plugins/config/plugin3.go34
-rw-r--r--tests/plugins/grpc/configs/.rr-grpc-init.yaml58
-rw-r--r--tests/plugins/grpc/configs/external.proto19
-rw-r--r--tests/plugins/grpc/configs/test.pb.go291
-rw-r--r--tests/plugins/grpc/configs/test.proto33
-rw-r--r--tests/plugins/grpc/grpc_plugin_test.go89
-rw-r--r--tests/plugins/grpc/php_server/.rr.yaml22
-rw-r--r--tests/plugins/grpc/php_server/composer.json23
-rw-r--r--tests/plugins/grpc/php_server/server.crt15
-rw-r--r--tests/plugins/grpc/php_server/server.key9
-rw-r--r--tests/plugins/grpc/php_server/service.proto11
-rw-r--r--tests/plugins/grpc/php_server/src/EchoService.php17
-rw-r--r--tests/plugins/grpc/php_server/src/GPBMetadata/Service.php27
-rw-r--r--tests/plugins/grpc/php_server/src/Service/EchoInterface.php22
-rw-r--r--tests/plugins/grpc/php_server/src/Service/Message.php58
-rw-r--r--tests/plugins/grpc/php_server/worker-grpc.php26
-rw-r--r--tests/plugins/grpc/plugin_test.go178
-rw-r--r--tests/plugins/grpc/testdata/import/Import/ServiceInterface.php32
-rw-r--r--tests/plugins/grpc/testdata/import/service.proto17
-rw-r--r--tests/plugins/grpc/testdata/import/sub/message.proto7
-rw-r--r--tests/plugins/grpc/testdata/import_custom/Test/CustomImport/ServiceInterface.php32
-rw-r--r--tests/plugins/grpc/testdata/import_custom/service.proto19
-rw-r--r--tests/plugins/grpc/testdata/import_custom/sub/message.proto14
-rw-r--r--tests/plugins/grpc/testdata/php_namespace/Test/CustomNamespace/ServiceInterface.php22
-rw-r--r--tests/plugins/grpc/testdata/php_namespace/service.proto15
-rw-r--r--tests/plugins/grpc/testdata/simple/TestSimple/SimpleServiceInterface.php22
-rw-r--r--tests/plugins/grpc/testdata/simple/simple.proto13
-rw-r--r--tests/plugins/grpc/testdata/use_empty/Test/ServiceInterface.php23
-rw-r--r--tests/plugins/grpc/testdata/use_empty/service.proto10
-rw-r--r--tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml20
-rw-r--r--tests/plugins/gzip/configs/.rr-http-withGzip.yaml22
-rw-r--r--tests/plugins/gzip/plugin_test.go180
-rw-r--r--tests/plugins/headers/configs/.rr-cors-headers.yaml34
-rw-r--r--tests/plugins/headers/configs/.rr-headers-init.yaml38
-rw-r--r--tests/plugins/headers/configs/.rr-req-headers.yaml31
-rw-r--r--tests/plugins/headers/configs/.rr-res-headers.yaml31
-rw-r--r--tests/plugins/headers/headers_plugin_test.go368
-rw-r--r--tests/plugins/http/attributes_test.go78
-rw-r--r--tests/plugins/http/configs/.rr-big-req-size.yaml21
-rw-r--r--tests/plugins/http/configs/.rr-broken-pipes.yaml29
-rw-r--r--tests/plugins/http/configs/.rr-env.yaml27
-rw-r--r--tests/plugins/http/configs/.rr-fcgi-reqUri.yaml32
-rw-r--r--tests/plugins/http/configs/.rr-fcgi.yaml30
-rw-r--r--tests/plugins/http/configs/.rr-h2c.yaml28
-rw-r--r--tests/plugins/http/configs/.rr-http-ipv6-2.yaml24
-rw-r--r--tests/plugins/http/configs/.rr-http-ipv6.yaml24
-rw-r--r--tests/plugins/http/configs/.rr-http-static-disabled.yaml27
-rw-r--r--tests/plugins/http/configs/.rr-http-static-etags.yaml34
-rw-r--r--tests/plugins/http/configs/.rr-http-static-files-disable.yaml24
-rw-r--r--tests/plugins/http/configs/.rr-http-static-files.yaml30
-rw-r--r--tests/plugins/http/configs/.rr-http-static-security.yaml34
-rw-r--r--tests/plugins/http/configs/.rr-http-static.yaml30
-rw-r--r--tests/plugins/http/configs/.rr-http-supervised-pool.yaml28
-rw-r--r--tests/plugins/http/configs/.rr-http.yaml25
-rw-r--r--tests/plugins/http/configs/.rr-init.yaml36
-rw-r--r--tests/plugins/http/configs/.rr-issue659.yaml23
-rw-r--r--tests/plugins/http/configs/.rr-no-http.yaml16
-rw-r--r--tests/plugins/http/configs/.rr-resetter.yaml28
-rw-r--r--tests/plugins/http/configs/.rr-ssl-push.yaml30
-rw-r--r--tests/plugins/http/configs/.rr-ssl-redirect.yaml30
-rw-r--r--tests/plugins/http/configs/.rr-ssl.yaml32
-rw-r--r--tests/plugins/http/fixtures/server.crt15
-rw-r--r--tests/plugins/http/fixtures/server.key9
-rw-r--r--tests/plugins/http/handler_test.go1862
-rw-r--r--tests/plugins/http/http_plugin_test.go2516
-rw-r--r--tests/plugins/http/parse_test.go54
-rw-r--r--tests/plugins/http/plugin1.go27
-rw-r--r--tests/plugins/http/plugin_middleware.go69
-rw-r--r--tests/plugins/http/response_test.go165
-rw-r--r--tests/plugins/http/uploads_config_test.go26
-rw-r--r--tests/plugins/http/uploads_test.go433
-rw-r--r--tests/plugins/informer/.rr-informer.yaml15
-rw-r--r--tests/plugins/informer/informer_test.go136
-rw-r--r--tests/plugins/informer/test_plugin.go71
-rw-r--r--tests/plugins/jobs/amqp/.rr-amqp-declare.yaml24
-rw-r--r--tests/plugins/jobs/amqp/.rr-amqp-init.yaml55
-rw-r--r--tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml24
-rw-r--r--tests/plugins/jobs/amqp/.rr-no-global.yaml47
-rw-r--r--tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml27
-rw-r--r--tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml45
-rw-r--r--tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml25
-rw-r--r--tests/plugins/jobs/beanstalk/.rr-no-global.yaml34
-rw-r--r--tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml24
-rw-r--r--tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml43
-rw-r--r--tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml24
-rw-r--r--tests/plugins/jobs/boltdb/.rr-no-global.yaml41
-rw-r--r--tests/plugins/jobs/configs/.rr-jobs-init.yaml112
-rw-r--r--tests/plugins/jobs/configs/.rr-jobs-metrics.yaml27
-rw-r--r--tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml55
-rw-r--r--tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml44
-rw-r--r--tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml60
-rw-r--r--tests/plugins/jobs/helpers.go234
-rw-r--r--tests/plugins/jobs/jobs_amqp_test.go499
-rw-r--r--tests/plugins/jobs/jobs_beanstalk_test.go515
-rw-r--r--tests/plugins/jobs/jobs_boltdb_test.go506
-rw-r--r--tests/plugins/jobs/jobs_general_test.go249
-rw-r--r--tests/plugins/jobs/jobs_memory_test.go570
-rw-r--r--tests/plugins/jobs/jobs_sqs_test.go503
-rw-r--r--tests/plugins/jobs/jobs_with_toxics_test.go400
-rw-r--r--tests/plugins/jobs/memory/.rr-memory-declare.yaml21
-rw-r--r--tests/plugins/jobs/memory/.rr-memory-init.yaml37
-rw-r--r--tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml21
-rw-r--r--tests/plugins/jobs/memory/.rr-memory-pause-resume.yaml44
-rw-r--r--tests/plugins/jobs/sqs/.rr-no-global.yaml39
-rw-r--r--tests/plugins/jobs/sqs/.rr-sqs-declare.yaml29
-rw-r--r--tests/plugins/jobs/sqs/.rr-sqs-init.yaml54
-rw-r--r--tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml28
-rw-r--r--tests/plugins/kv/configs/.rr-boltdb.yaml16
-rw-r--r--tests/plugins/kv/configs/.rr-in-memory.yaml12
-rw-r--r--tests/plugins/kv/configs/.rr-kv-bolt-no-interval.yaml19
-rw-r--r--tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml18
-rw-r--r--tests/plugins/kv/configs/.rr-kv-init.yaml35
-rw-r--r--tests/plugins/kv/configs/.rr-memcached.yaml13
-rw-r--r--tests/plugins/kv/configs/.rr-redis-global.yaml14
-rw-r--r--tests/plugins/kv/configs/.rr-redis-no-config.yaml10
-rw-r--r--tests/plugins/kv/configs/.rr-redis.yaml13
-rw-r--r--tests/plugins/kv/storage_plugin_test.go1517
-rw-r--r--tests/plugins/logger/configs/.rr-file-logger.yaml23
-rw-r--r--tests/plugins/logger/configs/.rr-no-logger.yaml0
-rw-r--r--tests/plugins/logger/configs/.rr-no-logger2.yaml16
-rw-r--r--tests/plugins/logger/configs/.rr-raw-mode.yaml15
-rw-r--r--tests/plugins/logger/configs/.rr.yaml3
-rw-r--r--tests/plugins/logger/logger_test.go430
-rw-r--r--tests/plugins/logger/plugin.go71
-rw-r--r--tests/plugins/metrics/configs/.rr-http-metrics.yaml20
-rw-r--r--tests/plugins/metrics/configs/.rr-issue-571.yaml13
-rw-r--r--tests/plugins/metrics/configs/.rr-test.yaml18
-rw-r--r--tests/plugins/metrics/metrics_test.go1091
-rw-r--r--tests/plugins/metrics/plugin1.go46
-rw-r--r--tests/plugins/reload/config_test.go63
-rw-r--r--tests/plugins/reload/configs/.rr-reload-2.yaml37
-rw-r--r--tests/plugins/reload/configs/.rr-reload-3.yaml39
-rw-r--r--tests/plugins/reload/configs/.rr-reload-4.yaml40
-rw-r--r--tests/plugins/reload/configs/.rr-reload.yaml37
-rw-r--r--tests/plugins/reload/reload_plugin_test.go852
-rw-r--r--tests/plugins/resetter/.rr-resetter.yaml15
-rw-r--r--tests/plugins/resetter/resetter_test.go125
-rw-r--r--tests/plugins/resetter/test_plugin.go66
-rwxr-xr-xtests/plugins/rpc/config_test.go163
-rw-r--r--tests/plugins/rpc/configs/.rr-rpc-disabled.yaml8
-rw-r--r--tests/plugins/rpc/configs/.rr.yaml11
-rw-r--r--tests/plugins/rpc/plugin1.go42
-rw-r--r--tests/plugins/rpc/plugin2.go53
-rw-r--r--tests/plugins/rpc/rpc_test.go188
-rw-r--r--tests/plugins/server/configs/.rr-no-app-section.yaml12
-rw-r--r--tests/plugins/server/configs/.rr-sockets.yaml12
-rw-r--r--tests/plugins/server/configs/.rr-tcp.yaml12
-rw-r--r--tests/plugins/server/configs/.rr-wrong-command.yaml12
-rw-r--r--tests/plugins/server/configs/.rr-wrong-relay.yaml12
-rw-r--r--tests/plugins/server/configs/.rr.yaml12
-rw-r--r--tests/plugins/server/plugin_pipes.go128
-rw-r--r--tests/plugins/server/plugin_sockets.go109
-rw-r--r--tests/plugins/server/plugin_tcp.go109
-rw-r--r--tests/plugins/server/server_plugin_test.go352
-rw-r--r--tests/plugins/server/socket.php25
-rw-r--r--tests/plugins/server/tcp.php20
-rw-r--r--tests/plugins/service/configs/.rr-service-error.yaml16
-rw-r--r--tests/plugins/service/configs/.rr-service-init.yaml22
-rw-r--r--tests/plugins/service/configs/.rr-service-restarts.yaml16
-rw-r--r--tests/plugins/service/service_plugin_test.go254
-rw-r--r--tests/plugins/service/test_files/loop.php6
-rwxr-xr-xtests/plugins/service/test_files/test_binarybin1363968 -> 0 bytes
-rwxr-xr-xtests/plugins/status/configs/.rr-ready-init.yaml28
-rwxr-xr-xtests/plugins/status/configs/.rr-status-init.yaml28
-rw-r--r--tests/plugins/status/plugin_test.go388
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-allow.yaml52
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-allow2.yaml54
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-broker-no-section.yaml48
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-deny.yaml48
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-deny2.yaml50
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-init.yaml48
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-redis.yaml51
-rw-r--r--tests/plugins/websockets/configs/.rr-websockets-stop.yaml48
-rw-r--r--tests/plugins/websockets/websocket_plugin_test.go918
-rw-r--r--transport/interface.go (renamed from pkg/transport/interface.go)4
-rwxr-xr-xtransport/pipe/pipe_factory.go (renamed from pkg/transport/pipe/pipe_factory.go)4
-rw-r--r--transport/pipe/pipe_factory_spawn_test.go (renamed from pkg/transport/pipe/pipe_factory_spawn_test.go)50
-rwxr-xr-xtransport/pipe/pipe_factory_test.go (renamed from pkg/transport/pipe/pipe_factory_test.go)50
-rwxr-xr-xtransport/socket/socket_factory.go (renamed from pkg/transport/socket/socket_factory.go)4
-rw-r--r--transport/socket/socket_factory_spawn_test.go (renamed from pkg/transport/socket/socket_factory_spawn_test.go)40
-rwxr-xr-xtransport/socket/socket_factory_test.go (renamed from pkg/transport/socket/socket_factory_test.go)42
-rw-r--r--worker/interface.go (renamed from pkg/worker/interface.go)2
-rwxr-xr-xworker/state.go (renamed from pkg/worker/state.go)0
-rwxr-xr-xworker/state_test.go (renamed from pkg/worker/state_test.go)0
-rwxr-xr-xworker/sync_worker.go (renamed from pkg/worker/sync_worker.go)2
-rwxr-xr-xworker/sync_worker_test.go (renamed from pkg/worker/sync_worker_test.go)2
-rwxr-xr-xworker/worker.go (renamed from pkg/worker/worker.go)2
-rwxr-xr-xworker/worker_test.go (renamed from pkg/worker/worker_test.go)0
-rw-r--r--worker_watcher/container/channel/vec.go (renamed from pkg/worker_watcher/container/channel/vec.go)2
-rw-r--r--worker_watcher/container/queue/queue.go (renamed from pkg/worker_watcher/container/queue/queue.go)2
-rwxr-xr-xworker_watcher/worker_watcher.go (renamed from pkg/worker_watcher/worker_watcher.go)10
418 files changed, 226 insertions, 42438 deletions
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index b20ec4dd..1e48bfd6 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -7,12 +7,12 @@ on:
- beta
- stable
tags-ignore:
- - '**'
+ - "**"
paths-ignore:
- - '**.md'
+ - "**.md"
pull_request:
paths-ignore:
- - '**.md'
+ - "**.md"
jobs:
golang:
@@ -22,9 +22,9 @@ jobs:
strategy:
fail-fast: true
matrix:
- php: [ "7.4", "8.0" ]
- go: [ "1.17" ]
- os: [ 'ubuntu-latest' ]
+ php: ["7.4", "8.0", "8.1"]
+ go: ["1.17.1"]
+ os: ["ubuntu-latest"]
steps:
- name: Set up Go ${{ matrix.go }}
uses: actions/setup-go@v2 # action page: <https://github.com/actions/setup-go>
@@ -76,4 +76,3 @@ jobs:
with:
file: ./coverage-ci/summary.txt
fail_ci_if_error: false
-
diff --git a/.vscode/settings.json b/.vscode/settings.json
index e7762292..8d55456f 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -6,6 +6,7 @@
"anypb",
"boltdb",
"codecov",
+ "Errored",
"Conv",
"golangci",
"gomemcache",
@@ -13,6 +14,7 @@
"hget",
"hset",
"INMEMORY",
+ "keyvals",
"Itestdata",
"memcachedkv",
"memorykv",
diff --git a/Makefile b/Makefile
index 109c963e..9c800ea4 100644
--- a/Makefile
+++ b/Makefile
@@ -5,85 +5,23 @@
SHELL = /bin/sh
test_coverage:
- docker-compose -f tests/env/docker-compose.yaml up -d --remove-orphans
rm -rf coverage-ci
mkdir ./coverage-ci
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipe.out -covermode=atomic ./pkg/transport/pipe
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/socket.out -covermode=atomic ./pkg/transport/socket
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pool.out -covermode=atomic ./pkg/pool
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker.out -covermode=atomic ./pkg/worker
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/bst.out -covermode=atomic ./pkg/bst
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pq.out -covermode=atomic ./pkg/priority_queue
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker_stack.out -covermode=atomic ./pkg/worker_watcher
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/ws_origin.out -covermode=atomic ./plugins/websockets
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http_config.out -covermode=atomic ./plugins/http/config
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/grpc_codec.out -covermode=atomic ./plugins/grpc/codec
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/grpc_parser.out -covermode=atomic ./plugins/grpc/parser
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/grpc_proxy.out -covermode=atomic ./plugins/grpc/proxy
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server_cmd.out -covermode=atomic ./plugins/server
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/struct_jobs.out -covermode=atomic ./plugins/jobs/job
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipeline_jobs.out -covermode=atomic ./plugins/jobs/pipeline
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/jobs_core.out -covermode=atomic ./tests/plugins/jobs
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/kv_plugin.out -covermode=atomic ./tests/plugins/kv
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/grpc_plugin.out -covermode=atomic ./tests/plugins/grpc
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/broadcast_plugin.out -covermode=atomic ./tests/plugins/broadcast
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/websockets.out -covermode=atomic ./tests/plugins/websockets
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/http.out -covermode=atomic ./tests/plugins/http
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/informer.out -covermode=atomic ./tests/plugins/informer
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/reload.out -covermode=atomic ./tests/plugins/reload
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/server.out -covermode=atomic ./tests/plugins/server
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/service.out -covermode=atomic ./tests/plugins/service
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/status.out -covermode=atomic ./tests/plugins/status
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/config.out -covermode=atomic ./tests/plugins/config
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/gzip.out -covermode=atomic ./tests/plugins/gzip
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/headers.out -covermode=atomic ./tests/plugins/headers
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/logger.out -covermode=atomic ./tests/plugins/logger
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/metrics.out -covermode=atomic ./tests/plugins/metrics
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/resetter.out -covermode=atomic ./tests/plugins/resetter
- go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/rpc.out -covermode=atomic ./tests/plugins/rpc
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pipe.out -covermode=atomic ./transport/pipe
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/socket.out -covermode=atomic ./transport/socket
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pool.out -covermode=atomic ./pool
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker.out -covermode=atomic ./worker
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/bst.out -covermode=atomic ./bst
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/pq.out -covermode=atomic ./priority_queue
+ go test -v -race -cover -tags=debug -coverpkg=./... -coverprofile=./coverage-ci/worker_stack.out -covermode=atomic ./worker_watcher
echo 'mode: atomic' > ./coverage-ci/summary.txt
tail -q -n +2 ./coverage-ci/*.out >> ./coverage-ci/summary.txt
- docker-compose -f tests/env/docker-compose.yaml down
test: ## Run application tests
- docker-compose -f tests/env/docker-compose.yaml up -d
- go test -v -race -tags=debug ./pkg/transport/pipe
- go test -v -race -tags=debug ./pkg/transport/socket
- go test -v -race -tags=debug ./pkg/pool
- go test -v -race -tags=debug ./pkg/worker
- go test -v -race -tags=debug ./pkg/worker_watcher
- go test -v -race -tags=debug ./pkg/bst
- go test -v -race -tags=debug ./pkg/priority_queue
- go test -v -race -tags=debug ./plugins/jobs/pipeline
- go test -v -race -tags=debug ./plugins/http/config
- go test -v -race -tags=debug ./plugins/server
- go test -v -race -tags=debug ./plugins/jobs/job
- go test -v -race -tags=debug ./plugins/websockets
- go test -v -race -tags=debug ./plugins/grpc
- go test -v -race -tags=debug ./plugins/grpc/codec
- go test -v -race -tags=debug ./plugins/grpc/parser
- go test -v -race -tags=debug ./plugins/grpc/proxy
- go test -v -race -tags=debug ./tests/plugins/jobs
- go test -v -race -tags=debug ./tests/plugins/kv
- go test -v -race -tags=debug ./tests/plugins/broadcast
- go test -v -race -tags=debug ./tests/plugins/websockets
- go test -v -race -tags=debug ./tests/plugins/http
- go test -v -race -tags=debug ./tests/plugins/grpc
- go test -v -race -tags=debug ./tests/plugins/informer
- go test -v -race -tags=debug ./tests/plugins/reload
- go test -v -race -tags=debug ./tests/plugins/server
- go test -v -race -tags=debug ./tests/plugins/service
- go test -v -race -tags=debug ./tests/plugins/status
- go test -v -race -tags=debug ./tests/plugins/config
- go test -v -race -tags=debug ./tests/plugins/gzip
- go test -v -race -tags=debug ./tests/plugins/headers
- go test -v -race -tags=debug ./tests/plugins/logger
- go test -v -race -tags=debug ./tests/plugins/metrics
- go test -v -race -tags=debug ./tests/plugins/resetter
- go test -v -race -tags=debug ./tests/plugins/rpc
- docker-compose -f tests/env/docker-compose.yaml down
-
-generate-proto:
- protoc --proto_path=./proto/jobs/v1beta --go_out=./proto/jobs/v1beta jobs.proto
- protoc --proto_path=./proto/kv/v1beta --go_out=./proto/kv/v1beta kv.proto
- protoc --proto_path=./proto/websockets/v1beta --go_out=./proto/websockets/v1beta websockets.proto
\ No newline at end of file
+ go test -v -race -tags=debug ./transport/pipe
+ go test -v -race -tags=debug ./transport/socket
+ go test -v -race -tags=debug ./pool
+ go test -v -race -tags=debug ./worker
+ go test -v -race -tags=debug ./worker_watcher
+ go test -v -race -tags=debug ./bst
+ go test -v -race -tags=debug ./priority_queue
diff --git a/pkg/bst/bst.go b/bst/bst.go
index dab9346c..dab9346c 100644
--- a/pkg/bst/bst.go
+++ b/bst/bst.go
diff --git a/pkg/bst/bst_test.go b/bst/bst_test.go
index 2271508c..2271508c 100644
--- a/pkg/bst/bst_test.go
+++ b/bst/bst_test.go
diff --git a/pkg/bst/doc.go b/bst/doc.go
index abb7e6e9..abb7e6e9 100644
--- a/pkg/bst/doc.go
+++ b/bst/doc.go
diff --git a/pkg/bst/interface.go b/bst/interface.go
index 95b03e11..95b03e11 100644
--- a/pkg/bst/interface.go
+++ b/bst/interface.go
diff --git a/codecov.yml b/codecov.yml
index e61cda06..088a4d1b 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -19,25 +19,18 @@ ignore:
- "tests"
- "systemd"
- "utils/to_ptr.go"
- - "plugins/metrics/config_test.go"
- - "plugins/websockets/storage/storage_test.go"
- - "plugins/websockets/config.go"
- - "plugins/amqp/amqpjobs/config.go"
- - "plugins/beanstalk/config.go"
- - "plugins/redis/config.go"
- - "plugins/redis/kv/config.go"
- - "pkg/doc"
- - "pkg/bst/bst_test.go"
- - "pkg/pool/static_pool_test.go"
- - "pkg/pool/supervisor_test.go"
- - "pkg/transport/pipe/pipe_factory_spawn_test.go"
- - "pkg/transport/pipe/pipe_factory_test.go"
- - "pkg/transport/socket/socket_factory_spawn_test.go"
- - "pkg/transport/socket/socket_factory_test.go"
- - "pkg/transport/interface.go"
- - "pkg/worker/state_test.go"
- - "pkg/worker/sync_worker_test.go"
- - "pkg/worker/worker_test.go"
- - "pkg/events/pool_events.go"
- - "pkg/events/worker_events.go"
- - "pkg/events/jobs_events.go"
+ - "doc"
+ - "bst/bst_test.go"
+ - "pool/static_pool_test.go"
+ - "pool/supervisor_test.go"
+ - "transport/pipe/pipe_factory_spawn_test.go"
+ - "transport/pipe/pipe_factory_test.go"
+ - "transport/socket/socket_factory_spawn_test.go"
+ - "transport/socket/socket_factory_test.go"
+ - "transport/interface.go"
+ - "worker/state_test.go"
+ - "worker/sync_worker_test.go"
+ - "worker/worker_test.go"
+ - "events/pool_events.go"
+ - "events/worker_events.go"
+ - "events/jobs_events.go"
diff --git a/common/doc.go b/common/doc.go
deleted file mode 100644
index adc03351..00000000
--- a/common/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
-Package common used to collect common interfaces/structures which might be implemented (or imported) by a different plugins.
-For example, 'pubsub' interface might be implemented by memory, redis, websockets and many other plugins.
-
-Folders:
-- kv - contains KV interfaces and structures
-- pubsub - contains pub-sub interfaces and structures
-*/
-package common
diff --git a/common/jobs/interface.go b/common/jobs/interface.go
deleted file mode 100644
index 4b5ff70e..00000000
--- a/common/jobs/interface.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package jobs
-
-import (
- "context"
-
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
-)
-
-// Consumer represents a single jobs driver interface
-type Consumer interface {
- Push(ctx context.Context, job *job.Job) error
- Register(ctx context.Context, pipeline *pipeline.Pipeline) error
- Run(ctx context.Context, pipeline *pipeline.Pipeline) error
- Stop(ctx context.Context) error
-
- Pause(ctx context.Context, pipeline string)
- Resume(ctx context.Context, pipeline string)
-
- // State provide information about driver state
- State(ctx context.Context) (*jobState.State, error)
-}
-
-// Constructor constructs Consumer interface. Endure abstraction.
-type Constructor interface {
- JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (Consumer, error)
- FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (Consumer, error)
-}
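
Note: for context on what moved out, here is a minimal sketch of a jobs driver satisfying the Consumer interface deleted above, written against the pre-move import paths used in this file. The type name dummyConsumer is hypothetical; the real drivers (AMQP, SQS, beanstalk, boltdb, memory) now live in the separate plugins repository.

package sketch

import (
	"context"

	"github.com/spiral/roadrunner/v2/common/jobs"
	"github.com/spiral/roadrunner/v2/pkg/events"
	priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
	jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
	"github.com/spiral/roadrunner/v2/plugins/jobs/job"
	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
)

// dummyConsumer is a stand-in driver: it accepts every call and does nothing.
type dummyConsumer struct {
	eh events.Handler
	pq priorityqueue.Queue
}

// Compile-time check that the sketch satisfies the removed interface.
var _ jobs.Consumer = (*dummyConsumer)(nil)

func (c *dummyConsumer) Push(context.Context, *job.Job) error               { return nil }
func (c *dummyConsumer) Register(context.Context, *pipeline.Pipeline) error { return nil }
func (c *dummyConsumer) Run(context.Context, *pipeline.Pipeline) error      { return nil }
func (c *dummyConsumer) Stop(context.Context) error                         { return nil }
func (c *dummyConsumer) Pause(context.Context, string)                      {}
func (c *dummyConsumer) Resume(context.Context, string)                     {}
func (c *dummyConsumer) State(context.Context) (*jobState.State, error)     { return &jobState.State{}, nil }
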
diff --git a/common/kv/interface.go b/common/kv/interface.go
deleted file mode 100644
index bc6a07b2..00000000
--- a/common/kv/interface.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package kv
-
-import kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
-
-// Storage represents single abstract storage.
-type Storage interface {
- // Has checks if value exists.
- Has(keys ...string) (map[string]bool, error)
-
- // Get loads value content into a byte slice.
- Get(key string) ([]byte, error)
-
- // MGet loads content of multiple values
- // Returns the map with existing keys and associated values
- MGet(keys ...string) (map[string][]byte, error)
-
- // Set used to upload item to KV with TTL
- // 0 value in TTL means no TTL
- Set(items ...*kvv1.Item) error
-
- // MExpire sets the TTL for multiply keys
- MExpire(items ...*kvv1.Item) error
-
- // TTL return the rest time to live for provided keys
- // Not supported for the memcached
- TTL(keys ...string) (map[string]string, error)
-
- // Clear clean the entire storage
- Clear() error
-
- // Delete one or multiple keys.
- Delete(keys ...string) error
-
- // Stop the storage driver
- Stop()
-}
-
-// Constructor provides storage based on the config
-type Constructor interface {
- // KVConstruct provides Storage based on the config key
- KVConstruct(key string) (Storage, error)
-}
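
A small usage sketch against the Storage interface removed above, assuming the pre-move import path github.com/spiral/roadrunner/v2/common/kv; the lookup helper and its key argument are made up for illustration and use only the methods declared in the interface.

package sketch

import (
	"github.com/spiral/roadrunner/v2/common/kv"
)

// lookup returns the value for key, or nil if the driver reports the key as absent.
func lookup(s kv.Storage, key string) ([]byte, error) {
	exists, err := s.Has(key)
	if err != nil {
		return nil, err
	}
	if !exists[key] {
		return nil, nil
	}
	return s.Get(key)
}
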
diff --git a/common/pubsub/interface.go b/common/pubsub/interface.go
deleted file mode 100644
index 5b69d577..00000000
--- a/common/pubsub/interface.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package pubsub
-
-import "context"
-
-/*
-This interface is in BETA. It might be changed.
-*/
-
-// PubSub interface designed to implement on any storage type to provide pub-sub abilities
-// Publisher used to receive messages from the PHP app via RPC
-// Subscriber should be implemented to subscribe to a topics and provide a connections list per topic
-// Reader return next message from the channel
-type PubSub interface {
- Publisher
- Subscriber
- Reader
-}
-
-type SubReader interface {
- Subscriber
- Reader
-}
-
-// Subscriber defines the ability to operate as message passing broker.
-// BETA interface
-type Subscriber interface {
- // Subscribe broker to one or multiple topics.
- Subscribe(connectionID string, topics ...string) error
-
- // Unsubscribe from one or multiply topics
- Unsubscribe(connectionID string, topics ...string) error
-
- // Connections returns all connections associated with the particular topic
- Connections(topic string, ret map[string]struct{})
-}
-
-// Publisher publish one or more messages
-// BETA interface
-type Publisher interface {
- // Publish one or multiple Channel.
- Publish(message *Message) error
-
- // PublishAsync publish message and return immediately
- // If error occurred it will be printed into the logger
- PublishAsync(message *Message)
-}
-
-// Reader interface should return next message
-type Reader interface {
- Next(ctx context.Context) (*Message, error)
-}
-
-// Constructor is a special pub-sub interface made to return a constructed PubSub type
-type Constructor interface {
- PSConstruct(key string) (PubSub, error)
-}
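
A sketch of how a consumer side might drive the SubReader combination deleted above (Subscriber + Reader), again with the pre-move import path; the connection ID and topic are hypothetical.

package sketch

import (
	"context"
	"log"

	"github.com/spiral/roadrunner/v2/common/pubsub"
)

// listen subscribes one connection to a topic and drains messages until Next fails
// (typically on context cancellation or broker shutdown).
func listen(ctx context.Context, sr pubsub.SubReader) error {
	if err := sr.Subscribe("conn-1", "news"); err != nil {
		return err
	}
	defer func() {
		_ = sr.Unsubscribe("conn-1", "news")
	}()

	for {
		msg, err := sr.Next(ctx)
		if err != nil {
			return err
		}
		log.Printf("topic: %s, payload: %s", msg.Topic, msg.Payload)
	}
}
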
diff --git a/common/pubsub/psmessage.go b/common/pubsub/psmessage.go
deleted file mode 100644
index e33d9284..00000000
--- a/common/pubsub/psmessage.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pubsub
-
-import json "github.com/json-iterator/go"
-
-// Message represents a single message with payload bound to a particular topic
-type Message struct {
- // Topic (channel in terms of redis)
- Topic string `json:"topic"`
- // Payload (on some decode stages might be represented as base64 string)
- Payload []byte `json:"payload"`
-}
-
-func (m *Message) MarshalBinary() (data []byte, err error) {
- return json.Marshal(m)
-}
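
MarshalBinary makes Message satisfy encoding.BinaryMarshaler, presumably so the struct can be handed directly to clients that accept such values (e.g. go-redis when publishing). A tiny sketch of the resulting JSON, with a made-up payload:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/common/pubsub"
)

func main() {
	m := &pubsub.Message{Topic: "news", Payload: []byte(`{"id":1}`)}

	data, err := m.MarshalBinary() // JSON via json-iterator; the []byte payload is base64-encoded
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"topic":"news","payload":"eyJpZCI6MX0="}
}
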
diff --git a/pkg/doc/README.md b/doc/README.md
index 709df603..709df603 100644
--- a/pkg/doc/README.md
+++ b/doc/README.md
diff --git a/pkg/doc/pool_workflow.drawio b/doc/pool_workflow.drawio
index 3f74d0fc..3f74d0fc 100644
--- a/pkg/doc/pool_workflow.drawio
+++ b/doc/pool_workflow.drawio
diff --git a/pkg/doc/pool_workflow.svg b/doc/pool_workflow.svg
index 1e043eaa..1e043eaa 100644
--- a/pkg/doc/pool_workflow.svg
+++ b/doc/pool_workflow.svg
diff --git a/pkg/events/general.go b/events/general.go
index 5cf13e10..5cf13e10 100755
--- a/pkg/events/general.go
+++ b/events/general.go
diff --git a/pkg/events/grpc_event.go b/events/grpc_event.go
index 31ff4957..31ff4957 100644
--- a/pkg/events/grpc_event.go
+++ b/events/grpc_event.go
diff --git a/pkg/events/interface.go b/events/interface.go
index 7d57e4d0..7d57e4d0 100644
--- a/pkg/events/interface.go
+++ b/events/interface.go
diff --git a/pkg/events/jobs_events.go b/events/jobs_events.go
index f65ede67..f65ede67 100644
--- a/pkg/events/jobs_events.go
+++ b/events/jobs_events.go
diff --git a/pkg/events/pool_events.go b/events/pool_events.go
index 4d4cae5d..eb28df6a 100644
--- a/pkg/events/pool_events.go
+++ b/events/pool_events.go
@@ -7,12 +7,12 @@ const (
// EventWorkerDestruct thrown after worker destruction.
EventWorkerDestruct
- // EventPoolError caused on pool wide errors.
- EventPoolError
-
// EventSupervisorError triggered when supervisor can not complete work.
EventSupervisorError
+ // EventWorkerProcessExit triggered on process wait exit
+ EventWorkerProcessExit
+
// EventNoFreeWorkers triggered when there are no free workers in the stack and timeout for worker allocate elapsed
EventNoFreeWorkers
@@ -36,12 +36,12 @@ type P int64
func (ev P) String() string {
switch ev {
+ case EventWorkerProcessExit:
+ return "EventWorkerProcessExit"
case EventWorkerConstruct:
return "EventWorkerConstruct"
case EventWorkerDestruct:
return "EventWorkerDestruct"
- case EventPoolError:
- return "EventPoolError"
case EventSupervisorError:
return "EventSupervisorError"
case EventNoFreeWorkers:
@@ -67,4 +67,5 @@ type PoolEvent struct {
// Payload depends on event type, typically it's worker or error.
Payload interface{}
+ Error error
}
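
A sketch of a pool event listener reacting to the new EventWorkerProcessExit constant and the Error field added to PoolEvent; it assumes PoolEvent's Event field (not shown in this hunk) and uses the pre-move import path.

package sketch

import (
	"log"

	"github.com/spiral/roadrunner/v2/pkg/events"
)

// listener is a plain events listener; the pool pushes PoolEvent values into it.
func listener(event interface{}) {
	pe, ok := event.(events.PoolEvent)
	if !ok {
		return
	}
	switch pe.Event {
	case events.EventWorkerProcessExit:
		// Payload typically carries the worker, Error the process wait error.
		log.Printf("worker process exited: payload=%v, err=%v", pe.Payload, pe.Error)
	case events.EventSupervisorError:
		log.Printf("supervisor error: %v", pe.Error)
	case events.EventNoFreeWorkers:
		log.Printf("no free workers: %v", pe.Error)
	}
}
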
diff --git a/pkg/events/worker_events.go b/events/worker_events.go
index 39c38e57..39c38e57 100644
--- a/pkg/events/worker_events.go
+++ b/events/worker_events.go
diff --git a/go.mod b/go.mod
index 6c1616f5..9beabc67 100644
--- a/go.mod
+++ b/go.mod
@@ -3,95 +3,36 @@ module github.com/spiral/roadrunner/v2
go 1.17
require (
- github.com/Shopify/toxiproxy v2.1.4+incompatible
- // ========= AWS SDK v2
- github.com/aws/aws-sdk-go-v2 v1.9.0
- github.com/aws/aws-sdk-go-v2/config v1.8.0
- github.com/aws/aws-sdk-go-v2/credentials v1.4.0
- github.com/aws/aws-sdk-go-v2/service/sqs v1.9.0
- github.com/aws/smithy-go v1.8.0
- // =====================
- github.com/beanstalkd/go-beanstalk v0.1.0
- github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b
- github.com/cenkalti/backoff/v4 v4.1.1
- github.com/fasthttp/websocket v1.4.3-rc.8
- github.com/fatih/color v1.12.0
- github.com/go-redis/redis/v8 v8.11.3
- github.com/gofiber/fiber/v2 v2.18.0
- github.com/golang/mock v1.6.0
github.com/google/uuid v1.3.0
- github.com/json-iterator/go v1.1.11
- github.com/klauspost/compress v1.13.5
- github.com/prometheus/client_golang v1.11.0
- github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891
+ github.com/json-iterator/go v1.1.12
github.com/shirou/gopsutil v3.21.8+incompatible
- github.com/spf13/viper v1.8.1
- // SPIRAL ====
- github.com/spiral/endure v1.0.4
github.com/spiral/errors v1.0.12
github.com/spiral/goridge/v3 v3.2.1
- // ===========
github.com/stretchr/testify v1.7.0
github.com/valyala/tcplisten v1.0.0
- github.com/yookoala/gofast v0.6.0
- go.etcd.io/bbolt v1.3.6
go.uber.org/multierr v1.7.0
- go.uber.org/zap v1.19.0
- golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34
google.golang.org/grpc v1.40.0
- google.golang.org/protobuf v1.27.1
- gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
require (
github.com/StackExchange/wmi v1.2.1 // indirect
- github.com/andybalholm/brotli v1.0.3 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.4.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.7.0 // indirect
- github.com/beorn7/perks v1.0.1 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
- github.com/emicklei/proto v1.9.1
- github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-ole/go-ole v1.2.5 // indirect
- github.com/hashicorp/hcl v1.0.0 // indirect
- github.com/magiconair/properties v1.8.5 // indirect
- github.com/mattn/go-colorable v0.1.8 // indirect
- github.com/mattn/go-isatty v0.0.14 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
- github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/google/go-cmp v0.5.6 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.1 // indirect
- github.com/pelletier/go-toml v1.9.4 // indirect
+ github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.2.0 // indirect
- github.com/prometheus/common v0.30.0 // indirect
- github.com/prometheus/procfs v0.7.3 // indirect
- github.com/savsgio/gotils v0.0.0-20210907153846-c06938798b52 // indirect
- github.com/spf13/afero v1.6.0 // indirect
- github.com/spf13/cast v1.4.1 // indirect
- github.com/spf13/jwalterweatherman v1.1.0 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- github.com/subosito/gotenv v1.2.0 // indirect
github.com/tklauser/go-sysconf v0.3.9 // indirect
github.com/tklauser/numcpus v0.3.0 // indirect
- github.com/valyala/bytebufferpool v1.0.0 // indirect
- github.com/valyala/fasthttp v1.29.0 // indirect
- github.com/vmihailenco/msgpack/v5 v5.3.4 // indirect
- github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
+ golang.org/x/net v0.0.0-20210916014120-12bc252f5db8 // indirect
+ golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 // indirect
golang.org/x/text v0.3.7 // indirect
- golang.org/x/tools v0.1.5 // indirect
- google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
- gopkg.in/ini.v1 v1.63.0 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
+ google.golang.org/genproto v0.0.0-20210916144049-3192f974c780 // indirect
+ google.golang.org/protobuf v1.27.1 // indirect
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
-
-require github.com/golang/protobuf v1.5.2 // indirect
diff --git a/go.sum b/go.sum
index 07e30edd..ee55f9f8 100644
--- a/go.sum
+++ b/go.sum
@@ -1,178 +1,34 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
-github.com/andybalholm/brotli v1.0.3 h1:fpcw+r1N1h0Poc1F/pHbW40cUm/lMEQslZtCkBQ0UnM=
-github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/aws/aws-sdk-go-v2 v1.9.0 h1:+S+dSqQCN3MSU5vJRu1HqHrq00cJn6heIMU7X9hcsoo=
-github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
-github.com/aws/aws-sdk-go-v2/config v1.8.0 h1:O8EMFBOl6tue5gdJJV6U3Ikyl3lqgx6WrulCYrcy2SQ=
-github.com/aws/aws-sdk-go-v2/config v1.8.0/go.mod h1:w9+nMZ7soXCe5nT46Ri354SNhXDQ6v+V5wqDjnZE+GY=
-github.com/aws/aws-sdk-go-v2/credentials v1.4.0 h1:kmvesfjY861FzlCU9mvAfe01D9aeXcG2ZuC+k9F2YLM=
-github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0 h1:OxTAgH8Y4BXHD6PGCJ8DHx2kaZPCQfSTqmDsdRZFezE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.0/go.mod h1:CpNzHK9VEFUCknu50kkB8z58AH2B5DvPP7ea1LHve/Y=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2 h1:d95cddM3yTm4qffj3P6EnP+TzX1SSkWaQypXSgT/hpA=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.2.2/go.mod h1:BQV0agm+JEhqR+2RT5e1XTFIDcAAV0eW6z2trp+iduw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 h1:VNJ5NLBteVXEwE2F1zEXVmyIH58mZ6kIQGJoC7C+vkg=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.9.0 h1:g6EHC3RFpgbRR8/Yk6BTbzfPn+E3o6J3zWPrcjvVJTw=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.9.0/go.mod h1:BXA1CVaEd9TBOQ8G2ke7lMWdVggAeh35+h2HDO50z7s=
-github.com/aws/aws-sdk-go-v2/service/sso v1.4.0 h1:sHXMIKYS6YiLPzmKSvDpPmOpJDHxmAUgbiF49YNVztg=
-github.com/aws/aws-sdk-go-v2/service/sso v1.4.0/go.mod h1:+1fpWnL96DL23aXPpMGbsmKe8jLTEfbjuQoA4WS1VaA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.7.0 h1:1at4e5P+lvHNl2nUktdM2/v+rpICg/QSEr9TO/uW9vU=
-github.com/aws/aws-sdk-go-v2/service/sts v1.7.0/go.mod h1:0qcSMCyASQPN2sk/1KQLQ2Fh6yq8wm0HSDAimPhzCoM=
-github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc=
-github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
-github.com/beanstalkd/go-beanstalk v0.1.0 h1:IiNwYbAoVBDs5xEOmleGoX+DRD3Moz99EpATbl8672w=
-github.com/beanstalkd/go-beanstalk v0.1.0/go.mod h1:/G8YTyChOtpOArwLTQPY1CHB+i212+av35bkPXXj56Y=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=
-github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
-github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
-github.com/emicklei/proto v1.9.1 h1:MUgjFo5xlMwYv72TnF5xmmdKZ04u+dVbv6wdARv16D8=
-github.com/emicklei/proto v1.9.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fasthttp/websocket v1.4.3-rc.8 h1:6P/+ejKdkLC9UhkY7GlShGWYMDBiWQtIECLBTDZ/2LU=
-github.com/fasthttp/websocket v1.4.3-rc.8/go.mod h1:4m/MeZnTBQR2coy0HDUpyBXDkgtl2SxO+GZng0EKr6k=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
-github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-redis/redis/v8 v8.11.3 h1:GCjoYp8c+yQTJfc0n69iwSiHjvuAdruxl7elnZCxgt8=
-github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc=
-github.com/go-restit/lzjson v0.0.0-20161206095556-efe3c53acc68/go.mod h1:7vXSKQt83WmbPeyVjCfNT9YDJ5BUFmcwFsEjI9SCvYM=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofiber/fiber/v2 v2.18.0 h1:xCWYSVoTNibHpzfciPwUSZGiTyTpTXYchCwynuJU09s=
-github.com/gofiber/fiber/v2 v2.18.0/go.mod h1:/LdZHMUXZvTTo7gU4+b1hclqCAdoQphNQ9bi9gutPyI=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -182,612 +38,149 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
-github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4=
-github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
-github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
-github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
-github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891 h1:13nv5f/LNJxNpvpYm/u0NqrlFebon342f9Xu9GpklKc=
-github.com/rabbitmq/amqp091-go v0.0.0-20210823000215-c428a6150891/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873/go.mod h1:dmPawKuiAeG/aFYVs2i+Dyosoo7FNcm+Pi8iK6ZUrX8=
-github.com/savsgio/gotils v0.0.0-20210907153846-c06938798b52 h1:FODZE/jDkENIpW3JiMA9sXBQfNklTfClUNhR9k37dPY=
-github.com/savsgio/gotils v0.0.0-20210907153846-c06938798b52/go.mod h1:oejLrk1Y/5zOF+c/aHtXqn3TFlzzbAgPWg8zBiAHDas=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU=
github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v1.1.1 h1:T/YLemO5Yp7KPzS+lVtu+WsHn8yoSwTfItdAd1r3cck=
-github.com/smartystreets/assertions v1.1.1/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/spiral/endure v1.0.4 h1:qpProWUVuu6fRceMnIHs9SkpkjlzAxPl7UxSH6zUPDo=
-github.com/spiral/endure v1.0.4/go.mod h1:I9IoSCMtqXVmXX0TQ3Gu72Z1uIDVNKlhKXmcCoqnR/w=
github.com/spiral/errors v1.0.12 h1:38Waf8ZL/Xvxg4HTYGmrUbvi7TCHivmuatNQZlBhQ8s=
github.com/spiral/errors v1.0.12/go.mod h1:j5UReqxZxfkwXkI9mFY87VhEXcXmSg7kAk5Sswy1eEA=
github.com/spiral/goridge/v3 v3.2.1 h1:5IJofcvWYjAy+X5XevOhwf/8F0i0Bu/baPsBGiSgqzU=
github.com/spiral/goridge/v3 v3.2.1/go.mod h1:jDHXTORSxchJYCv2jG4vtZojsa+4JJyXmfdPefOpJ3c=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
-github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.29.0 h1:F5GKpytwFk5OhCuRh6H+d4vZAcEeNAwPTdwQnm6IERY=
-github.com/valyala/fasthttp v1.29.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
-github.com/vmihailenco/msgpack/v5 v5.3.4 h1:qMKAwOV+meBw2Y8k9cVwAy7qErtYCwBzZ2ellBfvnqc=
github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-github.com/yookoala/gofast v0.6.0 h1:E5x2acfUD7GkzCf8bmIMwnV10VxDy5tUCHc5LGhluwc=
-github.com/yookoala/gofast v0.6.0/go.mod h1:OJU201Q6HCaE1cASckaTbMm3KB6e0cZxK0mgqfwOKvQ=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec=
go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
-golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210916014120-12bc252f5db8 h1:/6y1LfuqNuQdHAm0jjtPtgRcxIxjVZgm5OTu8/QhZvk=
+golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
-golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw=
+golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20200908211811-12e1bf57a112/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210916144049-3192f974c780 h1:RE6jTVCXBKZ7U9atSg8N3bsjRvvUujhEPspbEhdyy8s=
+google.golang.org/genproto v0.0.0-20210916144049-3192f974c780/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
@@ -799,46 +192,18 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/ini.v1 v1.63.0 h1:2t0h8NA59dpVQpa5Yh8cIcR6nHAeBIEk0zlLVqfw4N4=
-gopkg.in/ini.v1 v1.63.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/pkg/payload/payload.go b/payload/payload.go
index e1e45ac1..e1e45ac1 100755
--- a/pkg/payload/payload.go
+++ b/payload/payload.go
diff --git a/pkg/worker_handler/constants.go b/pkg/worker_handler/constants.go
deleted file mode 100644
index 3355d9c2..00000000
--- a/pkg/worker_handler/constants.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package handler
-
-import "net/http"
-
-var http2pushHeaderKey = http.CanonicalHeaderKey("http2-push")
-
-// TrailerHeaderKey http header key
-var TrailerHeaderKey = http.CanonicalHeaderKey("trailer")
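Both constants are passed through http.CanonicalHeaderKey so that later lookups in net/http's canonicalized header map succeed. A tiny, self-contained illustration of that normalization (not taken from the repository):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// net/http stores header names in canonical form, so the deleted
	// constants pre-canonicalize their keys to make map lookups match.
	fmt.Println(http.CanonicalHeaderKey("http2-push")) // Http2-Push
	fmt.Println(http.CanonicalHeaderKey("trailer"))    // Trailer
}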
diff --git a/pkg/worker_handler/errors.go b/pkg/worker_handler/errors.go
deleted file mode 100644
index c3352a52..00000000
--- a/pkg/worker_handler/errors.go
+++ /dev/null
@@ -1,26 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package handler
-
-import (
- "errors"
- "net"
- "os"
- "syscall"
-)
-
-// Broken pipe
-var errEPIPE = errors.New("EPIPE(32) -> connection reset by peer")
-
-// handleWriteError just checks whether the error was caused by an aborted connection on Linux
-func handleWriteError(err error) error {
- if netErr, ok2 := err.(*net.OpError); ok2 {
- if syscallErr, ok3 := netErr.Err.(*os.SyscallError); ok3 {
- if errors.Is(syscallErr.Err, syscall.EPIPE) {
- return errEPIPE
- }
- }
- }
- return err
-}
diff --git a/pkg/worker_handler/errors_windows.go b/pkg/worker_handler/errors_windows.go
deleted file mode 100644
index 3c6c2186..00000000
--- a/pkg/worker_handler/errors_windows.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build windows
-// +build windows
-
-package handler
-
-import (
- "errors"
- "net"
- "os"
- "syscall"
-)
-
-// Software caused connection abort.
-// An established connection was aborted by the software in your host computer,
-// possibly due to a data transmission time-out or protocol error.
-var errEPIPE = errors.New("WSAECONNABORTED (10053) -> an established connection was aborted by peer")
-
-// handleWriteError just checks whether the error was caused by an aborted connection on Windows
-func handleWriteError(err error) error {
- if netErr, ok2 := err.(*net.OpError); ok2 {
- if syscallErr, ok3 := netErr.Err.(*os.SyscallError); ok3 {
- if syscallErr.Err == syscall.WSAECONNABORTED {
- return errEPIPE
- }
- }
- }
- return err
-}
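The two build-tagged files above implement the same broken-pipe detection with platform-specific error codes (EPIPE on Unix, WSAECONNABORTED on Windows). A minimal, runnable sketch of the shared unwrapping pattern; the checkBrokenPipe name is illustrative and not part of the codebase:

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

// checkBrokenPipe mirrors the deleted handleWriteError helpers: it unwraps a
// *net.OpError -> *os.SyscallError chain and reports whether the underlying
// errno is EPIPE. The function name is illustrative only.
func checkBrokenPipe(err error) bool {
	var opErr *net.OpError
	if errors.As(err, &opErr) {
		var sysErr *os.SyscallError
		if errors.As(opErr.Err, &sysErr) {
			return errors.Is(sysErr.Err, syscall.EPIPE)
		}
	}
	return false
}

func main() {
	err := &net.OpError{Op: "write", Err: os.NewSyscallError("write", syscall.EPIPE)}
	fmt.Println(checkBrokenPipe(err)) // true
}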
diff --git a/pkg/worker_handler/handler.go b/pkg/worker_handler/handler.go
deleted file mode 100644
index fc03563b..00000000
--- a/pkg/worker_handler/handler.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package handler
-
-import (
- "net"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// MB is 1024 bytes
-const MB uint64 = 1024 * 1024
-
-// ErrorEvent represents singular http error event.
-type ErrorEvent struct {
- // Request contains client request, must not be stored.
- Request *http.Request
-
- // Error - associated error, if any.
- Error error
-
- // event timings
- start time.Time
- elapsed time.Duration
-}
-
-// Elapsed returns duration of the invocation.
-func (e *ErrorEvent) Elapsed() time.Duration {
- return e.elapsed
-}
-
-// ResponseEvent represents singular http response event.
-type ResponseEvent struct {
- // Request contains client request, must not be stored.
- Request *Request
-
- // Response contains service response.
- Response *Response
-
- // event timings
- start time.Time
- elapsed time.Duration
-}
-
-// Elapsed returns duration of the invocation.
-func (e *ResponseEvent) Elapsed() time.Duration {
- return e.elapsed
-}
-
-// Handler serves HTTP connections to the underlying PHP application using the PSR-7 protocol. The context will include request headers,
-// parsed files and query; the payload will include the parsed form dataTree (if any).
-type Handler struct {
- maxRequestSize uint64
- uploads config.Uploads
- trusted config.Cidrs
- log logger.Logger
- pool pool.Pool
- mul sync.Mutex
- lsn []events.Listener
- internalHTTPCode uint64
-}
-
-// NewHandler returns the handler interface implementation
-func NewHandler(maxReqSize uint64, internalHTTPCode uint64, uploads config.Uploads, trusted config.Cidrs, pool pool.Pool) (*Handler, error) {
- if pool == nil {
- return nil, errors.E(errors.Str("pool should be initialized"))
- }
- return &Handler{
- maxRequestSize: maxReqSize * MB,
- uploads: uploads,
- pool: pool,
- trusted: trusted,
- internalHTTPCode: internalHTTPCode,
- }, nil
-}
-
-// AddListener attaches handler event controller.
-func (h *Handler) AddListener(l ...events.Listener) {
- h.mul.Lock()
- defer h.mul.Unlock()
-
- h.lsn = l
-}
-
-// ServeHTTP serves PSR-7 requests by passing them to the underlying application. Attempts to serve static files first if enabled.
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- const op = errors.Op("serve_http")
- start := time.Now()
-
- // validating request size
- if h.maxRequestSize != 0 {
- const op = errors.Op("http_handler_max_size")
- if length := r.Header.Get("content-length"); length != "" {
- // try to parse the value from the `content-length` header
- size, err := strconv.ParseInt(length, 10, 64)
- if err != nil {
- // if got an error while parsing -> assign 500 code to the writer and return
- http.Error(w, "", 500)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, errors.Str("error while parsing value from the `content-length` header")), start: start, elapsed: time.Since(start)})
- return
- }
-
- if size > int64(h.maxRequestSize) {
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, errors.Str("request body max size is exceeded")), start: start, elapsed: time.Since(start)})
- http.Error(w, errors.E(op, errors.Str("request body max size is exceeded")).Error(), http.StatusBadRequest)
- return
- }
- }
- }
-
- req, err := NewRequest(r, h.uploads)
- if err != nil {
- // if the pipe is broken, there is no sense in writing the header
- // in this case we just report the error
- if err == errEPIPE {
- h.sendEvent(ErrorEvent{Request: r, Error: err, start: start, elapsed: time.Since(start)})
- return
- }
-
- http.Error(w, errors.E(op, err).Error(), 500)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- return
- }
-
- // proxy IP resolution
- h.resolveIP(req)
-
- req.Open(h.log)
- defer req.Close(h.log)
-
- p, err := req.Payload()
- if err != nil {
- h.handleError(w, r, start, err)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- return
- }
-
- rsp, err := h.pool.Exec(p)
- if err != nil {
- h.handleError(w, r, start, err)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- return
- }
-
- resp, err := NewResponse(rsp)
- if err != nil {
- h.handleError(w, r, start, err)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- return
- }
-
- h.handleResponse(req, resp, start)
- err = resp.Write(w)
- if err != nil {
- http.Error(w, errors.E(op, err).Error(), 500)
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- }
-}
-
-// handleError handles internal RR errors and writes the configured internal HTTP error code
-func (h *Handler) handleError(w http.ResponseWriter, r *http.Request, start time.Time, err error) {
- const op = errors.Op("handle_error")
- // internal error types, user should not see them
- if errors.Is(errors.SoftJob, err) ||
- errors.Is(errors.WatcherStopped, err) ||
- errors.Is(errors.WorkerAllocate, err) ||
- errors.Is(errors.NoFreeWorkers, err) ||
- errors.Is(errors.ExecTTL, err) ||
- errors.Is(errors.IdleTTL, err) ||
- errors.Is(errors.TTL, err) ||
- errors.Is(errors.Encode, err) ||
- errors.Is(errors.Decode, err) ||
- errors.Is(errors.Network, err) {
- // write an internal server error
- w.WriteHeader(int(h.internalHTTPCode))
- h.sendEvent(ErrorEvent{Request: r, Error: errors.E(op, err), start: start, elapsed: time.Since(start)})
- }
-}
-
-// handleResponse triggers response event.
-func (h *Handler) handleResponse(req *Request, resp *Response, start time.Time) {
- h.sendEvent(ResponseEvent{Request: req, Response: resp, start: start, elapsed: time.Since(start)})
-}
-
-// sendEvent invokes event handler if any.
-func (h *Handler) sendEvent(event interface{}) {
- if h.lsn != nil {
- for i := range h.lsn {
- // do not block the pipeline
- // TODO not a good approach, redesign event bus
- i := i
- go func() {
- h.lsn[i](event)
- }()
- }
- }
-}
-
-// get the real client IP when the request passes through multiple proxies
-func (h *Handler) resolveIP(r *Request) {
- if h.trusted.IsTrusted(r.RemoteAddr) == false { //nolint:gosimple
- return
- }
-
- if r.Header.Get("X-Forwarded-For") != "" {
- ips := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
- ipCount := len(ips)
-
- for i := ipCount - 1; i >= 0; i-- {
- addr := strings.TrimSpace(ips[i])
- if net.ParseIP(addr) != nil {
- r.RemoteAddr = addr
- return
- }
- }
-
- return
- }
-
- // The logic here is the following:
- // In the general case we only expect the X-Real-Ip header. If it exists, we take the IP address from it and set the request RemoteAddr.
- // If there is no X-Real-Ip header, we also try the CloudFlare headers:
- // True-Client-IP is a general CF header into which CF copies the X-Real-Ip information.
- // CF-Connecting-IP is an Enterprise feature and is checked last.
- // These operations are near O(1) because the Header struct is a map type -> type MIMEHeader map[string][]string
- if r.Header.Get("X-Real-Ip") != "" {
- r.RemoteAddr = FetchIP(r.Header.Get("X-Real-Ip"))
- return
- }
-
- if r.Header.Get("True-Client-IP") != "" {
- r.RemoteAddr = FetchIP(r.Header.Get("True-Client-IP"))
- return
- }
-
- if r.Header.Get("CF-Connecting-IP") != "" {
- r.RemoteAddr = FetchIP(r.Header.Get("CF-Connecting-IP"))
- }
-}
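resolveIP above prefers the right-most valid X-Forwarded-For entry and otherwise consults X-Real-Ip, True-Client-IP and CF-Connecting-IP in that order. A standalone sketch of that precedence, using a hypothetical clientIP helper:

package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// clientIP sketches the precedence used by the deleted resolveIP: the
// right-most valid entry of X-Forwarded-For wins, otherwise X-Real-Ip,
// True-Client-IP and CF-Connecting-IP are consulted in that order.
// The helper name is illustrative only.
func clientIP(h http.Header, fallback string) string {
	if xff := h.Get("X-Forwarded-For"); xff != "" {
		parts := strings.Split(xff, ",")
		for i := len(parts) - 1; i >= 0; i-- {
			if addr := strings.TrimSpace(parts[i]); net.ParseIP(addr) != nil {
				return addr
			}
		}
		return fallback
	}
	for _, k := range []string{"X-Real-Ip", "True-Client-IP", "CF-Connecting-IP"} {
		if v := h.Get(k); v != "" {
			return v
		}
	}
	return fallback
}

func main() {
	h := http.Header{}
	h.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(clientIP(h, "127.0.0.1")) // 10.0.0.1 (right-most valid hop)
}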
diff --git a/pkg/worker_handler/parse.go b/pkg/worker_handler/parse.go
deleted file mode 100644
index 2790da2a..00000000
--- a/pkg/worker_handler/parse.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package handler
-
-import (
- "net/http"
-
- "github.com/spiral/roadrunner/v2/plugins/http/config"
-)
-
-// MaxLevel defines maximum tree depth for incoming request data and files.
-const MaxLevel = 127
-
-type dataTree map[string]interface{}
-type fileTree map[string]interface{}
-
-// parseData parses incoming request body into data tree.
-func parseData(r *http.Request) dataTree {
- data := make(dataTree)
- if r.PostForm != nil {
- for k, v := range r.PostForm {
- data.push(k, v)
- }
- }
-
- if r.MultipartForm != nil {
- for k, v := range r.MultipartForm.Value {
- data.push(k, v)
- }
- }
-
- return data
-}
-
-// pushes value into data tree.
-func (d dataTree) push(k string, v []string) {
- keys := FetchIndexes(k)
- if len(keys) <= MaxLevel {
- d.mount(keys, v)
- }
-}
-
-// mount mounts data tree recursively.
-func (d dataTree) mount(i []string, v []string) {
- if len(i) == 1 {
- // single value context (last element)
- d[i[0]] = v[len(v)-1]
- return
- }
-
- if len(i) == 2 && i[1] == "" {
- // non associated array of elements
- d[i[0]] = v
- return
- }
-
- if p, ok := d[i[0]]; ok {
- p.(dataTree).mount(i[1:], v)
- return
- }
-
- d[i[0]] = make(dataTree)
- d[i[0]].(dataTree).mount(i[1:], v)
-}
-
-// parseUploads parses uploaded files of the incoming request (including the multipart form dataTree)
-func parseUploads(r *http.Request, cfg config.Uploads) *Uploads {
- u := &Uploads{
- cfg: cfg,
- tree: make(fileTree),
- list: make([]*FileUpload, 0),
- }
-
- for k, v := range r.MultipartForm.File {
- files := make([]*FileUpload, 0, len(v))
- for _, f := range v {
- files = append(files, NewUpload(f))
- }
-
- u.list = append(u.list, files...)
- u.tree.push(k, files)
- }
-
- return u
-}
-
-// pushes new file upload into its proper place.
-func (d fileTree) push(k string, v []*FileUpload) {
- keys := FetchIndexes(k)
- if len(keys) <= MaxLevel {
- d.mount(keys, v)
- }
-}
-
-// mount mounts data tree recursively.
-func (d fileTree) mount(i []string, v []*FileUpload) {
- if len(i) == 1 {
- // single value context
- d[i[0]] = v[0]
- return
- }
-
- if len(i) == 2 && i[1] == "" {
- // non associated array of elements
- d[i[0]] = v
- return
- }
-
- if p, ok := d[i[0]]; ok {
- p.(fileTree).mount(i[1:], v)
- return
- }
-
- d[i[0]] = make(fileTree)
- d[i[0]].(fileTree).mount(i[1:], v)
-}
-
-// FetchIndexes parses input name and splits it into separate indexes list.
-func FetchIndexes(s string) []string {
- var (
- pos int
- ch string
- keys = make([]string, 1)
- )
-
- for _, c := range s {
- ch = string(c)
- switch ch {
- case " ":
- // ignore all spaces
- continue
- case "[":
- pos = 1
- continue
- case "]":
- if pos == 1 {
- keys = append(keys, "")
- }
- pos = 2
- default:
- if pos == 1 || pos == 2 {
- keys = append(keys, "")
- }
-
- keys[len(keys)-1] += ch
- pos = 0
- }
- }
-
- return keys
-}
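FetchIndexes above is the key-splitting step behind the dataTree/fileTree mounting: bracketed form names become index lists that the trees are then mounted by. A runnable reproduction of the splitting rule, shown for illustration only:

package main

import "fmt"

// fetchIndexes reproduces the splitting rule of the deleted FetchIndexes:
// "user[address][city]" -> ["user", "address", "city"], "tags[]" -> ["tags", ""].
// Shown here only to illustrate the parsing, not as the canonical implementation.
func fetchIndexes(s string) []string {
	var pos int
	keys := make([]string, 1)
	for _, c := range s {
		switch string(c) {
		case " ":
			// ignore all spaces
			continue
		case "[":
			pos = 1
			continue
		case "]":
			if pos == 1 {
				keys = append(keys, "")
			}
			pos = 2
		default:
			if pos == 1 || pos == 2 {
				keys = append(keys, "")
			}
			keys[len(keys)-1] += string(c)
			pos = 0
		}
	}
	return keys
}

func main() {
	fmt.Println(fetchIndexes("user[address][city]")) // [user address city]
	fmt.Println(fetchIndexes("tags[]"))              // [tags ]
}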
diff --git a/pkg/worker_handler/request.go b/pkg/worker_handler/request.go
deleted file mode 100644
index 3d60897b..00000000
--- a/pkg/worker_handler/request.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package handler
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "strings"
-
- j "github.com/json-iterator/go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/plugins/http/attributes"
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-var json = j.ConfigCompatibleWithStandardLibrary
-
-const (
- defaultMaxMemory = 32 << 20 // 32 MB
- contentNone = iota + 900
- contentStream
- contentMultipart
- contentFormData
-)
-
-// Request maps net/http requests to a PSR-7 compatible structure and manages the state of temporary uploaded files.
-type Request struct {
- // RemoteAddr contains the IP address of the client; make sure to check X-Real-Ip and X-Forwarded-For for the real client address.
- RemoteAddr string `json:"remoteAddr"`
-
- // Protocol includes HTTP protocol version.
- Protocol string `json:"protocol"`
-
- // Method contains name of HTTP method used for the request.
- Method string `json:"method"`
-
- // URI contains full request URI with scheme and query.
- URI string `json:"uri"`
-
- // Header contains list of request headers.
- Header http.Header `json:"headers"`
-
- // Cookies contains list of request cookies.
- Cookies map[string]string `json:"cookies"`
-
- // RawQuery contains the unparsed query string (to be parsed on the PHP end).
- RawQuery string `json:"rawQuery"`
-
- // Parsed indicates that request body has been parsed on RR end.
- Parsed bool `json:"parsed"`
-
- // Uploads contains list of uploaded files, their names, sizes and associations with temporary files.
- Uploads *Uploads `json:"uploads"`
-
- // Attributes can be set by chained middleware to safely pass values from Go to PHP. See: GetAttribute, SetAttribute functions.
- Attributes map[string]interface{} `json:"attributes"`
-
- // request body can be parsedData or []byte
- body interface{}
-}
-
-func FetchIP(pair string) string {
- if !strings.ContainsRune(pair, ':') {
- return pair
- }
-
- addr, _, _ := net.SplitHostPort(pair)
- return addr
-}
-
-// NewRequest creates new PSR7 compatible request using net/http request.
-func NewRequest(r *http.Request, cfg config.Uploads) (*Request, error) {
- req := &Request{
- RemoteAddr: FetchIP(r.RemoteAddr),
- Protocol: r.Proto,
- Method: r.Method,
- URI: URI(r),
- Header: r.Header,
- Cookies: make(map[string]string),
- RawQuery: r.URL.RawQuery,
- Attributes: attributes.All(r),
- }
-
- for _, c := range r.Cookies() {
- if v, err := url.QueryUnescape(c.Value); err == nil {
- req.Cookies[c.Name] = v
- }
- }
-
- switch req.contentType() {
- case contentNone:
- return req, nil
-
- case contentStream:
- var err error
- req.body, err = ioutil.ReadAll(r.Body)
- return req, err
-
- case contentMultipart:
- if err := r.ParseMultipartForm(defaultMaxMemory); err != nil {
- return nil, err
- }
-
- req.Uploads = parseUploads(r, cfg)
- fallthrough
- case contentFormData:
- if err := r.ParseForm(); err != nil {
- return nil, err
- }
-
- req.body = parseData(r)
- }
-
- req.Parsed = true
- return req, nil
-}
-
-// Open moves all uploaded files to the temporary directory so they can be given to PHP later.
-func (r *Request) Open(log logger.Logger) {
- if r.Uploads == nil {
- return
- }
-
- r.Uploads.Open(log)
-}
-
-// Close clears all temp file uploads
-func (r *Request) Close(log logger.Logger) {
- if r.Uploads == nil {
- return
- }
-
- r.Uploads.Clear(log)
-}
-
-// Payload returns the RoadRunner payload marshaled from the PSR-7 data; values are encoded as JSON. Make sure to open
-// the files prior to calling this method.
-func (r *Request) Payload() (*payload.Payload, error) {
- const op = errors.Op("marshal_payload")
- p := &payload.Payload{}
-
- var err error
- if p.Context, err = json.Marshal(r); err != nil {
- return nil, errors.E(op, errors.Encode, err)
- }
-
- if r.Parsed {
- if p.Body, err = json.Marshal(r.body); err != nil {
- return nil, errors.E(op, errors.Encode, err)
- }
- } else if r.body != nil {
- p.Body = r.body.([]byte)
- }
-
- return p, nil
-}
-
-// contentType returns the payload content type.
-func (r *Request) contentType() int {
- if r.Method == "HEAD" || r.Method == "OPTIONS" {
- return contentNone
- }
-
- ct := r.Header.Get("content-type")
- if strings.Contains(ct, "application/x-www-form-urlencoded") {
- return contentFormData
- }
-
- if strings.Contains(ct, "multipart/form-data") {
- return contentMultipart
- }
-
- return contentStream
-}
-
-// URI fetches the full URI from the request as a string (including the https scheme if a TLS connection is enabled).
-func URI(r *http.Request) string {
- if r.URL.Host != "" {
- return r.URL.String()
- }
- if r.TLS != nil {
- return fmt.Sprintf("https://%s%s", r.Host, r.URL.String())
- }
-
- return fmt.Sprintf("http://%s%s", r.Host, r.URL.String())
-}
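NewRequest above decides how to read the body from the method and Content-Type: HEAD/OPTIONS carry no body, url-encoded and multipart forms are parsed on the Go side, and everything else is consumed as a raw stream. A small sketch of that branching with an illustrative bodyKind helper:

package main

import (
	"fmt"
	"strings"
)

// bodyKind mirrors the content-type branching in the deleted contentType
// method: HEAD/OPTIONS carry no body, form-urlencoded and multipart are
// parsed on the Go side, everything else is read as a raw stream.
// Names and return values are illustrative.
func bodyKind(method, contentType string) string {
	if method == "HEAD" || method == "OPTIONS" {
		return "none"
	}
	switch {
	case strings.Contains(contentType, "application/x-www-form-urlencoded"):
		return "form-data"
	case strings.Contains(contentType, "multipart/form-data"):
		return "multipart"
	default:
		return "stream"
	}
}

func main() {
	fmt.Println(bodyKind("POST", "application/x-www-form-urlencoded; charset=utf-8")) // form-data
	fmt.Println(bodyKind("POST", "application/json"))                                 // stream
	fmt.Println(bodyKind("HEAD", ""))                                                 // none
}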
diff --git a/pkg/worker_handler/response.go b/pkg/worker_handler/response.go
deleted file mode 100644
index d22f09d4..00000000
--- a/pkg/worker_handler/response.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package handler
-
-import (
- "io"
- "net/http"
- "strings"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/payload"
-)
-
-// Response handles PSR7 response logic.
-type Response struct {
- // Status contains response status.
- Status int `json:"status"`
-
- // Header contains list of response headers.
- Headers map[string][]string `json:"headers"`
-
- // associated Body payload.
- Body interface{}
-}
-
-// NewResponse creates new response based on given pool payload.
-func NewResponse(p *payload.Payload) (*Response, error) {
- const op = errors.Op("http_response")
- r := &Response{Body: p.Body}
- if err := json.Unmarshal(p.Context, r); err != nil {
- return nil, errors.E(op, errors.Decode, err)
- }
-
- return r, nil
-}
-
-// Write writes response headers, status and body into ResponseWriter.
-func (r *Response) Write(w http.ResponseWriter) error {
- // INFO: maps are reference types in Go
- p := handlePushHeaders(r.Headers)
- if pusher, ok := w.(http.Pusher); ok {
- for _, v := range p {
- err := pusher.Push(v, nil)
- if err != nil {
- return err
- }
- }
- }
-
- handleTrailers(r.Headers)
- for n, h := range r.Headers {
- for _, v := range h {
- w.Header().Add(n, v)
- }
- }
-
- w.WriteHeader(r.Status)
-
- if data, ok := r.Body.([]byte); ok {
- _, err := w.Write(data)
- if err != nil {
- return handleWriteError(err)
- }
- }
-
- if rc, ok := r.Body.(io.Reader); ok {
- if _, err := io.Copy(w, rc); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func handlePushHeaders(h map[string][]string) []string {
- var p []string
- pushHeader, ok := h[http2pushHeaderKey]
- if !ok {
- return p
- }
-
- p = append(p, pushHeader...)
-
- delete(h, http2pushHeaderKey)
-
- return p
-}
-
-func handleTrailers(h map[string][]string) {
- trailers, ok := h[TrailerHeaderKey]
- if !ok {
- return
- }
-
- for _, tr := range trailers {
- for _, n := range strings.Split(tr, ",") {
- n = strings.Trim(n, "\t ")
- if v, ok := h[n]; ok {
- h["Trailer:"+n] = v
-
- delete(h, n)
- }
- }
- }
-
- delete(h, TrailerHeaderKey)
-}
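handleTrailers above re-keys every header named in the Trailer header with a "Trailer:" prefix so it can be emitted after the body. A self-contained sketch of the same promotion, assuming a hypothetical promoteTrailers helper:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// promoteTrailers sketches what the deleted handleTrailers does: every header
// named in the "Trailer" header is re-keyed with a "Trailer:" prefix so the
// writer can emit it after the body. Helper name is illustrative.
func promoteTrailers(h map[string][]string) {
	key := http.CanonicalHeaderKey("trailer")
	trailers, ok := h[key]
	if !ok {
		return
	}
	for _, tr := range trailers {
		for _, n := range strings.Split(tr, ",") {
			n = strings.Trim(n, "\t ")
			if v, ok := h[n]; ok {
				h["Trailer:"+n] = v
				delete(h, n)
			}
		}
	}
	delete(h, key)
}

func main() {
	h := map[string][]string{
		"Trailer":    {"X-Checksum"},
		"X-Checksum": {"abc123"},
	}
	promoteTrailers(h)
	fmt.Println(h) // map[Trailer:X-Checksum:[abc123]]
}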
diff --git a/pkg/worker_handler/uploads.go b/pkg/worker_handler/uploads.go
deleted file mode 100644
index e695000e..00000000
--- a/pkg/worker_handler/uploads.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package handler
-
-import (
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-
- "io"
- "io/ioutil"
- "mime/multipart"
- "os"
- "sync"
-)
-
-const (
- // UploadErrorOK - no error, the file uploaded with success.
- UploadErrorOK = 0
-
- // UploadErrorNoFile - no file was uploaded.
- UploadErrorNoFile = 4
-
- // UploadErrorNoTmpDir - missing a temporary folder.
- UploadErrorNoTmpDir = 6
-
- // UploadErrorCantWrite - failed to write file to disk.
- UploadErrorCantWrite = 7
-
- // UploadErrorExtension - forbidden file extension.
- UploadErrorExtension = 8
-)
-
-// Uploads manages the uploaded files tree and temporary files.
-type Uploads struct {
- // associated temp directory and forbidden extensions.
- cfg config.Uploads
-
- // pre processed data tree for Uploads.
- tree fileTree
-
- // flat list of all file Uploads.
- list []*FileUpload
-}
-
-// MarshalJSON marshals the upload tree into JSON.
-func (u *Uploads) MarshalJSON() ([]byte, error) {
- return json.Marshal(u.tree)
-}
-
-// Open moves all uploaded files to the temp directory; errors, including temp directory issues, are logged. File errors
-// are handled individually.
-func (u *Uploads) Open(log logger.Logger) {
- var wg sync.WaitGroup
- for _, f := range u.list {
- wg.Add(1)
- go func(f *FileUpload) {
- defer wg.Done()
- err := f.Open(u.cfg)
- if err != nil && log != nil {
- log.Error("error opening the file", "err", err)
- }
- }(f)
- }
-
- wg.Wait()
-}
-
-// Clear deletes all temporary files.
-func (u *Uploads) Clear(log logger.Logger) {
- for _, f := range u.list {
- if f.TempFilename != "" && exists(f.TempFilename) {
- err := os.Remove(f.TempFilename)
- if err != nil && log != nil {
- log.Error("error removing the file", "err", err)
- }
- }
- }
-}
-
-// FileUpload represents singular file NewUpload.
-type FileUpload struct {
- // Name contains the filename specified by the client.
- Name string `json:"name"`
-
- // Mime contains mime-type provided by the client.
- Mime string `json:"mime"`
-
- // Size of the uploaded file.
- Size int64 `json:"size"`
-
- // Error indicates file upload error (if any). See http://php.net/manual/en/features.file-upload.errors.php
- Error int `json:"error"`
-
- // TempFilename points to temporary file location.
- TempFilename string `json:"tmpName"`
-
- // associated file header
- header *multipart.FileHeader
-}
-
-// NewUpload wraps net/http upload into PSR-7 compatible structure.
-func NewUpload(f *multipart.FileHeader) *FileUpload {
- return &FileUpload{
- Name: f.Filename,
- Mime: f.Header.Get("Content-Type"),
- Error: UploadErrorOK,
- header: f,
- }
-}
-
-// Open moves file content into temporary file available for PHP.
-// NOTE:
-// There are 2 deferred functions, and if both of them return errors,
-// the error from closing the temp file is overwritten by the error from closing the main file
-// STACK
-// DEFER FILE CLOSE (2)
-// DEFER TMP CLOSE (1)
-func (f *FileUpload) Open(cfg config.Uploads) (err error) {
- if cfg.Forbids(f.Name) {
- f.Error = UploadErrorExtension
- return nil
- }
-
- file, err := f.header.Open()
- if err != nil {
- f.Error = UploadErrorNoFile
- return err
- }
-
- defer func() {
- // close the main file
- err = file.Close()
- }()
-
- tmp, err := ioutil.TempFile(cfg.TmpDir(), "upload")
- if err != nil {
- // most likely cause of this issue is missing tmp dir
- f.Error = UploadErrorNoTmpDir
- return err
- }
-
- f.TempFilename = tmp.Name()
- defer func() {
- // close the temp file
- err = tmp.Close()
- }()
-
- if f.Size, err = io.Copy(tmp, file); err != nil {
- f.Error = UploadErrorCantWrite
- }
-
- return err
-}
-
-// exists reports whether the file exists.
-func exists(path string) bool {
- if _, err := os.Stat(path); os.IsNotExist(err) {
- return false
- }
- return true
-}
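The NOTE on FileUpload.Open above relies on deferred closers running LIFO while assigning to the named return value, so the main file's close error overwrites the temp file's. A tiny demonstration of that behaviour (illustrative code, not from the repository):

package main

import (
	"errors"
	"fmt"
)

// Deferred closers run LIFO and each assignment to the named return value
// overwrites the previous one, which is exactly the caveat spelled out in
// the NOTE above FileUpload.Open: the outer (file) close error wins over
// the inner (tmp) close error.
func openLike() (err error) {
	defer func() { err = errors.New("file close error") }() // registered first, runs last, wins
	defer func() { err = errors.New("tmp close error") }()  // registered last, runs first, overwritten
	return nil
}

func main() {
	fmt.Println(openLike()) // file close error
}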
diff --git a/plugins/amqp/amqpjobs/config.go b/plugins/amqp/amqpjobs/config.go
deleted file mode 100644
index ac2f6e53..00000000
--- a/plugins/amqp/amqpjobs/config.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package amqpjobs
-
-// pipeline rabbitmq info
-const (
- exchangeKey string = "exchange"
- exchangeType string = "exchange_type"
- queue string = "queue"
- routingKey string = "routing_key"
- prefetch string = "prefetch"
- exclusive string = "exclusive"
- priority string = "priority"
- multipleAsk string = "multiple_ask"
- requeueOnFail string = "requeue_on_fail"
-
- dlx string = "x-dead-letter-exchange"
- dlxRoutingKey string = "x-dead-letter-routing-key"
- dlxTTL string = "x-message-ttl"
- dlxExpires string = "x-expires"
-
- contentType string = "application/octet-stream"
-)
-
-type GlobalCfg struct {
- Addr string `mapstructure:"addr"`
-}
-
-// Config is used to parse pipeline configuration
-type Config struct {
- Prefetch int `mapstructure:"prefetch"`
- Queue string `mapstructure:"queue"`
- Priority int64 `mapstructure:"priority"`
- Exchange string `mapstructure:"exchange"`
- ExchangeType string `mapstructure:"exchange_type"`
- RoutingKey string `mapstructure:"routing_key"`
- Exclusive bool `mapstructure:"exclusive"`
- MultipleAck bool `mapstructure:"multiple_ask"`
- RequeueOnFail bool `mapstructure:"requeue_on_fail"`
-}
-
-func (c *Config) InitDefault() {
- // all options should be in sync with the pipeline defaults in the FromPipeline method
- if c.ExchangeType == "" {
- c.ExchangeType = "direct"
- }
-
- if c.Exchange == "" {
- c.Exchange = "amqp.default"
- }
-
- if c.Queue == "" {
- c.Queue = "default"
- }
-
- if c.Prefetch == 0 {
- c.Prefetch = 10
- }
-
- if c.Priority == 0 {
- c.Priority = 10
- }
-}
-
-func (c *GlobalCfg) InitDefault() {
- if c.Addr == "" {
- c.Addr = "amqp://guest:[email protected]:5672/"
- }
-}
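InitDefault above fills every zero-valued field with the documented default (direct exchange, amqp.default, queue "default", prefetch 10, priority 10). A minimal sketch of the same defaulting, with illustrative local names:

package main

import "fmt"

// amqpConfig and initDefault mirror the defaulting behaviour of the deleted
// Config.InitDefault: any zero-valued field falls back to its default.
// The snippet itself is illustrative, not the canonical implementation.
type amqpConfig struct {
	Prefetch     int
	Queue        string
	Priority     int64
	Exchange     string
	ExchangeType string
}

func (c *amqpConfig) initDefault() {
	if c.ExchangeType == "" {
		c.ExchangeType = "direct"
	}
	if c.Exchange == "" {
		c.Exchange = "amqp.default"
	}
	if c.Queue == "" {
		c.Queue = "default"
	}
	if c.Prefetch == 0 {
		c.Prefetch = 10
	}
	if c.Priority == 0 {
		c.Priority = 10
	}
}

func main() {
	cfg := amqpConfig{Queue: "emails"}
	cfg.initDefault()
	fmt.Printf("%+v\n", cfg) // {Prefetch:10 Queue:emails Priority:10 Exchange:amqp.default ExchangeType:direct}
}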
diff --git a/plugins/amqp/amqpjobs/consumer.go b/plugins/amqp/amqpjobs/consumer.go
deleted file mode 100644
index 2ff0a40a..00000000
--- a/plugins/amqp/amqpjobs/consumer.go
+++ /dev/null
@@ -1,524 +0,0 @@
-package amqpjobs
-
-import (
- "context"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/google/uuid"
- amqp "github.com/rabbitmq/amqp091-go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const (
- pluginName string = "amqp"
-)
-
-type consumer struct {
- sync.Mutex
- log logger.Logger
- pq priorityqueue.Queue
- eh events.Handler
-
- pipeline atomic.Value
-
- // amqp connection
- conn *amqp.Connection
- consumeChan *amqp.Channel
- publishChan chan *amqp.Channel
- consumeID string
- connStr string
-
- retryTimeout time.Duration
- //
- // prefetch QoS AMQP
- //
- prefetch int
- //
- // pipeline's priority
- //
- priority int64
- exchangeName string
- queue string
- exclusive bool
- exchangeType string
- routingKey string
- multipleAck bool
- requeueOnFail bool
-
- listeners uint32
- delayed *int64
- stopCh chan struct{}
-}
-
-// NewAMQPConsumer initializes rabbitmq pipeline
-func NewAMQPConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_amqp_consumer")
- // we need to obtain two parts of the amqp information here.
- // first part - the address to connect to, located in the global section under the amqp pluginName
- // second part - queues and other pipeline information
- // if no such key - error
- if !cfg.Has(configKey) {
- return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
- }
-
- // if no global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
- }
-
- // PARSE CONFIGURATION START -------
- var pipeCfg Config
- var globalCfg GlobalCfg
-
- err := cfg.UnmarshalKey(configKey, &pipeCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipeCfg.InitDefault()
-
- err = cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
- // PARSE CONFIGURATION END -------
-
- jb := &consumer{
- log: log,
- pq: pq,
- eh: e,
- consumeID: uuid.NewString(),
- stopCh: make(chan struct{}),
- // TODO to config
- retryTimeout: time.Minute * 5,
- priority: pipeCfg.Priority,
- delayed: utils.Int64(0),
-
- publishChan: make(chan *amqp.Channel, 1),
- routingKey: pipeCfg.RoutingKey,
- queue: pipeCfg.Queue,
- exchangeType: pipeCfg.ExchangeType,
- exchangeName: pipeCfg.Exchange,
- prefetch: pipeCfg.Prefetch,
- exclusive: pipeCfg.Exclusive,
- multipleAck: pipeCfg.MultipleAck,
- requeueOnFail: pipeCfg.RequeueOnFail,
- }
-
- jb.conn, err = amqp.Dial(globalCfg.Addr)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // save address
- jb.connStr = globalCfg.Addr
-
- err = jb.initRabbitMQ()
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pch, err := jb.conn.Channel()
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- jb.publishChan <- pch
-
- // run redialer and requeue listener for the connection
- jb.redialer()
-
- return jb, nil
-}
-
-func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_amqp_consumer_from_pipeline")
- // we need to obtain two parts of the amqp information here.
- // first part - the address to connect to, located in the global section under the amqp pluginName
- // second part - queues and other pipeline information
-
- // only global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global amqp configuration, global configuration should contain amqp addrs"))
- }
-
- // PARSE CONFIGURATION -------
- var globalCfg GlobalCfg
-
- err := cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
-
- // PARSE CONFIGURATION -------
-
- jb := &consumer{
- log: log,
- eh: e,
- pq: pq,
- consumeID: uuid.NewString(),
- stopCh: make(chan struct{}),
- retryTimeout: time.Minute * 5,
- delayed: utils.Int64(0),
-
- publishChan: make(chan *amqp.Channel, 1),
- routingKey: pipeline.String(routingKey, ""),
- queue: pipeline.String(queue, "default"),
- exchangeType: pipeline.String(exchangeType, "direct"),
- exchangeName: pipeline.String(exchangeKey, "amqp.default"),
- prefetch: pipeline.Int(prefetch, 10),
- priority: int64(pipeline.Int(priority, 10)),
- exclusive: pipeline.Bool(exclusive, false),
- multipleAck: pipeline.Bool(multipleAsk, false),
- requeueOnFail: pipeline.Bool(requeueOnFail, false),
- }
-
- jb.conn, err = amqp.Dial(globalCfg.Addr)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // save address
- jb.connStr = globalCfg.Addr
-
- err = jb.initRabbitMQ()
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pch, err := jb.conn.Channel()
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- jb.publishChan <- pch
-
- // register the pipeline
- // error here is always nil
- _ = jb.Register(context.Background(), pipeline)
-
- // run redialer for the connection
- jb.redialer()
-
- return jb, nil
-}
-
-func (c *consumer) Push(ctx context.Context, job *job.Job) error {
- const op = errors.Op("rabbitmq_push")
- // check if the pipeline registered
-
- // load atomic value
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != job.Options.Pipeline {
- return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", job.Options.Pipeline, pipe.Name()))
- }
-
- err := c.handleItem(ctx, fromJob(job))
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (c *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
- c.pipeline.Store(p)
- return nil
-}
-
-func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
- start := time.Now()
- const op = errors.Op("rabbit_run")
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p.Name() {
- return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
- }
-
- // protect connection (redial)
- c.Lock()
- defer c.Unlock()
-
- var err error
- c.consumeChan, err = c.conn.Channel()
- if err != nil {
- return errors.E(op, err)
- }
-
- err = c.consumeChan.Qos(c.prefetch, 0, false)
- if err != nil {
- return errors.E(op, err)
- }
-
- // start reading messages from the channel
- deliv, err := c.consumeChan.Consume(
- c.queue,
- c.consumeID,
- false,
- false,
- false,
- false,
- nil,
- )
- if err != nil {
- return errors.E(op, err)
- }
-
- // run listener
- c.listener(deliv)
-
- atomic.StoreUint32(&c.listeners, 1)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (c *consumer) State(ctx context.Context) (*jobState.State, error) {
- const op = errors.Op("amqp_driver_state")
- select {
- case pch := <-c.publishChan:
- defer func() {
- c.publishChan <- pch
- }()
-
- q, err := pch.QueueInspect(c.queue)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- return &jobState.State{
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Queue: q.Name,
- Active: int64(q.Messages),
- Delayed: atomic.LoadInt64(c.delayed),
- Ready: ready(atomic.LoadUint32(&c.listeners)),
- }, nil
-
- case <-ctx.Done():
- return nil, errors.E(op, errors.TimeOut, ctx.Err())
- }
-}
-
-func (c *consumer) Pause(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested pause on: ", p)
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 0 {
- c.log.Warn("no active listeners, nothing to pause")
- return
- }
-
- atomic.AddUint32(&c.listeners, ^uint32(0))
-
- // protect connection (redial)
- c.Lock()
- defer c.Unlock()
-
- err := c.consumeChan.Cancel(c.consumeID, true)
- if err != nil {
- c.log.Error("cancel publish channel, forcing close", "error", err)
- errCl := c.consumeChan.Close()
- if errCl != nil {
- c.log.Error("force close failed", "error", err)
- return
- }
- return
- }
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipePaused,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Resume(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested resume on: ", p)
- }
-
- // protect connection (redial)
- c.Lock()
- defer c.Unlock()
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 1 {
- c.log.Warn("amqp listener already in the active state")
- return
- }
-
- var err error
- c.consumeChan, err = c.conn.Channel()
- if err != nil {
- c.log.Error("create channel on rabbitmq connection", "error", err)
- return
- }
-
- err = c.consumeChan.Qos(c.prefetch, 0, false)
- if err != nil {
- c.log.Error("qos set failed", "error", err)
- return
- }
-
- // start reading messages from the channel
- deliv, err := c.consumeChan.Consume(
- c.queue,
- c.consumeID,
- false,
- false,
- false,
- false,
- nil,
- )
- if err != nil {
- c.log.Error("consume operation failed", "error", err)
- return
- }
-
- // run listener
- c.listener(deliv)
-
- // increase number of listeners
- atomic.AddUint32(&c.listeners, 1)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Stop(context.Context) error {
- start := time.Now()
- c.stopCh <- struct{}{}
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeStopped,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-// handleItem
-func (c *consumer) handleItem(ctx context.Context, msg *Item) error {
- const op = errors.Op("rabbitmq_handle_item")
- select {
- case pch := <-c.publishChan:
- // return the channel back
- defer func() {
- c.publishChan <- pch
- }()
-
- // convert
- table, err := pack(msg.ID(), msg)
- if err != nil {
- return errors.E(op, err)
- }
-
- const op = errors.Op("rabbitmq_handle_item")
- // handle timeouts
- if msg.Options.DelayDuration() > 0 {
- atomic.AddInt64(c.delayed, 1)
- // TODO declare separate method for this if condition
- // TODO dlx cache channel??
- delayMs := int64(msg.Options.DelayDuration().Seconds() * 1000)
- tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, c.exchangeName, c.queue)
- _, err = pch.QueueDeclare(tmpQ, true, false, false, false, amqp.Table{
- dlx: c.exchangeName,
- dlxRoutingKey: c.routingKey,
- dlxTTL: delayMs,
- dlxExpires: delayMs * 2,
- })
- if err != nil {
- atomic.AddInt64(c.delayed, ^int64(0))
- return errors.E(op, err)
- }
-
- err = pch.QueueBind(tmpQ, tmpQ, c.exchangeName, false, nil)
- if err != nil {
- atomic.AddInt64(c.delayed, ^int64(0))
- return errors.E(op, err)
- }
-
- // insert to the local, limited pipeline
- err = pch.Publish(c.exchangeName, tmpQ, false, false, amqp.Publishing{
- Headers: table,
- ContentType: contentType,
- Timestamp: time.Now(),
- DeliveryMode: amqp.Persistent,
- Body: msg.Body(),
- })
-
- if err != nil {
- atomic.AddInt64(c.delayed, ^int64(0))
- return errors.E(op, err)
- }
-
- return nil
- }
-
- // insert to the local, limited pipeline
- err = pch.Publish(c.exchangeName, c.routingKey, false, false, amqp.Publishing{
- Headers: table,
- ContentType: contentType,
- Timestamp: time.Now(),
- DeliveryMode: amqp.Persistent,
- Body: msg.Body(),
- })
-
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
- case <-ctx.Done():
- return errors.E(op, errors.TimeOut, ctx.Err())
- }
-}
-
-func ready(r uint32) bool {
- return r > 0
-}
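The delayed-job branch of handleItem above declares a per-delay queue named delayed-<ms>.<exchange>.<queue> whose dead-letter arguments route expired messages back to the real exchange. A sketch of the queue name and argument construction only (no AMQP connection involved), using a hypothetical delayedQueueArgs helper:

package main

import "fmt"

// delayedQueueArgs sketches how the deleted handleItem derives the per-delay
// queue and its dead-letter arguments: messages sit in
// "delayed-<ms>.<exchange>.<queue>" until x-message-ttl expires, then the DLX
// routes them to the real queue. Pure string/map construction here.
func delayedQueueArgs(exchange, queue, routingKey string, delaySec int64) (string, map[string]interface{}) {
	delayMs := delaySec * 1000
	tmpQ := fmt.Sprintf("delayed-%d.%s.%s", delayMs, exchange, queue)
	args := map[string]interface{}{
		"x-dead-letter-exchange":    exchange,
		"x-dead-letter-routing-key": routingKey,
		"x-message-ttl":             delayMs,
		"x-expires":                 delayMs * 2,
	}
	return tmpQ, args
}

func main() {
	name, args := delayedQueueArgs("amqp.default", "default", "default", 30)
	fmt.Println(name)                                     // delayed-30000.amqp.default.default
	fmt.Println(args["x-message-ttl"], args["x-expires"]) // 30000 60000
}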
diff --git a/plugins/amqp/amqpjobs/item.go b/plugins/amqp/amqpjobs/item.go
deleted file mode 100644
index b837ff86..00000000
--- a/plugins/amqp/amqpjobs/item.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package amqpjobs
-
-import (
- "context"
- "fmt"
- "sync/atomic"
- "time"
-
- json "github.com/json-iterator/go"
- amqp "github.com/rabbitmq/amqp091-go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type Item struct {
- // Job contains pluginName of job broker (usually PHP class).
- Job string `json:"job"`
-
- // Ident is unique identifier of the job, should be provided from outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
- // Headers with key-values pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle given job.
-type Options struct {
- // Priority is job priority, default - 10
- // pointer to distinguish 0 as a priority and nil as priority not set
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-
- // private
- // ack delegates an acknowledgement through the Acknowledger interface that the client or server has finished work on a delivery
- ack func(multiply bool) error
-
- // nack negatively acknowledge the delivery of message(s) identified by the delivery tag from either the client or server.
- // When multiple is true, nack messages up to and including delivered messages up until the delivery tag delivered on the same channel.
- // When requeue is true, request the server to deliver this message to a different consumer. If it is not possible or requeue is false, the message will be dropped or delivered to a server configured dead-letter queue.
- // This method must not be used to select or requeue messages the client wishes not to handle, rather it is to inform the server that the client is incapable of handling this message at this time
- nack func(multiply bool, requeue bool) error
-
- // requeueFn used as a pointer to the push function
- requeueFn func(context.Context, *Item) error
- // delayed jobs TODO(rustatian): figure out how to get stats from the DLX
- delayed *int64
- multipleAsk bool
- requeue bool
-}
-
-// DelayDuration returns delay duration in a form of time.Duration.
-func (o *Options) DelayDuration() time.Duration {
- return time.Second * time.Duration(o.Delay)
-}
-
-func (i *Item) ID() string {
- return i.Ident
-}
-
-func (i *Item) Priority() int64 {
- return i.Options.Priority
-}
-
-// Body packs job payload into binary payload.
-func (i *Item) Body() []byte {
- return utils.AsBytes(i.Payload)
-}
-
-// Context packs job context (job, id) into binary payload.
-// Not used in the amqp driver, amqp.Table is used instead
-func (i *Item) Context() ([]byte, error) {
- ctx, err := json.Marshal(
- struct {
- ID string `json:"id"`
- Job string `json:"job"`
- Headers map[string][]string `json:"headers"`
- Pipeline string `json:"pipeline"`
- }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
- )
-
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-func (i *Item) Ack() error {
- if i.Options.Delay > 0 {
- atomic.AddInt64(i.Options.delayed, ^int64(0))
- }
- return i.Options.ack(i.Options.multipleAsk)
-}
-
-func (i *Item) Nack() error {
- if i.Options.Delay > 0 {
- atomic.AddInt64(i.Options.delayed, ^int64(0))
- }
- return i.Options.nack(false, i.Options.requeue)
-}
-
-// Requeue with the provided delay, handled by the Nack
-func (i *Item) Requeue(headers map[string][]string, delay int64) error {
- if i.Options.Delay > 0 {
- atomic.AddInt64(i.Options.delayed, ^int64(0))
- }
- // overwrite the delay
- i.Options.Delay = delay
- i.Headers = headers
-
- err := i.Options.requeueFn(context.Background(), i)
- if err != nil {
- errNack := i.Options.nack(false, true)
- if errNack != nil {
- return fmt.Errorf("requeue error: %v\nnack error: %v", err, errNack)
- }
-
- return err
- }
-
- // ack the job
- err = i.Options.ack(false)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// fromDelivery converts amqp.Delivery into an Item which will be pushed to the PQ
-func (c *consumer) fromDelivery(d amqp.Delivery) (*Item, error) {
- const op = errors.Op("from_delivery_convert")
- item, err := c.unpack(d)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- i := &Item{
- Job: item.Job,
- Ident: item.Ident,
- Payload: item.Payload,
- Headers: item.Headers,
- Options: item.Options,
- }
-
- item.Options.ack = d.Ack
- item.Options.nack = d.Nack
- item.Options.delayed = c.delayed
-
- // requeue func
- item.Options.requeueFn = c.handleItem
- return i, nil
-}
-
-func fromJob(job *job.Job) *Item {
- return &Item{
- Job: job.Job,
- Ident: job.Ident,
- Payload: job.Payload,
- Headers: job.Headers,
- Options: &Options{
- Priority: job.Options.Priority,
- Pipeline: job.Options.Pipeline,
- Delay: job.Options.Delay,
- },
- }
-}
-
-// pack job metadata into headers
-func pack(id string, j *Item) (amqp.Table, error) {
- headers, err := json.Marshal(j.Headers)
- if err != nil {
- return nil, err
- }
- return amqp.Table{
- job.RRID: id,
- job.RRJob: j.Job,
- job.RRPipeline: j.Options.Pipeline,
- job.RRHeaders: headers,
- job.RRDelay: j.Options.Delay,
- job.RRPriority: j.Options.Priority,
- }, nil
-}
-
-// unpack restores jobs.Options
-func (c *consumer) unpack(d amqp.Delivery) (*Item, error) {
- item := &Item{Payload: utils.AsString(d.Body), Options: &Options{
- multipleAsk: c.multipleAck,
- requeue: c.requeueOnFail,
- requeueFn: c.handleItem,
- }}
-
- if _, ok := d.Headers[job.RRID].(string); !ok {
- return nil, errors.E(errors.Errorf("missing header `%s`", job.RRID))
- }
-
- item.Ident = d.Headers[job.RRID].(string)
-
- if _, ok := d.Headers[job.RRJob].(string); !ok {
- return nil, errors.E(errors.Errorf("missing header `%s`", job.RRJob))
- }
-
- item.Job = d.Headers[job.RRJob].(string)
-
- if _, ok := d.Headers[job.RRPipeline].(string); ok {
- item.Options.Pipeline = d.Headers[job.RRPipeline].(string)
- }
-
- if h, ok := d.Headers[job.RRHeaders].([]byte); ok {
- err := json.Unmarshal(h, &item.Headers)
- if err != nil {
- return nil, err
- }
- }
-
- if t, ok := d.Headers[job.RRDelay]; ok {
- switch tt := t.(type) {
- case int:
- item.Options.Delay = int64(tt)
- case int16:
- item.Options.Delay = int64(tt)
- case int32:
- item.Options.Delay = int64(tt)
- case int64:
- item.Options.Delay = tt
- default:
- c.log.Warn("unknown delay type", "want", "int, int16, int32, int64", "actual", t)
- }
- }
-
- if t, ok := d.Headers[job.RRPriority]; !ok {
- // set pipe's priority
- item.Options.Priority = c.priority
- } else {
- switch tt := t.(type) {
- case int:
- item.Options.Priority = int64(tt)
- case int16:
- item.Options.Priority = int64(tt)
- case int32:
- item.Options.Priority = int64(tt)
- case int64:
- item.Options.Priority = tt
- default:
- c.log.Warn("unknown priority type", "want", "int, int16, int32, int64", "actual", t)
- }
- }
-
- return item, nil
-}
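
Ack, Nack and Requeue above shrink the shared delayed counter with atomic.AddInt64(ptr, ^int64(0)). A tiny self-contained illustration of that decrement idiom for both signed and unsigned counters, as described in the sync/atomic documentation:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var delayed int64 = 3
	atomic.AddInt64(&delayed, 1)         // increment -> 4
	atomic.AddInt64(&delayed, ^int64(0)) // ^int64(0) == -1, so this decrements -> 3
	fmt.Println(atomic.LoadInt64(&delayed))

	// the same trick for unsigned counters (used for the listeners counter elsewhere)
	var listeners uint32 = 1
	atomic.AddUint32(&listeners, ^uint32(0)) // wraps around, effectively -1 -> 0
	fmt.Println(atomic.LoadUint32(&listeners))
}
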
diff --git a/plugins/amqp/amqpjobs/listener.go b/plugins/amqp/amqpjobs/listener.go
deleted file mode 100644
index 75c61cad..00000000
--- a/plugins/amqp/amqpjobs/listener.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package amqpjobs
-
-import amqp "github.com/rabbitmq/amqp091-go"
-
-func (c *consumer) listener(deliv <-chan amqp.Delivery) {
- go func() {
- for { //nolint:gosimple
- select {
- case msg, ok := <-deliv:
- if !ok {
- c.log.Info("delivery channel closed, leaving the rabbit listener")
- return
- }
-
- d, err := c.fromDelivery(msg)
- if err != nil {
- c.log.Error("amqp delivery convert", "error", err)
- continue
- }
- // insert job into the main priority queue
- c.pq.Insert(d)
- }
- }
- }()
-}
diff --git a/plugins/amqp/amqpjobs/rabbit_init.go b/plugins/amqp/amqpjobs/rabbit_init.go
deleted file mode 100644
index fb5f6911..00000000
--- a/plugins/amqp/amqpjobs/rabbit_init.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package amqpjobs
-
-import (
- "github.com/spiral/errors"
-)
-
-func (c *consumer) initRabbitMQ() error {
- const op = errors.Op("jobs_plugin_rmq_init")
- // Channel opens a unique, concurrent server channel to process the bulk of AMQP
- // messages. Any error from methods on this receiver will render the receiver
- // invalid and a new Channel should be opened.
- channel, err := c.conn.Channel()
- if err != nil {
- return errors.E(op, err)
- }
-
- // declare an exchange (idempotent operation)
- err = channel.ExchangeDeclare(
- c.exchangeName,
- c.exchangeType,
- true,
- false,
- false,
- false,
- nil,
- )
- if err != nil {
- return errors.E(op, err)
- }
-
- // verify or declare a queue
- q, err := channel.QueueDeclare(
- c.queue,
- false,
- false,
- c.exclusive,
- false,
- nil,
- )
- if err != nil {
- return errors.E(op, err)
- }
-
- // bind queue to the exchange
- err = channel.QueueBind(
- q.Name,
- c.routingKey,
- c.exchangeName,
- false,
- nil,
- )
- if err != nil {
- return errors.E(op, err)
- }
-
- return channel.Close()
-}
diff --git a/plugins/amqp/amqpjobs/redial.go b/plugins/amqp/amqpjobs/redial.go
deleted file mode 100644
index 698a34a6..00000000
--- a/plugins/amqp/amqpjobs/redial.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package amqpjobs
-
-import (
- "time"
-
- "github.com/cenkalti/backoff/v4"
- amqp "github.com/rabbitmq/amqp091-go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
-)
-
-// redialer used to redial to the rabbitmq in case of the connection interrupts
-func (c *consumer) redialer() { //nolint:gocognit
- go func() {
- const op = errors.Op("rabbitmq_redial")
-
- for {
- select {
- case err := <-c.conn.NotifyClose(make(chan *amqp.Error)):
- if err == nil {
- return
- }
-
- c.Lock()
-
- // trash the broken publishing channel
- <-c.publishChan
-
- t := time.Now().UTC()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeError,
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Error: err,
- Start: time.Now().UTC(),
- })
-
- expb := backoff.NewExponentialBackOff()
- // cap the total retry time with the configured retry timeout
- expb.MaxElapsedTime = c.retryTimeout
- operation := func() error {
- c.log.Warn("rabbitmq reconnecting, caused by", "error", err)
- var dialErr error
- c.conn, dialErr = amqp.Dial(c.connStr)
- if dialErr != nil {
- return errors.E(op, dialErr)
- }
-
- c.log.Info("rabbitmq dial succeeded, trying to redeclare queues and subscribers")
-
- // re-init connection
- errInit := c.initRabbitMQ()
- if errInit != nil {
- c.log.Error("rabbitmq dial", "error", errInit)
- return errInit
- }
-
- // redeclare consume channel
- var errConnCh error
- c.consumeChan, errConnCh = c.conn.Channel()
- if errConnCh != nil {
- return errors.E(op, errConnCh)
- }
-
- // redeclare publish channel
- pch, errPubCh := c.conn.Channel()
- if errPubCh != nil {
- return errors.E(op, errPubCh)
- }
-
- // start reading messages from the channel
- deliv, err := c.consumeChan.Consume(
- c.queue,
- c.consumeID,
- false,
- false,
- false,
- false,
- nil,
- )
- if err != nil {
- return errors.E(op, err)
- }
-
- // put the fresh publishing channel
- c.publishChan <- pch
- // restart listener
- c.listener(deliv)
-
- c.log.Info("queues and subscribers redeclared successfully")
-
- return nil
- }
-
- retryErr := backoff.Retry(operation, expb)
- if retryErr != nil {
- c.Unlock()
- c.log.Error("backoff failed", "error", retryErr)
- return
- }
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Start: t,
- Elapsed: time.Since(t),
- })
-
- c.Unlock()
-
- case <-c.stopCh:
- pch := <-c.publishChan
- err := pch.Close()
- if err != nil {
- c.log.Error("publish channel close", "error", err)
- }
-
- if c.consumeChan != nil {
- err = c.consumeChan.Close()
- if err != nil {
- c.log.Error("consume channel close", "error", err)
- }
- }
-
- err = c.conn.Close()
- if err != nil {
- c.log.Error("amqp connection close", "error", err)
- }
-
- return
- }
- }
- }()
-}
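
The redialer above wraps the whole reconnect-and-redeclare sequence into a single operation retried by github.com/cenkalti/backoff/v4. A minimal sketch of that retry shape; the broker URL and the five-minute cap are placeholder assumptions, not values taken from the plugin configuration:

package main

import (
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
	amqp "github.com/rabbitmq/amqp091-go"
)

func main() {
	expb := backoff.NewExponentialBackOff()
	expb.MaxElapsedTime = 5 * time.Minute // stop retrying after this much total time

	var conn *amqp.Connection
	operation := func() error {
		var err error
		conn, err = amqp.Dial("amqp://guest:guest@127.0.0.1:5672/")
		if err != nil {
			log.Println("dial failed, will retry:", err)
			return err // returning an error tells backoff.Retry to try again
		}
		return nil
	}

	if err := backoff.Retry(operation, expb); err != nil {
		log.Fatal("gave up reconnecting: ", err)
	}
	defer conn.Close()

	log.Println("connected, channels and consumers can now be redeclared")
}
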
diff --git a/plugins/amqp/plugin.go b/plugins/amqp/plugin.go
deleted file mode 100644
index c4f5f1da..00000000
--- a/plugins/amqp/plugin.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package amqp
-
-import (
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/amqp/amqpjobs"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- pluginName string = "amqp"
-)
-
-type Plugin struct {
- log logger.Logger
- cfg config.Configurer
-}
-
-func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- p.log = log
- p.cfg = cfg
- return nil
-}
-
-func (p *Plugin) Name() string {
- return pluginName
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return amqpjobs.NewAMQPConsumer(configKey, p.log, p.cfg, e, pq)
-}
-
-// FromPipeline constructs AMQP driver from pipeline
-func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return amqpjobs.FromPipeline(pipe, p.log, p.cfg, e, pq)
-}
diff --git a/plugins/beanstalk/config.go b/plugins/beanstalk/config.go
deleted file mode 100644
index a8069f5d..00000000
--- a/plugins/beanstalk/config.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package beanstalk
-
-import (
- "time"
-
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const (
- tubePriority string = "tube_priority"
- tube string = "tube"
- reserveTimeout string = "reserve_timeout"
-)
-
-type GlobalCfg struct {
- Addr string `mapstructure:"addr"`
- Timeout time.Duration `mapstructure:"timeout"`
-}
-
-func (c *GlobalCfg) InitDefault() {
- if c.Addr == "" {
- c.Addr = "tcp://127.0.0.1:11300"
- }
-
- if c.Timeout == 0 {
- c.Timeout = time.Second * 30
- }
-}
-
-type Config struct {
- PipePriority int64 `mapstructure:"priority"`
- TubePriority *uint32 `mapstructure:"tube_priority"`
- Tube string `mapstructure:"tube"`
- ReserveTimeout time.Duration `mapstructure:"reserve_timeout"`
-}
-
-func (c *Config) InitDefault() {
- if c.Tube == "" {
- c.Tube = "default"
- }
-
- if c.ReserveTimeout == 0 {
- c.ReserveTimeout = time.Second * 1
- }
-
- if c.TubePriority == nil {
- c.TubePriority = utils.Uint32(0)
- }
-
- if c.PipePriority == 0 {
- c.PipePriority = 10
- }
-}
diff --git a/plugins/beanstalk/connection.go b/plugins/beanstalk/connection.go
deleted file mode 100644
index d3241b37..00000000
--- a/plugins/beanstalk/connection.go
+++ /dev/null
@@ -1,223 +0,0 @@
-package beanstalk
-
-import (
- "context"
- "net"
- "sync"
- "time"
-
- "github.com/beanstalkd/go-beanstalk"
- "github.com/cenkalti/backoff/v4"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type ConnPool struct {
- sync.RWMutex
-
- log logger.Logger
-
- conn *beanstalk.Conn
- connT *beanstalk.Conn
- ts *beanstalk.TubeSet
- t *beanstalk.Tube
-
- network string
- address string
- tName string
- tout time.Duration
-}
-
-func NewConnPool(network, address, tName string, tout time.Duration, log logger.Logger) (*ConnPool, error) {
- connT, err := beanstalk.DialTimeout(network, address, tout)
- if err != nil {
- return nil, err
- }
-
- connTS, err := beanstalk.DialTimeout(network, address, tout)
- if err != nil {
- return nil, err
- }
-
- tube := beanstalk.NewTube(connT, tName)
- ts := beanstalk.NewTubeSet(connTS, tName)
-
- return &ConnPool{
- log: log,
- network: network,
- address: address,
- tName: tName,
- tout: tout,
- conn: connTS,
- connT: connT,
- ts: ts,
- t: tube,
- }, nil
-}
-
-// Put the payload
-// TODO use the context ??
-func (cp *ConnPool) Put(_ context.Context, body []byte, pri uint32, delay, ttr time.Duration) (uint64, error) {
- cp.RLock()
- defer cp.RUnlock()
-
- // TODO(rustatian): redial based on the token
- id, err := cp.t.Put(body, pri, delay, ttr)
- if err != nil {
- // errN contains both the original err and the internal checkAndRedial error
- errN := cp.checkAndRedial(err)
- if errN != nil {
- return 0, errors.Errorf("err: %s\nerr redial: %s", err, errN)
- } else {
- // retry put only when we redialed
- return cp.t.Put(body, pri, delay, ttr)
- }
- }
-
- return id, nil
-}
-
-// Reserve reserves and returns a job from one of the tubes in t. If no
-// job is available before time timeout has passed, Reserve returns a
-// ConnError recording ErrTimeout.
-//
-// Typically, a client will reserve a job, perform some work, then delete
-// the job with Conn.Delete.
-func (cp *ConnPool) Reserve(reserveTimeout time.Duration) (uint64, []byte, error) {
- cp.RLock()
- defer cp.RUnlock()
-
- id, body, err := cp.ts.Reserve(reserveTimeout)
- if err != nil {
- // errN contains both the original err and the internal checkAndRedial error
- errN := cp.checkAndRedial(err)
- if errN != nil {
- return 0, nil, errors.Errorf("err: %s\nerr redial: %s", err, errN)
- } else {
- // retry Reserve only when we redialed
- return cp.ts.Reserve(reserveTimeout)
- }
- }
-
- return id, body, nil
-}
-
-func (cp *ConnPool) Delete(_ context.Context, id uint64) error {
- cp.RLock()
- defer cp.RUnlock()
-
- err := cp.conn.Delete(id)
- if err != nil {
- // errN contains both the original err and the internal checkAndRedial error
- errN := cp.checkAndRedial(err)
- if errN != nil {
- return errors.Errorf("err: %s\nerr redial: %s", err, errN)
- } else {
- // retry Delete only when we redialed
- return cp.conn.Delete(id)
- }
- }
- return nil
-}
-
-func (cp *ConnPool) Stats(_ context.Context) (map[string]string, error) {
- cp.RLock()
- defer cp.RUnlock()
-
- stat, err := cp.conn.Stats()
- if err != nil {
- errR := cp.checkAndRedial(err)
- if errR != nil {
- return nil, errors.Errorf("err: %s\nerr redial: %s", err, errR)
- } else {
- return cp.conn.Stats()
- }
- }
-
- return stat, nil
-}
-
-func (cp *ConnPool) redial() error {
- const op = errors.Op("connection_pool_redial")
-
- cp.Lock()
- // backoff here
- expb := backoff.NewExponentialBackOff()
- // TODO(rustatian) set via config
- expb.MaxElapsedTime = time.Minute
-
- operation := func() error {
- connT, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout)
- if err != nil {
- return err
- }
- if connT == nil {
- return errors.E(op, errors.Str("connectionT is nil"))
- }
-
- connTS, err := beanstalk.DialTimeout(cp.network, cp.address, cp.tout)
- if err != nil {
- return err
- }
-
- if connTS == nil {
- return errors.E(op, errors.Str("connectionTS is nil"))
- }
-
- cp.t = beanstalk.NewTube(connT, cp.tName)
- cp.ts = beanstalk.NewTubeSet(connTS, cp.tName)
- cp.conn = connTS
- cp.connT = connT
-
- cp.log.Info("beanstalk redial was successful")
- return nil
- }
-
- retryErr := backoff.Retry(operation, expb)
- if retryErr != nil {
- cp.Unlock()
- return retryErr
- }
- cp.Unlock()
-
- return nil
-}
-
-var connErrors = map[string]struct{}{"EOF": {}}
-
-func (cp *ConnPool) checkAndRedial(err error) error {
- const op = errors.Op("connection_pool_check_redial")
- switch et := err.(type) { //nolint:gocritic
- // check if the error
- case beanstalk.ConnError:
- switch bErr := et.Err.(type) {
- case *net.OpError:
- cp.RUnlock()
- errR := cp.redial()
- cp.RLock()
- // if redial failed - return
- if errR != nil {
- return errors.E(op, errors.Errorf("%v:%v", bErr, errR))
- }
-
- // if redial was successful -> continue listening
- return nil
- default:
- if _, ok := connErrors[et.Err.Error()]; ok {
- // if error is related to the broken connection - redial
- cp.RUnlock()
- errR := cp.redial()
- cp.RLock()
- // if redial failed - return
- if errR != nil {
- return errors.E(op, errors.Errorf("%v:%v", err, errR))
- }
- // if redial was successful -> continue listening
- return nil
- }
- }
- }
-
- // return initial error
- return err
-}
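
ConnPool above wraps github.com/beanstalkd/go-beanstalk and redials transparently on connection errors. For reference, a standalone sketch of the raw put/reserve/delete round trip the pool performs, assuming a local beanstalkd on the default port and the "default" tube; the pool keeps two connections (one for the tube, one for the tube set), while a single connection is enough for this illustration:

package main

import (
	"log"
	"time"

	"github.com/beanstalkd/go-beanstalk"
)

func main() {
	conn, err := beanstalk.DialTimeout("tcp", "127.0.0.1:11300", 30*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// put a job: pri 0 is the most urgent, delay 0 means ready immediately, ttr is time-to-run
	tube := beanstalk.NewTube(conn, "default")
	id, err := tube.Put([]byte("payload"), 0, 0, 30*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("put job", id)

	// reserve it back with a timeout
	ts := beanstalk.NewTubeSet(conn, "default")
	rid, body, err := ts.Reserve(5 * time.Second)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reserved job %d: %s", rid, body)

	// ack: delete the job once it has been processed
	if err := conn.Delete(rid); err != nil {
		log.Fatal(err)
	}
}
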
diff --git a/plugins/beanstalk/consumer.go b/plugins/beanstalk/consumer.go
deleted file mode 100644
index 30807f03..00000000
--- a/plugins/beanstalk/consumer.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package beanstalk
-
-import (
- "bytes"
- "context"
- "encoding/gob"
- "strconv"
- "strings"
- "sync/atomic"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type consumer struct {
- log logger.Logger
- eh events.Handler
- pq priorityqueue.Queue
-
- pipeline atomic.Value
- listeners uint32
-
- // beanstalk
- pool *ConnPool
- addr string
- network string
- reserveTimeout time.Duration
- reconnectCh chan struct{}
- tout time.Duration
- // tube name
- tName string
- tubePriority *uint32
- priority int64
-
- stopCh chan struct{}
- requeueCh chan *Item
-}
-
-func NewBeanstalkConsumer(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_beanstalk_consumer")
-
- // PARSE CONFIGURATION -------
- var pipeCfg Config
- var globalCfg GlobalCfg
-
- if !cfg.Has(configKey) {
- return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
- }
-
- // if no global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout"))
- }
-
- err := cfg.UnmarshalKey(configKey, &pipeCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipeCfg.InitDefault()
-
- err = cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
-
- // PARSE CONFIGURATION -------
-
- dsn := strings.Split(globalCfg.Addr, "://")
- if len(dsn) != 2 {
- return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr))
- }
-
- cPool, err := NewConnPool(dsn[0], dsn[1], pipeCfg.Tube, globalCfg.Timeout, log)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // initialize job consumer
- jc := &consumer{
- pq: pq,
- log: log,
- eh: e,
- pool: cPool,
- network: dsn[0],
- addr: dsn[1],
- tout: globalCfg.Timeout,
- tName: pipeCfg.Tube,
- reserveTimeout: pipeCfg.ReserveTimeout,
- tubePriority: pipeCfg.TubePriority,
- priority: pipeCfg.PipePriority,
-
- // buffered with two because the jobs root plugin can call Stop at the same time as Pause
- stopCh: make(chan struct{}, 2),
- requeueCh: make(chan *Item, 1000),
- reconnectCh: make(chan struct{}, 2),
- }
-
- return jc, nil
-}
-
-func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_beanstalk_consumer")
-
- // PARSE CONFIGURATION -------
- var globalCfg GlobalCfg
-
- // if no global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global beanstalk configuration, global configuration should contain beanstalk addrs and timeout"))
- }
-
- err := cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
-
- // PARSE CONFIGURATION -------
-
- dsn := strings.Split(globalCfg.Addr, "://")
- if len(dsn) != 2 {
- return nil, errors.E(op, errors.Errorf("invalid socket DSN (tcp://127.0.0.1:11300, unix://beanstalk.sock), provided: %s", globalCfg.Addr))
- }
-
- cPool, err := NewConnPool(dsn[0], dsn[1], pipe.String(tube, "default"), globalCfg.Timeout, log)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // initialize job consumer
- jc := &consumer{
- pq: pq,
- log: log,
- eh: e,
- pool: cPool,
- network: dsn[0],
- addr: dsn[1],
- tout: globalCfg.Timeout,
- tName: pipe.String(tube, "default"),
- reserveTimeout: time.Second * time.Duration(pipe.Int(reserveTimeout, 5)),
- tubePriority: utils.Uint32(uint32(pipe.Int(tubePriority, 1))),
- priority: pipe.Priority(),
-
- // buffered with two because the jobs root plugin can call Stop at the same time as Pause
- stopCh: make(chan struct{}, 2),
- requeueCh: make(chan *Item, 1000),
- reconnectCh: make(chan struct{}, 2),
- }
-
- return jc, nil
-}
-
-func (j *consumer) Push(ctx context.Context, jb *job.Job) error {
- const op = errors.Op("beanstalk_push")
- // check if the pipeline registered
-
- // load atomic value
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != jb.Options.Pipeline {
- return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name()))
- }
-
- err := j.handleItem(ctx, fromJob(jb))
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (j *consumer) handleItem(ctx context.Context, item *Item) error {
- const op = errors.Op("beanstalk_handle_item")
-
- bb := new(bytes.Buffer)
- bb.Grow(64)
- err := gob.NewEncoder(bb).Encode(item)
- if err != nil {
- return errors.E(op, err)
- }
-
- body := make([]byte, bb.Len())
- copy(body, bb.Bytes())
- bb.Reset()
- bb = nil
-
- // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L458
- // <pri> is an integer < 2**32. Jobs with smaller priority values will be
- // scheduled before jobs with larger priorities. The most urgent priority is 0;
- // the least urgent priority is 4,294,967,295.
- //
- // <delay> is an integer number of seconds to wait before putting the job in
- // the ready queue. The job will be in the "delayed" state during this time.
- // Maximum delay is 2**32-1.
- //
- // <ttr> -- time to run -- is an integer number of seconds to allow a worker
- // to run this job. This time is counted from the moment a worker reserves
- // this job. If the worker does not delete, release, or bury the job within
- // <ttr> seconds, the job will time out and the server will release the job.
- // The minimum ttr is 1. If the client sends 0, the server will silently
- // increase the ttr to 1. Maximum ttr is 2**32-1.
- id, err := j.pool.Put(ctx, body, *j.tubePriority, item.Options.DelayDuration(), j.tout)
- if err != nil {
- errD := j.pool.Delete(ctx, id)
- if errD != nil {
- return errors.E(op, errors.Errorf("%s:%s", err.Error(), errD.Error()))
- }
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (j *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
- // register the pipeline
- j.pipeline.Store(p)
- return nil
-}
-
-// State https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L514
-func (j *consumer) State(ctx context.Context) (*jobState.State, error) {
- const op = errors.Op("beanstalk_state")
- stat, err := j.pool.Stats(ctx)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
-
- out := &jobState.State{
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Queue: j.tName,
- Ready: ready(atomic.LoadUint32(&j.listeners)),
- }
-
- // set stat, skip errors (replace with 0)
- // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L523
- if v, err := strconv.Atoi(stat["current-jobs-ready"]); err == nil {
- out.Active = int64(v)
- }
-
- // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L525
- if v, err := strconv.Atoi(stat["current-jobs-reserved"]); err == nil {
- // this is not an error: reserved jobs in beanstalk behave like active jobs
- out.Reserved = int64(v)
- }
-
- // https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt#L528
- if v, err := strconv.Atoi(stat["current-jobs-delayed"]); err == nil {
- out.Delayed = int64(v)
- }
-
- return out, nil
-}
-
-func (j *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
- const op = errors.Op("beanstalk_run")
- start := time.Now()
-
- // load atomic value
- // check if the pipeline registered
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p.Name() {
- return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", p.Name(), pipe.Name()))
- }
-
- atomic.AddUint32(&j.listeners, 1)
-
- go j.listen()
-
- j.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (j *consumer) Stop(context.Context) error {
- start := time.Now()
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
-
- if atomic.LoadUint32(&j.listeners) == 1 {
- j.stopCh <- struct{}{}
- }
-
- j.eh.Push(events.JobEvent{
- Event: events.EventPipeStopped,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (j *consumer) Pause(_ context.Context, p string) {
- start := time.Now()
- // load atomic value
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&j.listeners)
- // no active listeners
- if l == 0 {
- j.log.Warn("no active listeners, nothing to pause")
- return
- }
-
- atomic.AddUint32(&j.listeners, ^uint32(0))
-
- j.stopCh <- struct{}{}
-
- j.eh.Push(events.JobEvent{
- Event: events.EventPipePaused,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (j *consumer) Resume(_ context.Context, p string) {
- start := time.Now()
- // load atomic value
- pipe := j.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- j.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&j.listeners)
- // no active listeners
- if l == 1 {
- j.log.Warn("beanstalk listener already in the active state")
- return
- }
-
- // start listener
- go j.listen()
-
- // increase num of listeners
- atomic.AddUint32(&j.listeners, 1)
-
- j.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func ready(r uint32) bool {
- return r > 0
-}
diff --git a/plugins/beanstalk/encode_test.go b/plugins/beanstalk/encode_test.go
deleted file mode 100644
index e43207eb..00000000
--- a/plugins/beanstalk/encode_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package beanstalk
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/gob"
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-func BenchmarkEncodeGob(b *testing.B) {
- tb := make([]byte, 1024*10)
- _, err := rand.Read(tb)
- if err != nil {
- b.Fatal(err)
- }
-
- item := &Item{
- Job: "/super/test/php/class/loooooong",
- Ident: "12341234-asdfasdfa-1234234-asdfasdfas",
- Payload: utils.AsString(tb),
- Headers: map[string][]string{"Test": {"test1", "test2"}},
- Options: &Options{
- Priority: 10,
- Pipeline: "test-local-pipe",
- Delay: 10,
- },
- }
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- bb := new(bytes.Buffer)
- err := gob.NewEncoder(bb).Encode(item)
- if err != nil {
- b.Fatal(err)
- }
- _ = bb.Bytes()
- bb.Reset()
- }
-}
-
-func BenchmarkEncodeJsonIter(b *testing.B) {
- tb := make([]byte, 1024*10)
- _, err := rand.Read(tb)
- if err != nil {
- b.Fatal(err)
- }
-
- item := &Item{
- Job: "/super/test/php/class/loooooong",
- Ident: "12341234-asdfasdfa-1234234-asdfasdfas",
- Payload: utils.AsString(tb),
- Headers: map[string][]string{"Test": {"test1", "test2"}},
- Options: &Options{
- Priority: 10,
- Pipeline: "test-local-pipe",
- Delay: 10,
- },
- }
-
- b.ResetTimer()
- b.ReportAllocs()
-
- for i := 0; i < b.N; i++ {
- bb, err := json.Marshal(item)
- if err != nil {
- b.Fatal(err)
- }
- _ = bb
- }
-}
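
The two benchmarks above compare encoding/gob against json-iterator for the Item payload that the driver stores in beanstalkd. Assuming a checkout of the repository from before this move, they can be run with: go test -bench=Encode -benchmem ./plugins/beanstalk
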
diff --git a/plugins/beanstalk/item.go b/plugins/beanstalk/item.go
deleted file mode 100644
index 03060994..00000000
--- a/plugins/beanstalk/item.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package beanstalk
-
-import (
- "bytes"
- "context"
- "encoding/gob"
- "time"
-
- "github.com/beanstalkd/go-beanstalk"
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type Item struct {
- // Job contains pluginName of job broker (usually PHP class).
- Job string `json:"job"`
-
- // Ident is unique identifier of the job, should be provided from outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
- // Headers with key-value pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains a set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle a given job.
-type Options struct {
- // Priority is the job priority, default is 10
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-
- // Private ================
- id uint64
- conn *beanstalk.Conn
- requeueFn func(context.Context, *Item) error
-}
-
-// DelayDuration returns delay duration in a form of time.Duration.
-func (o *Options) DelayDuration() time.Duration {
- return time.Second * time.Duration(o.Delay)
-}
-
-func (i *Item) ID() string {
- return i.Ident
-}
-
-func (i *Item) Priority() int64 {
- return i.Options.Priority
-}
-
-// Body packs job payload into binary payload.
-func (i *Item) Body() []byte {
- return utils.AsBytes(i.Payload)
-}
-
-// Context packs job context (job, id) into binary payload.
-// Not used for the beanstalk transport, the whole Item is gob-encoded instead
-func (i *Item) Context() ([]byte, error) {
- ctx, err := json.Marshal(
- struct {
- ID string `json:"id"`
- Job string `json:"job"`
- Headers map[string][]string `json:"headers"`
- Pipeline string `json:"pipeline"`
- }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
- )
-
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-func (i *Item) Ack() error {
- return i.Options.conn.Delete(i.Options.id)
-}
-
-func (i *Item) Nack() error {
- return i.Options.conn.Delete(i.Options.id)
-}
-
-func (i *Item) Requeue(headers map[string][]string, delay int64) error {
- // overwrite the delay
- i.Options.Delay = delay
- i.Headers = headers
-
- err := i.Options.requeueFn(context.Background(), i)
- if err != nil {
- return err
- }
-
- // delete old job
- err = i.Options.conn.Delete(i.Options.id)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func fromJob(job *job.Job) *Item {
- return &Item{
- Job: job.Job,
- Ident: job.Ident,
- Payload: job.Payload,
- Headers: job.Headers,
- Options: &Options{
- Priority: job.Options.Priority,
- Pipeline: job.Options.Pipeline,
- Delay: job.Options.Delay,
- },
- }
-}
-
-func (j *consumer) unpack(id uint64, data []byte, out *Item) error {
- err := gob.NewDecoder(bytes.NewBuffer(data)).Decode(out)
- if err != nil {
- return err
- }
- out.Options.conn = j.pool.conn
- out.Options.id = id
- out.Options.requeueFn = j.handleItem
-
- return nil
-}
diff --git a/plugins/beanstalk/listen.go b/plugins/beanstalk/listen.go
deleted file mode 100644
index 6bb159ea..00000000
--- a/plugins/beanstalk/listen.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package beanstalk
-
-import (
- "github.com/beanstalkd/go-beanstalk"
-)
-
-func (j *consumer) listen() {
- for {
- select {
- case <-j.stopCh:
- j.log.Warn("beanstalk listener stopped")
- return
- default:
- id, body, err := j.pool.Reserve(j.reserveTimeout)
- if err != nil {
- if errB, ok := err.(beanstalk.ConnError); ok {
- switch errB.Err { //nolint:gocritic
- case beanstalk.ErrTimeout:
- j.log.Info("beanstalk reserve timeout", "warn", errB.Op)
- continue
- }
- }
- // in case of other error - continue
- j.log.Error("beanstalk reserve", "error", err)
- continue
- }
-
- item := &Item{}
- err = j.unpack(id, body, item)
- if err != nil {
- j.log.Error("beanstalk unpack item", "error", err)
- continue
- }
-
- // insert job into the priority queue
- j.pq.Insert(item)
- }
- }
-}
diff --git a/plugins/beanstalk/plugin.go b/plugins/beanstalk/plugin.go
deleted file mode 100644
index 529d1474..00000000
--- a/plugins/beanstalk/plugin.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package beanstalk
-
-import (
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- pluginName string = "beanstalk"
-)
-
-type Plugin struct {
- log logger.Logger
- cfg config.Configurer
-}
-
-func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- p.log = log
- p.cfg = cfg
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- return make(chan error)
-}
-
-func (p *Plugin) Stop() error {
- return nil
-}
-
-func (p *Plugin) Name() string {
- return pluginName
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) JobsConstruct(configKey string, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return NewBeanstalkConsumer(configKey, p.log, p.cfg, eh, pq)
-}
-
-func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, eh events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return FromPipeline(pipe, p.log, p.cfg, eh, pq)
-}
diff --git a/plugins/boltdb/boltjobs/config.go b/plugins/boltdb/boltjobs/config.go
deleted file mode 100644
index 8cc098c1..00000000
--- a/plugins/boltdb/boltjobs/config.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package boltjobs
-
-const (
- file string = "file"
- priority string = "priority"
- prefetch string = "prefetch"
-)
-
-type GlobalCfg struct {
- // db file permissions
- Permissions int `mapstructure:"permissions"`
- // consume timeout
-}
-
-func (c *GlobalCfg) InitDefaults() {
- if c.Permissions == 0 {
- c.Permissions = 0777
- }
-}
-
-type Config struct {
- File string `mapstructure:"file"`
- Priority int `mapstructure:"priority"`
- Prefetch int `mapstructure:"prefetch"`
-}
-
-func (c *Config) InitDefaults() {
- if c.File == "" {
- c.File = "rr.db"
- }
-
- if c.Priority == 0 {
- c.Priority = 10
- }
-
- if c.Prefetch == 0 {
- c.Prefetch = 1000
- }
-}
diff --git a/plugins/boltdb/boltjobs/consumer.go b/plugins/boltdb/boltjobs/consumer.go
deleted file mode 100644
index 62045d3b..00000000
--- a/plugins/boltdb/boltjobs/consumer.go
+++ /dev/null
@@ -1,430 +0,0 @@
-package boltjobs
-
-import (
- "bytes"
- "context"
- "encoding/gob"
- "os"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
- bolt "go.etcd.io/bbolt"
-)
-
-const (
- PluginName string = "boltdb"
- rrDB string = "rr.db"
-
- PushBucket string = "push"
- InQueueBucket string = "processing"
- DelayBucket string = "delayed"
-)
-
-type consumer struct {
- file string
- permissions int
- priority int
- prefetch int
-
- db *bolt.DB
-
- bPool sync.Pool
- log logger.Logger
- eh events.Handler
- pq priorityqueue.Queue
- pipeline atomic.Value
- cond *sync.Cond
-
- listeners uint32
- active *uint64
- delayed *uint64
-
- stopCh chan struct{}
-}
-
-func NewBoltDBJobs(configKey string, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("init_boltdb_jobs")
-
- if !cfg.Has(configKey) {
- return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
- }
-
- // if no global section
- if !cfg.Has(PluginName) {
- return nil, errors.E(op, errors.Str("no global boltdb configuration"))
- }
-
- conf := &GlobalCfg{}
- err := cfg.UnmarshalKey(PluginName, conf)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- localCfg := &Config{}
- err = cfg.UnmarshalKey(configKey, localCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- localCfg.InitDefaults()
- conf.InitDefaults()
-
- db, err := bolt.Open(localCfg.File, os.FileMode(conf.Permissions), &bolt.Options{
- Timeout: time.Second * 20,
- NoGrowSync: false,
- NoFreelistSync: false,
- ReadOnly: false,
- NoSync: false,
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // create bucket if it does not exist
- // tx.Commit invokes via the db.Update
- err = db.Update(func(tx *bolt.Tx) error {
- const upOp = errors.Op("boltdb_plugin_update")
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
- cursor := inQb.Cursor()
-
- pushB := tx.Bucket(utils.AsBytes(PushBucket))
-
- // get all items, which are in the InQueueBucket and put them into the PushBucket
- for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
- err = pushB.Put(k, v)
- if err != nil {
- return errors.E(op, err)
- }
- }
- return nil
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return &consumer{
- permissions: conf.Permissions,
- file: localCfg.File,
- priority: localCfg.Priority,
- prefetch: localCfg.Prefetch,
-
- bPool: sync.Pool{New: func() interface{} {
- return new(bytes.Buffer)
- }},
- cond: sync.NewCond(&sync.Mutex{}),
-
- delayed: utils.Uint64(0),
- active: utils.Uint64(0),
-
- db: db,
- log: log,
- eh: e,
- pq: pq,
- stopCh: make(chan struct{}, 2),
- }, nil
-}
-
-func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, cfg config.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("init_boltdb_jobs")
-
- // if no global section
- if !cfg.Has(PluginName) {
- return nil, errors.E(op, errors.Str("no global boltdb configuration"))
- }
-
- conf := &GlobalCfg{}
- err := cfg.UnmarshalKey(PluginName, conf)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // add default values
- conf.InitDefaults()
-
- db, err := bolt.Open(pipeline.String(file, rrDB), os.FileMode(conf.Permissions), &bolt.Options{
- Timeout: time.Second * 20,
- NoGrowSync: false,
- NoFreelistSync: false,
- ReadOnly: false,
- NoSync: false,
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // create bucket if it does not exist
- // tx.Commit invokes via the db.Update
- err = db.Update(func(tx *bolt.Tx) error {
- const upOp = errors.Op("boltdb_plugin_update")
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(DelayBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(PushBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- _, err = tx.CreateBucketIfNotExists(utils.AsBytes(InQueueBucket))
- if err != nil {
- return errors.E(op, upOp, err)
- }
-
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
- cursor := inQb.Cursor()
-
- pushB := tx.Bucket(utils.AsBytes(PushBucket))
-
- // get all items, which are in the InQueueBucket and put them into the PushBucket
- for k, v := cursor.First(); k != nil; k, v = cursor.Next() {
- err = pushB.Put(k, v)
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- return nil
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return &consumer{
- file: pipeline.String(file, rrDB),
- priority: pipeline.Int(priority, 10),
- prefetch: pipeline.Int(prefetch, 1000),
- permissions: conf.Permissions,
-
- bPool: sync.Pool{New: func() interface{} {
- return new(bytes.Buffer)
- }},
- cond: sync.NewCond(&sync.Mutex{}),
-
- delayed: utils.Uint64(0),
- active: utils.Uint64(0),
-
- db: db,
- log: log,
- eh: e,
- pq: pq,
- stopCh: make(chan struct{}, 2),
- }, nil
-}
-
-func (c *consumer) Push(_ context.Context, job *job.Job) error {
- const op = errors.Op("boltdb_jobs_push")
- err := c.db.Update(func(tx *bolt.Tx) error {
- item := fromJob(job)
- // pool with buffers
- buf := c.get()
- // encode the job
- enc := gob.NewEncoder(buf)
- err := enc.Encode(item)
- if err != nil {
- c.put(buf)
- return errors.E(op, err)
- }
-
- value := make([]byte, buf.Len())
- copy(value, buf.Bytes())
- c.put(buf)
-
- // handle delay
- if item.Options.Delay > 0 {
- b := tx.Bucket(utils.AsBytes(DelayBucket))
- tKey := time.Now().UTC().Add(time.Second * time.Duration(item.Options.Delay)).Format(time.RFC3339)
-
- err = b.Put(utils.AsBytes(tKey), value)
- if err != nil {
- return errors.E(op, err)
- }
-
- atomic.AddUint64(c.delayed, 1)
-
- return nil
- }
-
- b := tx.Bucket(utils.AsBytes(PushBucket))
- err = b.Put(utils.AsBytes(item.ID()), value)
- if err != nil {
- return errors.E(op, err)
- }
-
- // increment active counter
- atomic.AddUint64(c.active, 1)
-
- return nil
- })
-
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error {
- c.pipeline.Store(pipeline)
- return nil
-}
-
-func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
- const op = errors.Op("boltdb_run")
- start := time.Now()
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p.Name() {
- return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
- }
-
- // run listener
- go c.listener()
- go c.delayedJobsListener()
-
- // increase number of listeners
- atomic.AddUint32(&c.listeners, 1)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (c *consumer) Stop(_ context.Context) error {
- start := time.Now()
- if atomic.LoadUint32(&c.listeners) > 0 {
- c.stopCh <- struct{}{}
- c.stopCh <- struct{}{}
- }
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeStopped,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
- return nil
-}
-
-func (c *consumer) Pause(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested pause on", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 0 {
- c.log.Warn("no active listeners, nothing to pause")
- return
- }
-
- c.stopCh <- struct{}{}
- c.stopCh <- struct{}{}
-
- atomic.AddUint32(&c.listeners, ^uint32(0))
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipePaused,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Resume(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested resume on", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 1 {
- c.log.Warn("boltdb listener already in the active state")
- return
- }
-
- // run listener
- go c.listener()
- go c.delayedJobsListener()
-
- // increase number of listeners
- atomic.AddUint32(&c.listeners, 1)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) State(_ context.Context) (*jobState.State, error) {
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- return &jobState.State{
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Queue: PushBucket,
- Active: int64(atomic.LoadUint64(c.active)),
- Delayed: int64(atomic.LoadUint64(c.delayed)),
- Ready: toBool(atomic.LoadUint32(&c.listeners)),
- }, nil
-}
-
-// Private
-
-func (c *consumer) get() *bytes.Buffer {
- return c.bPool.Get().(*bytes.Buffer)
-}
-
-func (c *consumer) put(b *bytes.Buffer) {
- b.Reset()
- c.bPool.Put(b)
-}
-
-func toBool(r uint32) bool {
- return r > 0
-}
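
Both constructors above re-enqueue whatever was left in the processing bucket by walking it with a cursor inside a single writable transaction. A minimal standalone sketch of that recovery pattern with go.etcd.io/bbolt; the file name, permissions and bucket names here are placeholder assumptions:

package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("example.db", 0666, &bolt.Options{Timeout: 5 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		push, err := tx.CreateBucketIfNotExists([]byte("push"))
		if err != nil {
			return err
		}
		processing, err := tx.CreateBucketIfNotExists([]byte("processing"))
		if err != nil {
			return err
		}

		// walk the processing bucket and copy every entry back into push
		c := processing.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if err := push.Put(k, v); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
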
diff --git a/plugins/boltdb/boltjobs/item.go b/plugins/boltdb/boltjobs/item.go
deleted file mode 100644
index 837f8c63..00000000
--- a/plugins/boltdb/boltjobs/item.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package boltjobs
-
-import (
- "bytes"
- "encoding/gob"
- "sync/atomic"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/utils"
- "go.etcd.io/bbolt"
-)
-
-type Item struct {
- // Job contains pluginName of job broker (usually PHP class).
- Job string `json:"job"`
-
- // Ident is unique identifier of the job, should be provided from outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
- // Headers with key-value pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains a set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle a given job.
-type Options struct {
- // Priority is the job priority, default is 10
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-
- // private
- db *bbolt.DB
- active *uint64
- delayed *uint64
-}
-
-func (i *Item) ID() string {
- return i.Ident
-}
-
-func (i *Item) Priority() int64 {
- return i.Options.Priority
-}
-
-func (i *Item) Body() []byte {
- return utils.AsBytes(i.Payload)
-}
-
-func (i *Item) Context() ([]byte, error) {
- ctx, err := json.Marshal(
- struct {
- ID string `json:"id"`
- Job string `json:"job"`
- Headers map[string][]string `json:"headers"`
- Pipeline string `json:"pipeline"`
- }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
- )
-
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-func (i *Item) Ack() error {
- const op = errors.Op("boltdb_item_ack")
- tx, err := i.Options.db.Begin(true)
- if err != nil {
- _ = tx.Rollback()
- return errors.E(op, err)
- }
-
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
- err = inQb.Delete(utils.AsBytes(i.ID()))
- if err != nil {
- _ = tx.Rollback()
- return errors.E(op, err)
- }
-
- if i.Options.Delay > 0 {
- atomic.AddUint64(i.Options.delayed, ^uint64(0))
- } else {
- atomic.AddUint64(i.Options.active, ^uint64(0))
- }
-
- return tx.Commit()
-}
-
-func (i *Item) Nack() error {
- const op = errors.Op("boltdb_item_nack")
- /*
- steps:
- 1. begin tx
- 2. get item by ID from the InQueueBucket (previously put in the listener)
- 3. put it back to the PushBucket
- 4. Delete it from the InQueueBucket
- */
- tx, err := i.Options.db.Begin(true)
- if err != nil {
- _ = tx.Rollback()
- return errors.E(op, err)
- }
-
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
- v := inQb.Get(utils.AsBytes(i.ID()))
-
- pushB := tx.Bucket(utils.AsBytes(PushBucket))
-
- err = pushB.Put(utils.AsBytes(i.ID()), v)
- if err != nil {
- _ = tx.Rollback()
- return errors.E(op, err)
- }
-
- err = inQb.Delete(utils.AsBytes(i.ID()))
- if err != nil {
- _ = tx.Rollback()
- return errors.E(op, err)
- }
-
- return tx.Commit()
-}
-
-/*
-Requeue algorithm:
-1. Rewrite item headers and delay.
-2. Begin writable transaction on attached to the item db.
-3. Delete item from the InQueueBucket
-4. Handle items with the delay:
- 4.1. Get DelayBucket
- 4.2. Make a key by adding the delay to the time.Now() in RFC3339 format
- 4.3. Put this key with value to the DelayBucket
-5. W/o delay, put the key with value to the PushBucket (requeue)
-*/
-func (i *Item) Requeue(headers map[string][]string, delay int64) error {
- const op = errors.Op("boltdb_item_requeue")
- i.Headers = headers
- i.Options.Delay = delay
-
- tx, err := i.Options.db.Begin(true)
- if err != nil {
- return errors.E(op, err)
- }
-
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
- err = inQb.Delete(utils.AsBytes(i.ID()))
- if err != nil {
- return errors.E(op, i.rollback(err, tx))
- }
-
- // encode the item and check the encode error right away
- buf := new(bytes.Buffer)
- enc := gob.NewEncoder(buf)
- err = enc.Encode(i)
- if err != nil {
- return errors.E(op, i.rollback(err, tx))
- }
-
- val := make([]byte, buf.Len())
- copy(val, buf.Bytes())
- buf.Reset()
-
- if delay > 0 {
- delayB := tx.Bucket(utils.AsBytes(DelayBucket))
- tKey := time.Now().UTC().Add(time.Second * time.Duration(delay)).Format(time.RFC3339)
-
- err = delayB.Put(utils.AsBytes(tKey), val)
- if err != nil {
- return errors.E(op, i.rollback(err, tx))
- }
-
- return tx.Commit()
- }
-
- pushB := tx.Bucket(utils.AsBytes(PushBucket))
-
- err = pushB.Put(utils.AsBytes(i.ID()), val)
- if err != nil {
- return errors.E(op, i.rollback(err, tx))
- }
-
- return tx.Commit()
-}
-
-func (i *Item) attachDB(db *bbolt.DB, active, delayed *uint64) {
- i.Options.db = db
- i.Options.active = active
- i.Options.delayed = delayed
-}
-
-func (i *Item) rollback(err error, tx *bbolt.Tx) error {
- errR := tx.Rollback()
- if errR != nil {
- return errors.Errorf("transaction commit error: %v, rollback failed: %v", err, errR)
- }
- return errors.Errorf("transaction commit error: %v", err)
-}
-
-func fromJob(job *job.Job) *Item {
- return &Item{
- Job: job.Job,
- Ident: job.Ident,
- Payload: job.Payload,
- Headers: job.Headers,
- Options: &Options{
- Priority: job.Options.Priority,
- Pipeline: job.Options.Pipeline,
- Delay: job.Options.Delay,
- },
- }
-}
diff --git a/plugins/boltdb/boltjobs/listener.go b/plugins/boltdb/boltjobs/listener.go
deleted file mode 100644
index 081d3f57..00000000
--- a/plugins/boltdb/boltjobs/listener.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package boltjobs
-
-import (
- "bytes"
- "encoding/gob"
- "sync/atomic"
- "time"
-
- "github.com/spiral/roadrunner/v2/utils"
- bolt "go.etcd.io/bbolt"
-)
-
-func (c *consumer) listener() {
- tt := time.NewTicker(time.Millisecond)
- defer tt.Stop()
- for {
- select {
- case <-c.stopCh:
- c.log.Info("boltdb listener stopped")
- return
- case <-tt.C:
- if atomic.LoadUint64(c.active) > uint64(c.prefetch) {
- time.Sleep(time.Second)
- continue
- }
- tx, err := c.db.Begin(true)
- if err != nil {
- c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err)
- continue
- }
-
- b := tx.Bucket(utils.AsBytes(PushBucket))
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
-
- // get first item
- k, v := b.Cursor().First()
- if k == nil && v == nil {
- _ = tx.Commit()
- continue
- }
-
- buf := bytes.NewReader(v)
- dec := gob.NewDecoder(buf)
-
- item := &Item{}
- err = dec.Decode(item)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- err = inQb.Put(utils.AsBytes(item.ID()), v)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- // delete key from the PushBucket
- err = b.Delete(k)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- err = tx.Commit()
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- // attach pointer to the DB
- item.attachDB(c.db, c.active, c.delayed)
- // as the last step, after commit, put the item into the PQ
- c.pq.Insert(item)
- }
- }
-}
-
-func (c *consumer) delayedJobsListener() {
- tt := time.NewTicker(time.Second)
- defer tt.Stop()
-
- // start the scan window far in the past (1990, UTC) so that every delayed key is covered
- loc, err := time.LoadLocation("UTC")
- if err != nil {
- c.log.Error("failed to load location, delayed jobs won't work", "error", err)
- return
- }
-
- var startDate = utils.AsBytes(time.Date(1990, 1, 1, 0, 0, 0, 0, loc).Format(time.RFC3339))
-
- for {
- select {
- case <-c.stopCh:
- c.log.Info("boltdb delayed jobs listener stopped")
- return
- case <-tt.C:
- tx, err := c.db.Begin(true)
- if err != nil {
- c.log.Error("failed to begin writable transaction, job will be read on the next attempt", "error", err)
- continue
- }
-
- delayB := tx.Bucket(utils.AsBytes(DelayBucket))
- inQb := tx.Bucket(utils.AsBytes(InQueueBucket))
-
- cursor := delayB.Cursor()
- endDate := utils.AsBytes(time.Now().UTC().Format(time.RFC3339))
-
- for k, v := cursor.Seek(startDate); k != nil && bytes.Compare(k, endDate) <= 0; k, v = cursor.Next() {
- buf := bytes.NewReader(v)
- dec := gob.NewDecoder(buf)
-
- item := &Item{}
- err = dec.Decode(item)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- err = inQb.Put(utils.AsBytes(item.ID()), v)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- // delete key from the PushBucket
- err = delayB.Delete(k)
- if err != nil {
- c.rollback(err, tx)
- continue
- }
-
- // attach pointer to the DB
- item.attachDB(c.db, c.active, c.delayed)
- // as the last step, after commit, put the item into the PQ
- c.pq.Insert(item)
- }
-
- err = tx.Commit()
- if err != nil {
- c.rollback(err, tx)
- continue
- }
- }
- }
-}
-
-func (c *consumer) rollback(err error, tx *bolt.Tx) {
- errR := tx.Rollback()
- if errR != nil {
- c.log.Error("transaction error, rollback failed", "error", err, "rollback error", errR)
- return
- }
-
- c.log.Error("transaction error, rollback succeeded", "error", err)
-}
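
The delayed-jobs listener above works because RFC3339 timestamps rendered in a single zone (UTC here) sort lexicographically in chronological order, so a byte comparison of bucket keys against the current time is enough to find every job that is due. A tiny self-contained illustration:

package main

import (
	"bytes"
	"fmt"
	"time"
)

func main() {
	now := time.Now().UTC()
	due := []byte(now.Add(-time.Minute).Format(time.RFC3339))         // already due
	notDue := []byte(now.Add(10 * time.Minute).Format(time.RFC3339))  // still delayed
	end := []byte(now.Format(time.RFC3339))

	fmt.Println(bytes.Compare(due, end) <= 0)    // true  -> would be moved to the processing bucket
	fmt.Println(bytes.Compare(notDue, end) <= 0) // false -> stays in the delayed bucket
}
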
diff --git a/plugins/boltdb/boltkv/config.go b/plugins/boltdb/boltkv/config.go
deleted file mode 100644
index 56d00674..00000000
--- a/plugins/boltdb/boltkv/config.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package boltkv
-
-type Config struct {
- // File is boltDB file. No need to create it by your own,
- // boltdb driver is able to create the file, or read existing
- File string
- // Bucket to store data in boltDB
- bucket string
- // db file permissions
- Permissions int
- // interval (in seconds) between GC cycles for expired keys
- Interval int `mapstructure:"interval"`
-}
-
-// InitDefaults initializes default values for the boltdb
-func (s *Config) InitDefaults() {
- s.bucket = "default"
-
- if s.File == "" {
- s.File = "rr.db" // default file name
- }
-
- if s.Permissions == 0 {
- s.Permissions = 0777 // free for all
- }
-
- if s.Interval == 0 {
- s.Interval = 60 // default is 60 seconds timeout
- }
-}
diff --git a/plugins/boltdb/boltkv/driver.go b/plugins/boltdb/boltkv/driver.go
deleted file mode 100644
index 656d572e..00000000
--- a/plugins/boltdb/boltkv/driver.go
+++ /dev/null
@@ -1,472 +0,0 @@
-package boltkv
-
-import (
- "bytes"
- "encoding/gob"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
- "github.com/spiral/roadrunner/v2/utils"
- bolt "go.etcd.io/bbolt"
-)
-
-const (
- RootPluginName string = "kv"
-)
-
-type Driver struct {
- clearMu sync.RWMutex
- // db instance
- DB *bolt.DB
- // name should be UTF-8
- bucket []byte
- log logger.Logger
- cfg *Config
-
- // gc contains keys with timeouts
- gc sync.Map
- // default timeout for cache cleanup is 1 minute
- timeout time.Duration
-
- // stop is used to stop keys GC and close boltdb connection
- stop chan struct{}
-}
-
-func NewBoltDBDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) {
- const op = errors.Op("new_boltdb_driver")
-
- if !cfgPlugin.Has(RootPluginName) {
- return nil, errors.E(op, errors.Str("no kv section in the configuration"))
- }
-
- d := &Driver{
- log: log,
- stop: make(chan struct{}),
- }
-
- err := cfgPlugin.UnmarshalKey(key, &d.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // add default values
- d.cfg.InitDefaults()
-
- d.bucket = []byte(d.cfg.bucket)
- d.timeout = time.Duration(d.cfg.Interval) * time.Second
- d.gc = sync.Map{}
-
- db, err := bolt.Open(d.cfg.File, os.FileMode(d.cfg.Permissions), &bolt.Options{
- Timeout: time.Second * 20,
- NoGrowSync: false,
- NoFreelistSync: false,
- ReadOnly: false,
- NoSync: false,
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- d.DB = db
-
- // create bucket if it does not exist
- // tx.Commit invokes via the db.Update
- err = db.Update(func(tx *bolt.Tx) error {
- const upOp = errors.Op("boltdb_plugin_update")
- _, err = tx.CreateBucketIfNotExists([]byte(d.cfg.bucket))
- if err != nil {
- return errors.E(op, upOp)
- }
- return nil
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- go d.startGCLoop()
-
- return d, nil
-}
-
-func (d *Driver) Has(keys ...string) (map[string]bool, error) {
- const op = errors.Op("boltdb_driver_has")
- d.log.Debug("boltdb HAS method called", "args", keys)
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- m := make(map[string]bool, len(keys))
-
- // this is a read-only transaction
- err := d.DB.View(func(tx *bolt.Tx) error {
- // Get retrieves the value for a key in the bucket.
- // Returns a nil value if the key does not exist or if the key is a nested bucket.
- // The returned value is only valid for the life of the transaction.
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return errors.E(op, errors.EmptyKey)
- }
- b := tx.Bucket(d.bucket)
- if b == nil {
- return errors.E(op, errors.NoSuchBucket)
- }
- exist := b.Get([]byte(keys[i]))
- if exist != nil {
- m[keys[i]] = true
- }
- }
- return nil
- })
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- d.log.Debug("boltdb HAS method finished")
- return m, nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (d *Driver) Get(key string) ([]byte, error) {
- const op = errors.Op("boltdb_driver_get")
- // to get cases like " "
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
-
- var val []byte
- err := d.DB.View(func(tx *bolt.Tx) error {
- b := tx.Bucket(d.bucket)
- if b == nil {
- return errors.E(op, errors.NoSuchBucket)
- }
- val = b.Get([]byte(key))
-
- // try to decode values
- if val != nil {
- buf := bytes.NewReader(val)
- decoder := gob.NewDecoder(buf)
-
- var i string
- err := decoder.Decode(&i)
- if err != nil {
- // unsafe (w/o runes) convert
- return errors.E(op, err)
- }
-
- // set the value
- val = utils.AsBytes(i)
- }
- return nil
- })
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return val, nil
-}
-
-func (d *Driver) MGet(keys ...string) (map[string][]byte, error) {
- const op = errors.Op("boltdb_driver_mget")
- // defense
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string][]byte, len(keys))
-
- err := d.DB.View(func(tx *bolt.Tx) error {
- b := tx.Bucket(d.bucket)
- if b == nil {
- return errors.E(op, errors.NoSuchBucket)
- }
-
- buf := new(bytes.Buffer)
- var out []byte
- buf.Grow(100)
- for i := range keys {
- value := b.Get([]byte(keys[i]))
- buf.Write(value)
- // allocate enough space
- dec := gob.NewDecoder(buf)
- if value != nil {
- err := dec.Decode(&out)
- if err != nil {
- return errors.E(op, err)
- }
- m[keys[i]] = out
- buf.Reset()
- out = nil
- }
- }
-
- return nil
- })
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return m, nil
-}
-
-// Set puts the K/V items into boltdb
-func (d *Driver) Set(items ...*kvv1.Item) error {
- const op = errors.Op("boltdb_driver_set")
- if items == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- // start writable transaction
- tx, err := d.DB.Begin(true)
- if err != nil {
- return errors.E(op, err)
- }
- defer func() {
- err = tx.Commit()
- if err != nil {
- errRb := tx.Rollback()
- if errRb != nil {
- d.log.Error("rollback error occurred during the commit", "commit error", err, "rollback error", errRb)
- }
- }
- }()
-
- b := tx.Bucket(d.bucket)
- // use access by index to avoid copying
- for i := range items {
- // performance note: pass a prepared bytes slice with initial cap
- // we can't move buf and the gob encoder out of the loop: buf could be reset,
- // but the encoder would keep state from previous iterations without re-initialization
- buf := new(bytes.Buffer)
- encoder := gob.NewEncoder(buf)
- // guard against nil items
- if items[i] == nil {
- return errors.E(op, errors.EmptyItem)
- }
-
- // Encode value
- err = encoder.Encode(&items[i].Value)
- if err != nil {
- return errors.E(op, err)
- }
- // buf.Bytes will copy the underlying slice. Take a look in case of performance problems
- err = b.Put([]byte(items[i].Key), buf.Bytes())
- if err != nil {
- return errors.E(op, err)
- }
-
- // if there are no errors and a TTL is set, we put the key with its timeout into the map for the future GC check
- // we do not need mutex here, since we use sync.Map
- if items[i].Timeout != "" {
- // check correctness of provided TTL
- _, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return errors.E(op, err)
- }
- // Store key TTL in the separate map
- d.gc.Store(items[i].Key, items[i].Timeout)
- }
-
- buf.Reset()
- }
-
- return nil
-}
-
-// Delete removes the given keys from the DB
-func (d *Driver) Delete(keys ...string) error {
- const op = errors.Op("boltdb_driver_delete")
- if keys == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for _, key := range keys {
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return errors.E(op, errors.EmptyKey)
- }
- }
-
- // start writable transaction
- tx, err := d.DB.Begin(true)
- if err != nil {
- return errors.E(op, err)
- }
-
- defer func() {
- err = tx.Commit()
- if err != nil {
- errRb := tx.Rollback()
- if errRb != nil {
- d.log.Error("rollback error occurred during the commit", "commit error", err, "rollback error", errRb)
- }
- }
- }()
-
- b := tx.Bucket(d.bucket)
- if b == nil {
- return errors.E(op, errors.NoSuchBucket)
- }
-
- for _, key := range keys {
- err = b.Delete([]byte(key))
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- return nil
-}
-
-// MExpire sets the expiration time for the given keys.
-// If a key already has an expiration time, it will be overwritten.
-func (d *Driver) MExpire(items ...*kvv1.Item) error {
- const op = errors.Op("boltdb_driver_mexpire")
- for i := range items {
- if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" {
- return errors.E(op, errors.Str("should set timeout and at least one key"))
- }
-
- // verify provided TTL
- _, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return errors.E(op, err)
- }
-
- d.gc.Store(items[i].Key, items[i].Timeout)
- }
- return nil
-}
-
-func (d *Driver) TTL(keys ...string) (map[string]string, error) {
- const op = errors.Op("boltdb_driver_ttl")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string]string, len(keys))
-
- for i := range keys {
- if item, ok := d.gc.Load(keys[i]); ok {
- // the type assertion is acceptable here: only RFC3339 timeout strings are stored in this map
- m[keys[i]] = item.(string)
- }
- }
- return m, nil
-}
-
-func (d *Driver) Clear() error {
- err := d.DB.Update(func(tx *bolt.Tx) error {
- err := tx.DeleteBucket(d.bucket)
- if err != nil {
- d.log.Error("boltdb delete bucket", "error", err)
- return err
- }
-
- _, err = tx.CreateBucket(d.bucket)
- if err != nil {
- d.log.Error("boltdb create bucket", "error", err)
- return err
- }
-
- return nil
- })
-
- if err != nil {
- d.log.Error("clear transaction failed", "error", err)
- return err
- }
-
- d.clearMu.Lock()
- d.gc = sync.Map{}
- d.clearMu.Unlock()
-
- return nil
-}
-
-func (d *Driver) Stop() {
- d.stop <- struct{}{}
-}
-
-// ========================= PRIVATE =================================
-
-func (d *Driver) startGCLoop() { //nolint:gocognit
- go func() {
- t := time.NewTicker(d.timeout)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- d.clearMu.RLock()
-
- // calculate current time before loop started to be fair
- now := time.Now()
- d.gc.Range(func(key, value interface{}) bool {
- const op = errors.Op("boltdb_plugin_gc")
- k := key.(string)
- v, err := time.Parse(time.RFC3339, value.(string))
- if err != nil {
- return false
- }
-
- if now.After(v) {
- // time expired
- d.gc.Delete(k)
- d.log.Debug("key deleted", "key", k)
- err := d.DB.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket(d.bucket)
- if b == nil {
- return errors.E(op, errors.NoSuchBucket)
- }
- err := b.Delete(utils.AsBytes(k))
- if err != nil {
- return errors.E(op, err)
- }
- return nil
- })
- if err != nil {
- d.log.Error("error during the gc phase of update", "error", err)
- return false
- }
- }
- return true
- })
-
- d.clearMu.RUnlock()
- case <-d.stop:
- err := d.DB.Close()
- if err != nil {
- d.log.Error("error closing the DB", "error", err)
- }
- return
- }
- }
- }()
-}
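For orientation, a minimal usage sketch of the driver removed above (an illustration only, not part of this change; it assumes kvv1.Item exposes Key, Value ([]byte) and Timeout fields as used in Set, and that `d` is a *Driver returned by NewBoltDBDriver):

package boltkv

import (
	"time"

	kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
)

// exampleSetGet stores one item with a ten-minute TTL and reads it back.
// The TTL is an RFC3339 timestamp; expired keys are later removed by the GC loop.
func exampleSetGet(d *Driver) ([]byte, error) {
	item := &kvv1.Item{
		Key:     "session:1",
		Value:   []byte("payload"),
		Timeout: time.Now().Add(10 * time.Minute).Format(time.RFC3339),
	}
	if err := d.Set(item); err != nil {
		return nil, err
	}
	return d.Get("session:1")
}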
diff --git a/plugins/boltdb/doc/boltjobs.drawio b/plugins/boltdb/doc/boltjobs.drawio
deleted file mode 100644
index 7d1f3531..00000000
--- a/plugins/boltdb/doc/boltjobs.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2021-08-31T09:34:11.357Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/91.0.4472.164 Electron/13.2.3 Safari/537.36" etag="KiNZAPNeIcd5kV3EE5lF" version="14.6.13" type="device"><diagram id="NuJwivb--D1hymDgb9NQ" name="Page-1">7V1bc5s4GP01nmkfkgHE9TF2km13up20me1uH2WQbRJsuYATe3/9ijtICiHmIrvBnWmNEAjrfNejT3QCZuv9Hz7crv7CDvImiuTsJ+B6oigKABr5J2o5JC0yUJWkZem7TtpWNNy7/6G0UUpbd66DgkrHEGMvdLfVRhtvNsgOK23Q9/FztdsCe9VRt3CJmIZ7G3ps6z+uE67SVl2SihOfkLtcZUMr2Zk1zHqnDcEKOvi51ARuJmDmYxwm39b7GfKi6csmJrnu9oWz+ZP5aBM2ucDcf0ZP8pX6w3iw/K8/fijw1/VFikYQHrJfjBwyAekh9sMVXuIN9G6K1qmPdxsHRXeVyFHR5wvGW9Iok8YHFIaHFE24CzFpWoVrLz1LHtg//Btdf6llhz/T28UH1/vK0SE9WuBNeAvXrhc1zGBgQ8eF5OlnRO7S0+mYMiDHyU+Lfs+LM5aJFPSXKKyZJjnHi4g6wmtEHotc5yMPhu5T9f4wFbll3i+/9A67ZGRFSvVDV1PROORiVb1F8lzpVWVoX7uRSt0owDvfRsyNyJfS7ymaYsl5gxRl2jqwGO3dsCRF5Ohn6UwhQ9HBoSxQpyR6CTKviV4DEVVbimhFJt4sAMl9n6C3S0e6+/v+E2n5fjdjRKMK/PPKDdH9FsaT8EzcSBXkN078wvW8GfawHw8F5tA2nag9CH38iEpnFKCqmpND9YT8EO3rwWInN71AM6vqp2R6/FxyGZkbWJW8Ba2mneFhiNRHuaSNhW7y9VGcXoGGeqWJ1CvzXHAs7GrFqhZG9uTsalP8hdpVa8RfNP66SPwB41enJPm5nrb0qVUnuSAf0+Q5SaADCzjHwtTKp+pVl6qZGutSZY5LBX25VFlmJv1EdVGcTrW1lfzsxqDCK7VhmtRVdqOywe0uWJGW6c5+RGG3uqhFf3i6qMefFL+yjsYfITqqaBQwoGHcSwPYmZIqQh1mNRGVapX0pUQ050dqHGZNwmM7aG7OubacfG5vX5eTDs2Bfg4hlgxOR2Qachfy5ExiLFk5hyBL1hgDP8PbaPwHPOcKxxc4R14VUOi5yw35bpNJQkTlppFldW3oXaUn1q7jJLKDAvc/OI/vF8GzjfxW/Ju06US75ul4E43lWPKUFE/HmuRMdBnKGp140e4TkbOUdNJa+vYLWbIqPuRCoXwDXiwC1A9rqYvVfKO56tMS0RONKUL1ZaOXoJEkB1QGMXDUmE0AGzZ6bhCiDTESivRBltbBx24jyAXSbZsXATiGNZeOdgitIkUmhOdFiuqQkaLMRvXyZTwHheWXiO6Sv1aI/P15822HdigL+XWPPOB07ldw03/tosW8eAIvgngGr0gHGWz38TRm58m3ZfSvEg14jTwURiMsfLzOh4tE5YX0gqAQ8n2PhxZhO88TmasghZ4RoQ3eIDHSQ5EBwGSlJ5eUQfh1WSgx+5ag8Qw9h9HUc1jDeA59aM9hsJ7j010k7th/RH4QzSvGXkuvQckFE79SbgXJjoYMnluxdANAvamItDIEskQtdGuG6JU2RSgtKF0qw6x9vzkd6cYUWA1NQamgRkACmeVCp+8NfmcReCFnHWihRjCL9IZcsisZYByBFH9OVDrEcowW49G/o2CLNwEa1os7EJkLbnKo2yaaL8R4cV28FxdaMCOVdLf5QnvLdYPBdDQD8rRJ4OwpSyr6FdqPw6rnwrQRn7uZm5qqNbatrdTTtKraaUrCtZOl0m58n0xNc2qkC1q+KTnSBXXfHC4rq4xI4TI41JrGQYvm2bsLhQRnREdFQsq5GNOhypbiS698Hx5KHVKdaE6dWDq1J4G6QDFr+5MvySN0yq8obFXV1dCW3tGQ6ag8S28qc6ALolPEm3ogOJU+jk4xWlmPU0ikFLWhXWmdZh9nWHJ3lUmqVW9YZBnUXtCPZQGCV5TbkwC/t+y2ZQmPlF2qTM6iN+pRF1j1/XsSXbEZ8HslsBrLLhBKYCmcGlgfh9h+n8tQ8smlyOrZVKe32SlyatqrNVReVejyFFveeE3GjcbvoYC9PcM8QKF642Xj3sqPsui0Uh0WxQDzmHQsVx4dVQr0+/JdJDyqYsmxvQpvZ1B/hBdLT+ZFXX/GdWSV0q5KJdll3seN6jocMquXI8xlFc2CYJWFGRiDwnxCe0veT4AMGgfIpkgfq57hAmI1/jJaB2C3t5Jknqh0iKHDKXrVMl8hraj+hqZRgtlH4s+mdt/Rr8hDtYwL37r2CR0D8tc+VV2T1GEyO5pntIS/yiN7YdY5WZauCXERlsVsaFmEvh8ke0o2txtVV5Y04bqritTd41gZSndbx4wCdDfDs/eNx0dFBQZd/SbVRwV0f1Wu7w/q+/cTRagsn5GRTD1vd2tdMtUD36Q14ZsG3e6mM/B83pDjb0mcJ817IAJbVzgMQAQackMX0d8bK7gePCGPHl4hjxi83iVRRJUQ8dZilEGJIpUl3JOtpSsUc7tOahh9uFlGmH6A8T0J2kH82OEqsecJ4nGvj+Udp6XNowlnjPZb1ydqmt+jLXs83EbSYWVFpdbtdB6paA0aIdao/6j4tWAa1EKAxdsVzHOy/Sk+u80jUdDEkEeaHan7V/w8iZ5hmtkCot3SIzpUFTcOn0a4ixfCqVW4uekdV3l7w1tjg152Gf7ssKLeLxblbzHn34WvpsqDAcf+8qKv/hBkX1zM0m0jgiUEqWU5VTiCCoMguxVzRLBA0KJiII2TAg2LIFuOz9JmI4IFgnL+4tMsjOVs+RkWQnYNia3jHyEsQUjvvODt2hoWQjZpHSGsh5AiHgzOUuGwELIk3whhvSEFp6aF7Kt/2C0lI4SleJR6NRiX/RsUQZbQYUuDRgQLBOktoDonrR8WQZbFGXWwVgfp/Uqic8LMJowINq6doBdRLNEQssTMaEbrIKTfsMplwwdFkCVm2JdmjggWCOpKA4K7IwjJYfEfTyY1IMV/4Alu/gc=</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/boltdb/doc/job_lifecycle.md b/plugins/boltdb/doc/job_lifecycle.md
deleted file mode 100644
index 1424e586..00000000
--- a/plugins/boltdb/doc/job_lifecycle.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### Job lifecycle
-
-There are several boltdb buckets:
-
-1. `PushBucket` - used for jobs pushed via RPC.
-2. `InQueueBucket` - when a job is consumed from the `PushBucket`, it is copied into the priority queue and moved
-into the `InQueueBucket` within the same transaction, where it waits for acknowledgement (see the sketch right below this file).
-3. `DelayBucket` - used for delayed jobs. An RFC3339 timestamp is used to track delay expiration.
-
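The single-transaction move described in item 2 can be illustrated with a minimal bbolt sketch (an illustration only, not part of the deleted plugin; it assumes both buckets already exist, and the real consumer keys `InQueueBucket` by the item ID rather than by the original push key):

package main

import (
	bolt "go.etcd.io/bbolt"
)

// moveFirstJob copies the oldest job from PushBucket into InQueueBucket and
// removes it from PushBucket, all within one writable transaction.
func moveFirstJob(db *bolt.DB) error {
	return db.Update(func(tx *bolt.Tx) error {
		push := tx.Bucket([]byte("PushBucket"))
		inQueue := tx.Bucket([]byte("InQueueBucket"))

		k, v := push.Cursor().First()
		if k == nil {
			return nil // nothing to consume
		}
		if err := inQueue.Put(k, v); err != nil {
			return err
		}
		return push.Delete(k)
	})
}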
diff --git a/plugins/boltdb/plugin.go b/plugins/boltdb/plugin.go
deleted file mode 100644
index ad98cf3c..00000000
--- a/plugins/boltdb/plugin.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package boltdb
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/boltdb/boltjobs"
- "github.com/spiral/roadrunner/v2/plugins/boltdb/boltkv"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- PluginName string = "boltdb"
-)
-
-// Plugin is the BoltDB K/V and Jobs storage plugin.
-type Plugin struct {
- cfg config.Configurer
- // logger
- log logger.Logger
-}
-
-func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- p.log = log
- p.cfg = cfg
- return nil
-}
-
-// Serve is noop here
-func (p *Plugin) Serve() chan error {
- return make(chan error, 1)
-}
-
-func (p *Plugin) Stop() error {
- return nil
-}
-
-// Name returns plugin name
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {}
-
-func (p *Plugin) KVConstruct(key string) (kv.Storage, error) {
- const op = errors.Op("boltdb_plugin_provide")
- st, err := boltkv.NewBoltDBDriver(p.log, key, p.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return st, nil
-}
-
-// JOBS bbolt implementation
-
-func (p *Plugin) JobsConstruct(configKey string, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) {
- return boltjobs.NewBoltDBJobs(configKey, p.log, p.cfg, e, queue)
-}
-
-func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, queue priorityqueue.Queue) (jobs.Consumer, error) {
- return boltjobs.FromPipeline(pipe, p.log, p.cfg, e, queue)
-}
diff --git a/plugins/broadcast/config.go b/plugins/broadcast/config.go
deleted file mode 100644
index 9531025b..00000000
--- a/plugins/broadcast/config.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package broadcast
-
-/*
-
-# Global redis config (priority - 2)
-default:
- # redis configuration here
-
-websockets: # <----- one of the possible subscribers
- path: /ws
- broker: default # <----- broadcast broker to use; must match a section name under `broadcast`
-
-broadcast: # <----- broadcast entry point plugin
- default: # <----- matches the `broker` value above
- driver: redis
- # local redis config (priority - 1)
- test:
- driver: memory
-
-Lookup priority: local config first, then global.
-*/
-
-// Config holds the per-section broadcast driver configuration, keyed by section name.
-type Config struct {
- Data map[string]interface{} `mapstructure:"broadcast"`
-}
diff --git a/plugins/broadcast/doc/broadcast_arch.drawio b/plugins/broadcast/doc/broadcast_arch.drawio
deleted file mode 100644
index fd5ff1f9..00000000
--- a/plugins/broadcast/doc/broadcast_arch.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2021-06-18T09:34:25.915Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/89.0.4389.128 Electron/12.0.9 Safari/537.36" etag="THNfOcV33EQGG0gzo1UK" version="14.6.13" type="device"><diagram id="xG4Au6HO45p6fae_AhkE" name="Page-1">7V1bc6M4Fv41rk1vVVIg7o+Jk8l01fR2Np7e7n7a4iLbbDB4AMdJ//qVQGCQZBsHEMSTviRGIAznfj4dSRNlunq5j+318kvkwWACJO9lotxOALBkE/3EDa95AzAVJW9ZxL6Xt8m7hpn/C5JGibRufA8mtQvTKApSf11vdKMwhG5aa7PjONrWL5tHQf1b1/YCMg0z1w7Y1u++ly7Ja0iStDvxO/QXy5Q+s7KLq0lDsrS9aFtpUu4myjSOojT/tHqZwgBTryBM3u+3PWfLJ4thmDbpMFV1/fPNfXhp/FAuZ+Hz8kd0f6mr+W2e7WBDXpk8bfpa0CCONqEH8V2kiXKzXfopnK1tF5/dIq6jtmW6CtCRjD7O/SCYRkEUZ30Vz4bm3EXtSRpHT7ByRndN6MzRGfY9yKs9wziFL5Um8l73MFrBNH5Fl5CzikloTMRM1cjxtsazvG1ZZZdFGm0iJ4vy3jtKog+EmCcQVtP7Jexcw3+5hM3+4B5RmFba8z/dEFyW5DrFDYOluGpyKK7oWl8U186b4qBO8ctSoAckec/WY2iSU0KuSNrQFLcY+kIPOSxyGMXpMlpEoR3c7Vpv6hzYXfNHFK0J3f8H0/SVeF97k0Z1rsAXP/2Bu19p5OgnuRn+fPtSPXgtDkL0upVO+PBncT98sOuWHRX99vItiTaxCw+QpggP7HgB00PXEYZhwh0UgxgGduo/1yMBHkdJ14fIR89cio8BpCtZBaZmkJ81WdIkyvPkz03uQUlJ+VBvF5xCIoeSHFAVHbmh6FQFpyJHe0SHNh6mC11u6OGYGo4QRAhbEV91LGzXcWy/Vi5YY7FJ9suibmo16dNVKlakr5fbXW/Kh69XJKnV9ZqlUUqSU6RTlSkcxs653Tx+vb6dXs/+7NbLtRDU5t5M1ygSciI2WeU4M62vEFkd0iLJFXO082unGSRwqkWazwGf0Z7u6JreyiKBpu6va4PUSgYAo2OPd7efZ6jp4dvN7NtNt4o2h/oe+huWI3WkaCoVqJcRYlXRgEhFG9T1d6FoY9Iz5V3qmcLo2Ze7L18ff56TonE9mlBFM967ohlj0jT9XWoai/tdf/n3w0T5Df87I23T9aG1zWQojV49wKMA6Muy7vaz7Qe2g4iMXn7jJBsHffBiJANxwrAA0SGt09kO/EWIPruIahCR9AZTy3ft4JqcWPmelysxTPxf2RflDCT5GLqvdjPRbvG9kN4muQrLDKvCKIT9IOGaynKJj4T3xSVZGdIojhyykpqauHHZuOK5q6lxHNmeaydY+XysLnNsvbo0c54GTU/lmTkTOIp+0NeckCbrb4ze1d4USP9QoGN6cVyBtK4V6E04nEE50WL8cR/uZRlmq+v1I9fLsiUd6tAPsKaBYSXaaC7SAkRT7dy2v000DaUmCbJ5ouhQHXoSnUGjiXqKZTTNsUZnDYE1CpEzKQlSSay4T+To62VZO1VGqR49DRywIwcToAc4u1jXZFf/a4Orf25WiGE+Siiu0Vlp/YJ+ZnIg5e2XKRZTfE6tnMPpyiXJUPA5kqSU90SfFuS3vcKRVcAeVQI10lzvUhxlFztx5ZZ5S9FQBn/ZA5Av+s2hr0dta7ptydy1OxKRcwGcp8fpl13FUO8eprdZvnjxBF+zB4v9cPEJfZptnEdoe7U3rrwdZaSSpb3GH+OsSK1qWyJ083mQFYbhgHdPWNwh7nJCLEyN6imclL+s1KvGwkZvsfBH/cPREPe49RdUAMFYX42GgnqueZDZirDv0Eki9wmmLP7TJjd1ZM+bSzx9lCVDsToCd2RZoRFv44otSeKmp0pfFUmywRD5c+inFxhqK/3CJ4baZ4G2MUV5OuCgBWLhNhYVfYQuxFoMJKcRSHOmrOEgobIukjWlJX9PuUvVdzUYH3q7+yrIftx9AUHuC9BYuiJdYYDd0sjPRs7s1KRILqMnCsLfm+SwHUxKVnvIccCg1TvjjsSai3I/qCQ3EruSJUVVTWBopg7qUI9Kz4LoOS4r6FPxUV+3Ye6fnnBCJSUoTfKjMHNTk2x2z9xfnEXEJnO8kNDhBDDseFzNC4GGXkiue6GjdXctVBc0Vd3Ok6h2XAWMShW4hOc/F7DEdpf9YMyjxC0ql3B6lXBQ6CTrHDwhipoBJx6c25sgbXM7OwiiLfT+G8V+CfLssJh/1mCZN3/JOpuQV781us82OX77NnaniJ1zFGogM6RQM1lUi5M28op/e0NywMeo5tGw4LgRMkTFD6Z1tZvIohbfW+CCouMHFtcpkYarHWJbGilioTBqe0lj21Uc9/2FFxJV1K8qbHgBuDPUetNrFg6aIXHAwgRfoIs0EhF6momk7aFfK5gk9gIiIywFWIvfIRPoGE/hIQ1iYzzzw7jupY3V0Lgq/UzdajBIWc47FWVPLUZnv6bLLBF7CDaLLAVjsavx13IxmolS4OEBc4XNfv++gLksWRxjKRQxV9jR+g/EPFcfHlghFDJXwJCObOyQeeGhjrsyXZAroyFzyxoEMpeLVXgaY+aohwDQXPmohe9AmE1xcdkB0NwSXMxQ0GdI0NyTXAgBL2zTdE02+kqorKYT+3pLqJRhV8+o+aHxgeZK05l63adU7bjKW6OLgZLzVKg7vDyFSQpa3GxfNaY40FqQGaBBa0sbGrRWBp3gO3L/bTY1Av0Un58KWpui/TdbmDU20FqQXtOgtcmBAMSC1goLgM1g/Nzx/MVhgieTs0yg2OCpuPGH1dw/+ev4LDFRKTyNRtNi0bOZVFk0ro5Fs4hQq7UTbM/yuCtM2lCVFdCTUurDw9AqYAj9N4Khafhn8LptlV2h5wOFrkX4g4HQ6ogWqhsfCK02Tf5VUbgdDUKDOgYtm2ojj9YahVZOxKCJ69t7PVPEVbu+H8RaHXjZ2FHHbk0lv8h3BkasL2kL2Xcox8JrwhFr0fHdcEnWsNDUyBFqtSk41X2a1Y6rLGi0D6HOs6NprkLdotXyKTcbHJ0WpPK0Ny5nGx9Fp3vL6D4mxx91wcd9dT+r8ZyKTl/Kgp11QaARw9OCFJuGpy85K8CJhac1FhPrHp4eCAm7lAcf3NfPvipH5M4YmtHQ0o5rSUuNnbfw+DCt4237YptkbYdFWwnVXT1snMBPlhdkhsOnSiRU7dCpEkPZ06DB46ylG4rd1bpASn1dIIM3xGQIVeFBl1Y4SYXRwQOMffTe2I8OFQ41zYe0zmeYtdNSNh/aJJwwo1hua7MKrl085WgHff9hOzB4iBI/Qx6UWyd
K02g1YbHxbEmx2kpdmzTwQ6RUxVaTB3nQXJ0Mido8h7N7ocLRJr03bWKBmxkMvUk5WwtLToS/Gf0nZo7DhLMYj1AokNbgrQfMs3W9DUforKd6iKNn38Nsqa3ElzMpqg2fnum65zq1QRRoulRdF2y6MR7+9f3PG/mv6eNqMdV/RL/Pvg27FwS131rjqBIICSspZ3LUhXHpyymc4F5ntvRX/PRdo5YJlzXaGneXr/Nff9ApnvVByaZ4b0v5aryXsFD5kkUFRNxvx/1HY2aabuvYKnkdqRSALszMqUPOjBUy9GNjzvLhHq1HkQ9Rkb9dRJcYVlMPxAQWHOnYG2to9KxijbeaSl8QFt8MMATO8dpMAhc+IodNBloTdwlX9miDwFaMAQxjOJlUb7VDXMaMqHRoXFvvdWKdOXvx7bc/Q7loTmEftcUljSt+Xq0DuIKZ6cf59Szbt2oC8NDKw2wahYi4mwzW6NR4Nt1GrGMd5S0A0Nc+tFwGsasJkX3EkvzbjhH9LGwnjXPw+SLUdrIoVIUP6ISLQXRpXWIfXuHyWI1y7ARi/CpzgekS7vrh1rzuaFP6yGyp/8ZMJlUI42cxoDASWee4Ry7S2MXIJpfFA1csGG/KXoxa+rJziSN0kByYf3wOskhJPoKkYWWgn0IXnV7dSpOkq7w8VdJNC7+uSNyMHUCa3T3+544ThIX+biPRfPNQHAqgmMpd4vYYcirnkjV0/bmfeRV/52v6KGwVEq0xqa7atFqji8Epfqo78GzCt/gMqtD11F2WBUNejTGvLuzFyXuGGSplTo7sAWape0CBxh10st5ovxAZZxUyKk2ULrBN8lHc+SszMHY2JJvg1TWh94m1YLOyuv4Cvtg4p/xErdjcxh4dWSKiqci2Mk/0yLms6iwSB4rlYrteIoLPx/e4rrHRBpBvXHPUjXUCY7ZOLL5ebJu+19rQ4dHJHQz5cIdyT6I9HXqyZywk3dPeW43lr5WpKXeO3PkRkXN++DRmK0Hu8wnDq9qO93MMWpFQdIXfqZh3ddAbnAWspZu0M+dVc/Pgxt5grWIPwyreuITuU4Uv58kKpmTCZJ21JZITgI25fuKVyHHZ72f8EHamTOnST6rMmWbPh/M/KTvxinzWZAc/niXvaKejS5xAS6gaAd6CCekmxvEuqUvMx03KAm4cLa+L9awvYuhl3CNpes5iEiWfJQ8NqmpbNjgrKyhCa+SKTcRqQy+VgRYphDCH6HN9w5rnQNfeJFj5thnoH4X/wMHOUxhtJyyUb6el5yOADsqXFvCKzZZKedn62YM7uI/9bPtBxjxSTOnkcE42PJQ9mPNafsFd6G0yqxCFQRY0zvMqzOoXZxxLEuhdNVnY7xzFkAlfAWtJVN440xsGIdBhHOGdfXaRLYoml18iD+Ir/g8=</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/broadcast/interface.go b/plugins/broadcast/interface.go
deleted file mode 100644
index eda3572f..00000000
--- a/plugins/broadcast/interface.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package broadcast
-
-import "github.com/spiral/roadrunner/v2/common/pubsub"
-
-type Broadcaster interface {
- GetDriver(key string) (pubsub.SubReader, error)
-}
diff --git a/plugins/broadcast/plugin.go b/plugins/broadcast/plugin.go
deleted file mode 100644
index 40263eaa..00000000
--- a/plugins/broadcast/plugin.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package broadcast
-
-import (
- "fmt"
- "sync"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- PluginName string = "broadcast"
- // driver is the mandatory field which should be present in every storage section
- driver string = "driver"
-
- // every driver should have a config section for its local configuration
- conf string = "config"
-)
-
-type Plugin struct {
- sync.RWMutex
-
- cfg *Config
- cfgPlugin config.Configurer
- log logger.Logger
- // publishers implement the Publisher interface
- // and are able to receive a payload
- publishers map[string]pubsub.PubSub
- constructors map[string]pubsub.Constructor
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("broadcast_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
- p.cfg = &Config{}
- // unmarshal config section
- err := cfg.UnmarshalKey(PluginName, &p.cfg.Data)
- if err != nil {
- return errors.E(op, err)
- }
-
- p.publishers = make(map[string]pubsub.PubSub)
- p.constructors = make(map[string]pubsub.Constructor)
-
- p.log = log
- p.cfgPlugin = cfg
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- return make(chan error, 1)
-}
-
-func (p *Plugin) Stop() error {
- return nil
-}
-
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.CollectPublishers,
- }
-}
-
-// CollectPublishers collects all plugins that implement the pubsub.Publisher interface
-func (p *Plugin) CollectPublishers(name endure.Named, constructor pubsub.Constructor) {
- // key - the driver name (e.g. redis), value - its constructor
- p.constructors[name.Name()] = constructor
-}
-
-// Publish is an entry point to the websocket PUBSUB
-func (p *Plugin) Publish(m *pubsub.Message) error {
- p.Lock()
- defer p.Unlock()
-
- const op = errors.Op("broadcast_plugin_publish")
-
- // check if any publisher registered
- if len(p.publishers) > 0 {
- for j := range p.publishers {
- err := p.publishers[j].Publish(m)
- if err != nil {
- return errors.E(op, err)
- }
- }
- return nil
- } else {
- p.log.Warn("no publishers registered")
- }
-
- return nil
-}
-
-func (p *Plugin) PublishAsync(m *pubsub.Message) {
- // TODO(rustatian) channel here?
- go func() {
- p.Lock()
- defer p.Unlock()
- // check if any publisher registered
- if len(p.publishers) > 0 {
- for j := range p.publishers {
- err := p.publishers[j].Publish(m)
- if err != nil {
- p.log.Error("publishAsync", "error", err)
- // continue publishing to the other registered publishers
- continue
- }
- }
- } else {
- p.log.Warn("no publishers registered")
- }
- }()
-}
-
-func (p *Plugin) GetDriver(key string) (pubsub.SubReader, error) {
- const op = errors.Op("broadcast_plugin_get_driver")
-
- // choose a driver
- if val, ok := p.cfg.Data[key]; ok {
- // check the type of the value: it should be a map[string]interface{}
- switch t := val.(type) {
- // correct type
- case map[string]interface{}:
- if _, ok := t[driver]; !ok {
- return nil, errors.E(op, errors.Errorf("could not find the mandatory 'driver' field in the '%s' storage section", key))
- }
- default:
- return nil, errors.E(op, errors.Str("wrong type detected in the configuration, please, check yaml indentation"))
- }
-
- // config key for the particular sub-driver broadcast.memcached.config
- configKey := fmt.Sprintf("%s.%s.%s", PluginName, key, conf)
-
- drName := val.(map[string]interface{})[driver]
-
- // driver name should be a string
- if drStr, ok := drName.(string); ok {
- if _, ok := p.constructors[drStr]; !ok {
- return nil, errors.E(op, errors.Errorf("no driver with the requested name registered, registered: %v, requested: %s", p.constructors, drStr))
- }
-
- switch {
- // try local config first
- case p.cfgPlugin.Has(configKey):
- // we found a local configuration
- ps, err := p.constructors[drStr].PSConstruct(configKey)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // save the initialized publisher channel
- // for the in-memory, register new publishers
- p.publishers[configKey] = ps
-
- return ps, nil
- case p.cfgPlugin.Has(key):
- // try global driver section after local
- ps, err := p.constructors[drStr].PSConstruct(key)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // save the initialized publisher channel
- // for the in-memory, register new publishers
- p.publishers[configKey] = ps
-
- return ps, nil
- default:
- p.log.Error("can't find local or global configuration, this section will be skipped", "local: ", configKey, "global: ", key)
- }
- }
- }
- return nil, errors.E(op, errors.Str("could not find driver by provided key"))
-}
-
-func (p *Plugin) RPC() interface{} {
- return &rpc{
- plugin: p,
- log: p.log,
- }
-}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-func (p *Plugin) Available() {}
diff --git a/plugins/broadcast/rpc.go b/plugins/broadcast/rpc.go
deleted file mode 100644
index 475076a0..00000000
--- a/plugins/broadcast/rpc.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package broadcast
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta"
-)
-
-// rpc collectors struct
-type rpc struct {
- plugin *Plugin
- log logger.Logger
-}
-
-// Publish ... msg is a proto decoded payload
-// see: root/proto
-func (r *rpc) Publish(in *websocketsv1.Request, out *websocketsv1.Response) error {
- const op = errors.Op("broadcast_publish")
-
- // just return in case of nil message
- if in == nil {
- out.Ok = false
- return nil
- }
-
- r.log.Debug("message published", "msg", in.String())
- msgLen := len(in.GetMessages())
-
- for i := 0; i < msgLen; i++ {
- for j := 0; j < len(in.GetMessages()[i].GetTopics()); j++ {
- if in.GetMessages()[i].GetTopics()[j] == "" {
- r.log.Warn("message with empty topic, skipping")
- // skip empty topics
- continue
- }
-
- tmp := &pubsub.Message{
- Topic: in.GetMessages()[i].GetTopics()[j],
- Payload: in.GetMessages()[i].GetPayload(),
- }
-
- err := r.plugin.Publish(tmp)
- if err != nil {
- out.Ok = false
- return errors.E(op, err)
- }
- }
- }
-
- out.Ok = true
- return nil
-}
-
-// PublishAsync ...
-// see: root/proto
-func (r *rpc) PublishAsync(in *websocketsv1.Request, out *websocketsv1.Response) error {
- // just return in case of nil message
- if in == nil {
- out.Ok = false
- return nil
- }
-
- r.log.Debug("message published", "msg", in.GetMessages())
-
- msgLen := len(in.GetMessages())
-
- for i := 0; i < msgLen; i++ {
- for j := 0; j < len(in.GetMessages()[i].GetTopics()); j++ {
- if in.GetMessages()[i].GetTopics()[j] == "" {
- r.log.Warn("message with empty topic, skipping")
- // skip empty topics
- continue
- }
-
- tmp := &pubsub.Message{
- Topic: in.GetMessages()[i].GetTopics()[j],
- Payload: in.GetMessages()[i].GetPayload(),
- }
-
- r.plugin.PublishAsync(tmp)
- }
- }
-
- out.Ok = true
- return nil
-}
diff --git a/plugins/config/config.go b/plugins/config/config.go
deleted file mode 100644
index b5807921..00000000
--- a/plugins/config/config.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package config
-
-import "time"
-
-// General is the part of the config plugin which contains parameters common to the whole RR2 application.
-// For example, HTTP timeouts, header sizes and the graceful shutdown timeout should be the same across the whole application.
-type General struct {
- // GracefulTimeout for the temporal and http
- GracefulTimeout time.Duration
-}
diff --git a/plugins/config/interface.go b/plugins/config/interface.go
deleted file mode 100644
index b3854e09..00000000
--- a/plugins/config/interface.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package config
-
-type Configurer interface {
- // UnmarshalKey takes a single key and unmarshals it into a Struct.
- //
- // func (h *HttpService) Init(cp config.Configurer) error {
- //     h.config = &HttpConfig{}
- //     if err := cp.UnmarshalKey("http", h.config); err != nil {
- //         return err
- //     }
- // }
- UnmarshalKey(name string, out interface{}) error
-
- // Unmarshal unmarshals the config into a Struct. Make sure that the tags
- // on the fields of the structure are properly set.
- Unmarshal(out interface{}) error
-
- // Get is used to get a config section
- Get(name string) interface{}
-
- // Overwrite is used to overwrite particular values in the unmarshalled config
- Overwrite(values map[string]interface{}) error
-
- // Has checks if config section exists.
- Has(name string) bool
-
- // GetCommonConfig returns General section. Read-only
- GetCommonConfig() *General
-}
diff --git a/plugins/config/plugin.go b/plugins/config/plugin.go
deleted file mode 100755
index 918381c4..00000000
--- a/plugins/config/plugin.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package config
-
-import (
- "bytes"
- "fmt"
- "os"
- "strings"
-
- "github.com/spf13/viper"
- "github.com/spiral/errors"
-)
-
-const PluginName string = "config"
-
-type Viper struct {
- viper *viper.Viper
- Path string
- Prefix string
- Type string
- ReadInCfg []byte
- // user-defined Flags in the form of <option>.<key> = <value>,
- // which overwrite the initial config values
- Flags []string
-
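A small in-package test sketch of the defaults applied above (the test name is hypothetical and not part of the original change):

package boltkv

import "testing"

// The zero-value config should fall back to rr.db, 0777 permissions,
// a 60-second GC interval and the unexported "default" bucket.
func TestInitDefaults_sketch(t *testing.T) {
	cfg := &Config{}
	cfg.InitDefaults()
	if cfg.File != "rr.db" || cfg.Permissions != 0777 || cfg.Interval != 60 || cfg.bucket != "default" {
		t.Fatalf("unexpected defaults: %+v", cfg)
	}
}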
- CommonConfig *General
-}
-
-// Init config provider.
-func (v *Viper) Init() error {
- const op = errors.Op("config_plugin_init")
- v.viper = viper.New()
- // If the user provided the config as []byte data, read it and ignore Path and Prefix
- if v.ReadInCfg != nil && v.Type != "" {
- v.viper.SetConfigType("yaml")
- return v.viper.ReadConfig(bytes.NewBuffer(v.ReadInCfg))
- }
-
- // read in environment variables that match
- v.viper.AutomaticEnv()
- if v.Prefix == "" {
- return errors.E(op, errors.Str("prefix should be set"))
- }
-
- v.viper.SetEnvPrefix(v.Prefix)
- if v.Path == "" {
- return errors.E(op, errors.Str("path should be set"))
- }
-
- v.viper.SetConfigFile(v.Path)
- v.viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
-
- err := v.viper.ReadInConfig()
- if err != nil {
- return errors.E(op, err)
- }
-
- // automatically inject ENV variables using ${ENV} pattern
- for _, key := range v.viper.AllKeys() {
- val := v.viper.Get(key)
- v.viper.Set(key, parseEnv(val))
- }
-
- // override config Flags
- if len(v.Flags) > 0 {
- for _, f := range v.Flags {
- key, val, err := parseFlag(f)
- if err != nil {
- return errors.E(op, err)
- }
-
- v.viper.Set(key, val)
- }
- }
-
- return nil
-}
-
-// Overwrite overwrites existing config with provided values
-func (v *Viper) Overwrite(values map[string]interface{}) error {
- if len(values) != 0 {
- for key, value := range values {
- v.viper.Set(key, value)
- }
- }
-
- return nil
-}
-
-// UnmarshalKey reads configuration section into configuration object.
-func (v *Viper) UnmarshalKey(name string, out interface{}) error {
- const op = errors.Op("config_plugin_unmarshal_key")
- err := v.viper.UnmarshalKey(name, &out)
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-func (v *Viper) Unmarshal(out interface{}) error {
- const op = errors.Op("config_plugin_unmarshal")
- err := v.viper.Unmarshal(&out)
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-// Get returns a raw config section by name.
-func (v *Viper) Get(name string) interface{} {
- return v.viper.Get(name)
-}
-
-// Has checks if config section exists.
-func (v *Viper) Has(name string) bool {
- return v.viper.IsSet(name)
-}
-
-// GetCommonConfig returns the common config parameters
-func (v *Viper) GetCommonConfig() *General {
- return v.CommonConfig
-}
-
-func (v *Viper) Serve() chan error {
- return make(chan error, 1)
-}
-
-func (v *Viper) Stop() error {
- return nil
-}
-
-// Name returns user-friendly plugin name
-func (v *Viper) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (v *Viper) Available() {}
-
-func parseFlag(flag string) (string, string, error) {
- const op = errors.Op("parse_flag")
- if !strings.Contains(flag, "=") {
- return "", "", errors.E(op, errors.Errorf("invalid flag `%s`", flag))
- }
-
- parts := strings.SplitN(strings.TrimLeft(flag, " \"'`"), "=", 2)
-
- return strings.Trim(parts[0], " \n\t"), parseValue(strings.Trim(parts[1], " \n\t")), nil
-}
-
-func parseValue(value string) string {
- if value == "" {
- return value
- }
-
- escape := []rune(value)[0]
-
- if escape == '"' || escape == '\'' || escape == '`' {
- value = strings.Trim(value, string(escape))
- value = strings.ReplaceAll(value, fmt.Sprintf("\\%s", string(escape)), string(escape))
- }
-
- return value
-}
-
-func parseEnv(value interface{}) interface{} {
- str, ok := value.(string)
- if !ok || len(str) <= 3 {
- return value
- }
-
- if str[0:2] == "${" && str[len(str)-1:] == "}" {
- if v, ok := os.LookupEnv(str[2 : len(str)-1]); ok {
- return v
- }
- }
-
- return str
-}
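As a quick sanity check of the helpers above, a hedged in-package test sketch (the env variable name is made up and assumed to be unset):

package config

import "testing"

// parseFlag splits `<key>=<value>` pairs and strips surrounding quotes;
// parseEnv only substitutes ${VAR} placeholders that resolve to a set variable.
func TestParseHelpers_sketch(t *testing.T) {
	key, val, err := parseFlag(`http.address="127.0.0.1:8080"`)
	if err != nil || key != "http.address" || val != "127.0.0.1:8080" {
		t.Fatalf("unexpected flag parse result: %q %q %v", key, val, err)
	}
	if got := parseEnv("${__DEFINITELY_UNSET__}"); got != "${__DEFINITELY_UNSET__}" {
		t.Fatalf("unexpected env parse result: %v", got)
	}
}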
diff --git a/plugins/grpc/codec/codec.go b/plugins/grpc/codec/codec.go
deleted file mode 100644
index a9d89ac5..00000000
--- a/plugins/grpc/codec/codec.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package codec
-
-import "google.golang.org/grpc/encoding"
-
-type RawMessage []byte
-
-// By default, gRPC registers and uses the "proto" codec, so it is not necessary to do this in your own code to send and receive proto messages.
-// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec
-const cName string = "proto"
-const rm string = "rawMessage"
-
-func (r RawMessage) Reset() {}
-func (RawMessage) ProtoMessage() {}
-func (RawMessage) String() string { return rm }
-
-type Codec struct{ base encoding.Codec }
-
-// Marshal returns the wire format of v. RawMessage values are returned without encoding.
-func (c *Codec) Marshal(v interface{}) ([]byte, error) {
- if raw, ok := v.(RawMessage); ok {
- return raw, nil
- }
-
- return c.base.Marshal(v)
-}
-
-// Unmarshal parses the wire format into v. RawMessage values are not unmarshalled.
-func (c *Codec) Unmarshal(data []byte, v interface{}) error {
- if raw, ok := v.(*RawMessage); ok {
- *raw = data
- return nil
- }
-
- return c.base.Unmarshal(data, v)
-}
-
-func (c *Codec) Name() string {
- return cName
-}
-
-// String returns the codec name.
-func (c *Codec) String() string {
- return "raw:" + c.base.Name()
-}
diff --git a/plugins/grpc/codec/codec_test.go b/plugins/grpc/codec/codec_test.go
deleted file mode 100644
index 60efb072..00000000
--- a/plugins/grpc/codec/codec_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package codec
-
-import (
- "testing"
-
- json "github.com/json-iterator/go"
- "github.com/stretchr/testify/assert"
-)
-
-type jsonCodec struct{}
-
-func (jsonCodec) Marshal(v interface{}) ([]byte, error) {
- return json.Marshal(v)
-}
-
-func (jsonCodec) Unmarshal(data []byte, v interface{}) error {
- return json.Unmarshal(data, v)
-}
-
-func (jsonCodec) Name() string {
- return "json"
-}
-
-func TestCodec_String(t *testing.T) {
- c := Codec{jsonCodec{}}
-
- assert.Equal(t, "raw:json", c.String())
-
- r := RawMessage{}
- r.Reset()
- r.ProtoMessage()
- assert.Equal(t, "rawMessage", r.String())
-}
-
-func TestCodec_Unmarshal_ByPass(t *testing.T) {
- c := Codec{jsonCodec{}}
-
- s := struct {
- Name string
- }{}
-
- assert.NoError(t, c.Unmarshal([]byte(`{"name":"name"}`), &s))
- assert.Equal(t, "name", s.Name)
-}
-
-func TestCodec_Marshal_ByPass(t *testing.T) {
- c := Codec{jsonCodec{}}
-
- s := struct {
- Name string
- }{
- Name: "name",
- }
-
- d, err := c.Marshal(s)
- assert.NoError(t, err)
-
- assert.Equal(t, `{"Name":"name"}`, string(d))
-}
-
-func TestCodec_Unmarshal_Raw(t *testing.T) {
- c := Codec{jsonCodec{}}
-
- s := RawMessage{}
-
- assert.NoError(t, c.Unmarshal([]byte(`{"name":"name"}`), &s))
- assert.Equal(t, `{"name":"name"}`, string(s))
-}
-
-func TestCodec_Marshal_Raw(t *testing.T) {
- c := Codec{jsonCodec{}}
-
- s := RawMessage(`{"Name":"name"}`)
-
- d, err := c.Marshal(s)
- assert.NoError(t, err)
-
- assert.Equal(t, `{"Name":"name"}`, string(d))
-}
diff --git a/plugins/grpc/config.go b/plugins/grpc/config.go
deleted file mode 100644
index fedd4998..00000000
--- a/plugins/grpc/config.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package grpc
-
-import (
- "math"
- "os"
- "strings"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/pool"
-)
-
-type Config struct {
- Listen string `mapstructure:"listen"`
- Proto string `mapstructure:"proto"`
-
- TLS *TLS
-
- // Env is environment variables passed to the gRPC worker pool
- Env map[string]string `mapstructure:"env"`
-
- GrpcPool *pool.Config `mapstructure:"pool"`
- MaxSendMsgSize int64 `mapstructure:"max_send_msg_size"`
- MaxRecvMsgSize int64 `mapstructure:"max_recv_msg_size"`
- MaxConnectionIdle time.Duration `mapstructure:"max_connection_idle"`
- MaxConnectionAge time.Duration `mapstructure:"max_connection_age"`
- MaxConnectionAgeGrace time.Duration `mapstructure:"max_connection_age_grace"`
- MaxConcurrentStreams int64 `mapstructure:"max_concurrent_streams"`
- PingTime time.Duration `mapstructure:"ping_time"`
- Timeout time.Duration `mapstructure:"timeout"`
-}
-
-type TLS struct {
- Key string
- Cert string
- RootCA string
-}
-
-func (c *Config) InitDefaults() error { //nolint:gocognit
- const op = errors.Op("grpc_plugin_config")
- if c.GrpcPool == nil {
- c.GrpcPool = &pool.Config{}
- }
-
- c.GrpcPool.InitDefaults()
-
- if !strings.Contains(c.Listen, ":") {
- return errors.E(op, errors.Errorf("malformed grpc address, provided: %s", c.Listen))
- }
-
- if c.EnableTLS() {
- if _, err := os.Stat(c.TLS.Key); err != nil {
- if os.IsNotExist(err) {
- return errors.E(op, errors.Errorf("key file '%s' does not exist", c.TLS.Key))
- }
-
- return errors.E(op, err)
- }
-
- if _, err := os.Stat(c.TLS.Cert); err != nil {
- if os.IsNotExist(err) {
- return errors.E(op, errors.Errorf("cert file '%s' does not exist", c.TLS.Cert))
- }
-
- return errors.E(op, err)
- }
-
- // RootCA is optional, but if provided - check it
- if c.TLS.RootCA != "" {
- if _, err := os.Stat(c.TLS.RootCA); err != nil {
- if os.IsNotExist(err) {
- return errors.E(op, errors.Errorf("root ca path provided, but the file '%s' does not exist", c.TLS.RootCA))
- }
- return errors.E(op, err)
- }
- }
- }
-
- // used to set max time
- infinity := time.Duration(math.MaxInt64)
-
- if c.PingTime == 0 {
- c.PingTime = time.Hour * 2
- }
-
- if c.Timeout == 0 {
- c.Timeout = time.Second * 20
- }
-
- if c.MaxConcurrentStreams == 0 {
- c.MaxConcurrentStreams = 10
- }
- // set default
- if c.MaxConnectionAge == 0 {
- c.MaxConnectionAge = infinity
- }
-
- // set default
- if c.MaxConnectionIdle == 0 {
- c.MaxConnectionIdle = infinity
- }
-
- if c.MaxConnectionAgeGrace == 0 {
- c.MaxConnectionAgeGrace = infinity
- }
-
- if c.MaxRecvMsgSize == 0 {
- c.MaxRecvMsgSize = 1024 * 1024 * 50
- } else {
- c.MaxRecvMsgSize = 1024 * 1024 * c.MaxRecvMsgSize
- }
-
- if c.MaxSendMsgSize == 0 {
- c.MaxSendMsgSize = 1024 * 1024 * 50
- } else {
- c.MaxSendMsgSize = 1024 * 1024 * c.MaxSendMsgSize
- }
-
- return nil
-}
-
-func (c *Config) EnableTLS() bool {
- if c.TLS != nil {
- return (c.TLS.RootCA != "" && c.TLS.Key != "" && c.TLS.Cert != "") || (c.TLS.Key != "" && c.TLS.Cert != "")
- }
-
- return false
-}
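The megabyte-to-byte conversion of the message size limits above is easy to misread, so here is a hedged in-package test sketch (names and values are made up, not part of the original change):

package grpc

import "testing"

// MaxRecvMsgSize/MaxSendMsgSize are configured in megabytes and converted to
// bytes by InitDefaults; unset values fall back to 50 MB.
func TestInitDefaultsSizes_sketch(t *testing.T) {
	c := &Config{Listen: "tcp://127.0.0.1:9001", MaxRecvMsgSize: 8}
	if err := c.InitDefaults(); err != nil {
		t.Fatal(err)
	}
	if c.MaxRecvMsgSize != 8*1024*1024 {
		t.Fatalf("expected 8 MB in bytes, got %d", c.MaxRecvMsgSize)
	}
	if c.MaxSendMsgSize != 50*1024*1024 {
		t.Fatalf("expected default 50 MB in bytes, got %d", c.MaxSendMsgSize)
	}
}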
diff --git a/plugins/grpc/parser/message.proto b/plugins/grpc/parser/message.proto
deleted file mode 100644
index a4012010..00000000
--- a/plugins/grpc/parser/message.proto
+++ /dev/null
@@ -1,7 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-message Message {
- string msg = 1;
- int64 value = 2;
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/parse.go b/plugins/grpc/parser/parse.go
deleted file mode 100644
index d59b0927..00000000
--- a/plugins/grpc/parser/parse.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package parser
-
-import (
- "bytes"
- "io"
- "os"
-
- pp "github.com/emicklei/proto"
-)
-
-// Service contains information about a single GRPC service.
-type Service struct {
- // Package defines service namespace.
- Package string
-
- // Name defines service name.
- Name string
-
- // Methods list.
- Methods []Method
-}
-
-// Method describes a single RPC method.
-type Method struct {
- // Name is method name.
- Name string
-
- // StreamsRequest defines whether the method accepts stream input.
- StreamsRequest bool
-
- // RequestType defines message name (from the same package) of method input.
- RequestType string
-
- // StreamsReturns defines whether the method streams its result.
- StreamsReturns bool
-
- // ReturnsType defines message name (from the same package) of method return value.
- ReturnsType string
-}
-
-// File parses the given proto file or returns an error.
-func File(file string, importPath string) ([]Service, error) {
- reader, err := os.Open(file)
- if err != nil {
- return nil, err
- }
- defer reader.Close()
-
- return parse(reader, importPath)
-}
-
-// Bytes parses the given bytes into a proto definition.
-func Bytes(data []byte) ([]Service, error) {
- return parse(bytes.NewBuffer(data), "")
-}
-
-func parse(reader io.Reader, importPath string) ([]Service, error) {
- proto, err := pp.NewParser(reader).Parse()
- if err != nil {
- return nil, err
- }
-
- return parseServices(
- proto,
- parsePackage(proto),
- importPath,
- )
-}
-
-func parsePackage(proto *pp.Proto) string {
- for _, e := range proto.Elements {
- if p, ok := e.(*pp.Package); ok {
- return p.Name
- }
- }
-
- return ""
-}
-
-func parseServices(proto *pp.Proto, pkg string, importPath string) ([]Service, error) {
- services := make([]Service, 0)
-
- pp.Walk(proto, pp.WithService(func(service *pp.Service) {
- services = append(services, Service{
- Package: pkg,
- Name: service.Name,
- Methods: parseMethods(service),
- })
- }))
-
- pp.Walk(proto, func(v pp.Visitee) {
- if i, ok := v.(*pp.Import); ok {
- if im, err := File(importPath+"/"+i.Filename, importPath); err == nil {
- services = append(services, im...)
- }
- }
- })
-
- return services, nil
-}
-
-func parseMethods(s *pp.Service) []Method {
- methods := make([]Method, 0)
- for _, e := range s.Elements {
- if m, ok := e.(*pp.RPC); ok {
- methods = append(methods, Method{
- Name: m.Name,
- StreamsRequest: m.StreamsRequest,
- RequestType: m.RequestType,
- StreamsReturns: m.StreamsReturns,
- ReturnsType: m.ReturnsType,
- })
- }
- }
-
- return methods
-}
diff --git a/plugins/grpc/parser/parse_test.go b/plugins/grpc/parser/parse_test.go
deleted file mode 100644
index b71c133d..00000000
--- a/plugins/grpc/parser/parse_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package parser
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestParseFile(t *testing.T) {
- services, err := File("test.proto", "")
- assert.NoError(t, err)
- assert.Len(t, services, 2)
-
- assert.Equal(t, "app.namespace", services[0].Package)
-}
-
-func TestParseFileWithImportsNestedFolder(t *testing.T) {
- services, err := File("./test_nested/test_import.proto", "./test_nested")
- assert.NoError(t, err)
- assert.Len(t, services, 2)
-
- assert.Equal(t, "app.namespace", services[0].Package)
-}
-
-func TestParseFileWithImports(t *testing.T) {
- services, err := File("test_import.proto", ".")
- assert.NoError(t, err)
- assert.Len(t, services, 2)
-
- assert.Equal(t, "app.namespace", services[0].Package)
-}
-
-func TestParseNotFound(t *testing.T) {
- _, err := File("test2.proto", "")
- assert.Error(t, err)
-}
-
-func TestParseBytes(t *testing.T) {
- services, err := Bytes([]byte{})
- assert.NoError(t, err)
- assert.Len(t, services, 0)
-}
-
-func TestParseString(t *testing.T) {
- services, err := Bytes([]byte(`
-syntax = "proto3";
-package app.namespace;
-
-// Ping Service.
-service PingService {
- // Ping Method.
- rpc Ping (Message) returns (Message) {
- }
-}
-
-// Pong service.
-service PongService {
- rpc Pong (stream Message) returns (stream Message) {
- }
-}
-
-message Message {
- string msg = 1;
- int64 value = 2;
-}
-`))
- assert.NoError(t, err)
- assert.Len(t, services, 2)
-
- assert.Equal(t, "app.namespace", services[0].Package)
-}
diff --git a/plugins/grpc/parser/pong.proto b/plugins/grpc/parser/pong.proto
deleted file mode 100644
index 9756fabe..00000000
--- a/plugins/grpc/parser/pong.proto
+++ /dev/null
@@ -1,10 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-import "message.proto";
-
-// Pong service.
-service PongService {
- rpc Pong (stream Message) returns (stream Message) {
- }
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/test.proto b/plugins/grpc/parser/test.proto
deleted file mode 100644
index e2230954..00000000
--- a/plugins/grpc/parser/test.proto
+++ /dev/null
@@ -1,20 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-// Ping Service.
-service PingService {
- // Ping Method.
- rpc Ping (Message) returns (Message) {
- }
-}
-
-// Pong service.
-service PongService {
- rpc Pong (stream Message) returns (stream Message) {
- }
-}
-
-message Message {
- string msg = 1;
- int64 value = 2;
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/test_import.proto b/plugins/grpc/parser/test_import.proto
deleted file mode 100644
index 1b954fc1..00000000
--- a/plugins/grpc/parser/test_import.proto
+++ /dev/null
@@ -1,12 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-import "message.proto";
-import "pong.proto";
-
-// Ping Service.
-service PingService {
- // Ping Method.
- rpc Ping (Message) returns (Message) {
- }
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/test_nested/message.proto b/plugins/grpc/parser/test_nested/message.proto
deleted file mode 100644
index a4012010..00000000
--- a/plugins/grpc/parser/test_nested/message.proto
+++ /dev/null
@@ -1,7 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-message Message {
- string msg = 1;
- int64 value = 2;
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/test_nested/pong.proto b/plugins/grpc/parser/test_nested/pong.proto
deleted file mode 100644
index 9756fabe..00000000
--- a/plugins/grpc/parser/test_nested/pong.proto
+++ /dev/null
@@ -1,10 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-import "message.proto";
-
-// Pong service.
-service PongService {
- rpc Pong (stream Message) returns (stream Message) {
- }
-} \ No newline at end of file
diff --git a/plugins/grpc/parser/test_nested/test_import.proto b/plugins/grpc/parser/test_nested/test_import.proto
deleted file mode 100644
index a3a476ba..00000000
--- a/plugins/grpc/parser/test_nested/test_import.proto
+++ /dev/null
@@ -1,12 +0,0 @@
-syntax = "proto3";
-package app.namespace;
-
-import "message.proto";
-import "pong.proto";
-
-// Ping Service.
-service PingService {
- // Ping Method.
- rpc Ping (Message) returns (Message) {
- }
-}
diff --git a/plugins/grpc/plugin.go b/plugins/grpc/plugin.go
deleted file mode 100644
index 7518d352..00000000
--- a/plugins/grpc/plugin.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package grpc
-
-import (
- "context"
- "sync"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/grpc/codec"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/utils"
- "google.golang.org/grpc"
- "google.golang.org/grpc/encoding"
-)
-
-const (
- name string = "grpc"
- RrGrpc string = "RR_GRPC"
-)
-
-type Plugin struct {
- mu *sync.RWMutex
- config *Config
- gPool pool.Pool
- opts []grpc.ServerOption
- services []func(server *grpc.Server)
- server *grpc.Server
- rrServer server.Server
-
- // events handler
- events events.Handler
- log logger.Logger
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger, server server.Server) error {
- const op = errors.Op("grpc_plugin_init")
-
- if !cfg.Has(name) {
- return errors.E(errors.Disabled)
- }
- // register the codec
- encoding.RegisterCodec(&codec.Codec{})
-
- err := cfg.UnmarshalKey(name, &p.config)
- if err != nil {
- return errors.E(op, err)
- }
-
- err = p.config.InitDefaults()
- if err != nil {
- return errors.E(op, err)
- }
-
- p.opts = make([]grpc.ServerOption, 0)
- p.services = make([]func(server *grpc.Server), 0)
- p.events = events.NewEventsHandler()
- p.events.AddListener(p.collectGRPCEvents)
- p.rrServer = server
-
- // worker's GRPC mode
- if p.config.Env == nil {
- p.config.Env = make(map[string]string)
- }
- p.config.Env[RrGrpc] = "true"
-
- p.log = log
- p.mu = &sync.RWMutex{}
-
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- const op = errors.Op("grpc_plugin_serve")
- errCh := make(chan error, 1)
-
- var err error
- p.gPool, err = p.rrServer.NewWorkerPool(context.Background(), &pool.Config{
- Debug: p.config.GrpcPool.Debug,
- NumWorkers: p.config.GrpcPool.NumWorkers,
- MaxJobs: p.config.GrpcPool.MaxJobs,
- AllocateTimeout: p.config.GrpcPool.AllocateTimeout,
- DestroyTimeout: p.config.GrpcPool.DestroyTimeout,
- Supervisor: p.config.GrpcPool.Supervisor,
- }, p.config.Env, p.collectGRPCEvents)
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- go func() {
- var err error
- p.mu.Lock()
- p.server, err = p.createGRPCserver()
- if err != nil {
- p.log.Error("create grpc server", "error", err)
- errCh <- errors.E(op, err)
- return
- }
-
- l, err := utils.CreateListener(p.config.Listen)
- if err != nil {
- p.log.Error("create grpc listener", "error", err)
- errCh <- errors.E(op, err)
- }
-
- // protect serve
- p.mu.Unlock()
- err = p.server.Serve(l)
- if err != nil {
- // skip errors when stopping the server
- if err == grpc.ErrServerStopped {
- return
- }
-
- p.log.Error("grpc server stopped", "error", err)
- errCh <- errors.E(op, err)
- return
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin) Stop() error {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.server != nil {
- p.server.Stop()
- }
- return nil
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) Name() string {
- return name
-}
-
-func (p *Plugin) Reset() error {
- p.mu.Lock()
- defer p.mu.Unlock()
- const op = errors.Op("grpc_plugin_reset")
-
- // destroy old pool
- p.gPool.Destroy(context.Background())
-
- var err error
- p.gPool, err = p.rrServer.NewWorkerPool(context.Background(), &pool.Config{
- Debug: p.config.GrpcPool.Debug,
- NumWorkers: p.config.GrpcPool.NumWorkers,
- MaxJobs: p.config.GrpcPool.MaxJobs,
- AllocateTimeout: p.config.GrpcPool.AllocateTimeout,
- DestroyTimeout: p.config.GrpcPool.DestroyTimeout,
- Supervisor: p.config.GrpcPool.Supervisor,
- }, p.config.Env, p.collectGRPCEvents)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (p *Plugin) Workers() []*process.State {
- p.mu.RLock()
- defer p.mu.RUnlock()
-
- workers := p.gPool.Workers()
-
- ps := make([]*process.State, 0, len(workers))
- for i := 0; i < len(workers); i++ {
- state, err := process.WorkerProcessState(workers[i])
- if err != nil {
- return nil
- }
- ps = append(ps, state)
- }
-
- return ps
-}
-
-func (p *Plugin) collectGRPCEvents(event interface{}) {
- if gev, ok := event.(events.GRPCEvent); ok {
- switch gev.Event {
- case events.EventUnaryCallOk:
- p.log.Info("method called", "method", gev.Info.FullMethod, "started", gev.Start, "elapsed", gev.Elapsed)
- case events.EventUnaryCallErr:
- p.log.Info("method call finished with error", "error", gev.Error, "method", gev.Info.FullMethod, "started", gev.Start, "elapsed", gev.Elapsed)
- }
- }
-}
diff --git a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/main.go b/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/main.go
deleted file mode 100644
index 0894a7a8..00000000
--- a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/main.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package main
-
-import (
- "io"
- "io/ioutil"
- "os"
-
- "github.com/spiral/roadrunner/v2/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php"
- "google.golang.org/protobuf/proto"
- plugin "google.golang.org/protobuf/types/pluginpb"
-)
-
-func main() {
- req, err := readRequest(os.Stdin)
- if err != nil {
- panic(err)
- }
-
- if err = writeResponse(os.Stdout, php.Generate(req)); err != nil {
- panic(err)
- }
-}
-
-func readRequest(in io.Reader) (*plugin.CodeGeneratorRequest, error) {
- data, err := ioutil.ReadAll(in)
- if err != nil {
- return nil, err
- }
-
- req := new(plugin.CodeGeneratorRequest)
- if err = proto.Unmarshal(data, req); err != nil {
- return nil, err
- }
-
- return req, nil
-}
-
-func writeResponse(out io.Writer, resp *plugin.CodeGeneratorResponse) error {
- data, err := proto.Marshal(resp)
- if err != nil {
- return err
- }
-
- _, err = out.Write(data)
- return err
-}
diff --git a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/generate.go b/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/generate.go
deleted file mode 100644
index 03c48ac8..00000000
--- a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/generate.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package php
-
-import (
- desc "google.golang.org/protobuf/types/descriptorpb"
- plugin "google.golang.org/protobuf/types/pluginpb"
-)
-
-// Generate generates needed service classes
-func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse {
- resp := &plugin.CodeGeneratorResponse{}
-
- for _, file := range req.ProtoFile {
- for _, service := range file.Service {
- resp.File = append(resp.File, generate(req, file, service))
- }
- }
-
- return resp
-}
-
-func generate(
- req *plugin.CodeGeneratorRequest,
- file *desc.FileDescriptorProto,
- service *desc.ServiceDescriptorProto,
-) *plugin.CodeGeneratorResponse_File {
- return &plugin.CodeGeneratorResponse_File{
- Name: str(filename(file, service.Name)),
- Content: str(body(req, file, service)),
- }
-}
-
-// helper to convert string into string pointer
-func str(str string) *string {
- return &str
-}
diff --git a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/keywords.go b/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/keywords.go
deleted file mode 100644
index 32579e33..00000000
--- a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/keywords.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package php
-
-import (
- "bytes"
- "strings"
- "unicode"
-)
-
-// @see https://github.com/protocolbuffers/protobuf/blob/master/php/ext/google/protobuf/protobuf.c#L168
-var reservedKeywords = []string{
- "abstract", "and", "array", "as", "break",
- "callable", "case", "catch", "class", "clone",
- "const", "continue", "declare", "default", "die",
- "do", "echo", "else", "elseif", "empty",
- "enddeclare", "endfor", "endforeach", "endif", "endswitch",
- "endwhile", "eval", "exit", "extends", "final",
- "for", "foreach", "function", "global", "goto",
- "if", "implements", "include", "include_once", "instanceof",
- "insteadof", "interface", "isset", "list", "namespace",
- "new", "or", "print", "private", "protected",
- "public", "require", "require_once", "return", "static",
- "switch", "throw", "trait", "try", "unset",
- "use", "var", "while", "xor", "int",
- "float", "bool", "string", "true", "false",
- "null", "void", "iterable",
-}
-
-// Check if given name/keyword is reserved by php.
-func isReserved(name string) bool {
- name = strings.ToLower(name)
- for _, k := range reservedKeywords {
- if name == k {
- return true
- }
- }
-
- return false
-}
-
-// generate php namespace or path
-func namespace(pkg *string, sep string) string {
- if pkg == nil {
- return ""
- }
-
- result := bytes.NewBuffer(nil)
- for _, p := range strings.Split(*pkg, ".") {
- result.WriteString(identifier(p, ""))
- result.WriteString(sep)
- }
-
- return strings.Trim(result.String(), sep)
-}
-
-// create php identifier for class or message
-func identifier(name string, suffix string) string {
- name = Camelize(name)
- if suffix != "" {
- return name + Camelize(suffix)
- }
-
- return name
-}
-
-func resolveReserved(identifier string, pkg string) string {
- if isReserved(strings.ToLower(identifier)) {
- if pkg == ".google.protobuf" {
- return "GPB" + identifier
- }
- return "PB" + identifier
- }
-
- return identifier
-}
-
-// Camelize "dino_party" -> "DinoParty"
-func Camelize(word string) string {
- words := splitAtCaseChangeWithTitlecase(word)
- return strings.Join(words, "")
-}
-
-func splitAtCaseChangeWithTitlecase(s string) []string {
- words := make([]string, 0)
- word := make([]rune, 0)
- for _, c := range s {
- spacer := isSpacerChar(c)
- if len(word) > 0 {
- if unicode.IsUpper(c) || spacer {
- words = append(words, string(word))
- word = make([]rune, 0)
- }
- }
- if !spacer {
- if len(word) > 0 {
- word = append(word, unicode.ToLower(c))
- } else {
- word = append(word, unicode.ToUpper(c))
- }
- }
- }
- words = append(words, string(word))
- return words
-}
-
-func isSpacerChar(c rune) bool {
- switch {
- case c == rune("_"[0]):
- return true
- case c == rune(" "[0]):
- return true
- case c == rune(":"[0]):
- return true
- case c == rune("-"[0]):
- return true
- }
- return false
-}
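
The helpers above (Camelize, isReserved, resolveReserved) decide how proto identifiers are mapped to valid PHP names. A minimal sketch of their behavior, written as a package-level test that assumes it sits next to keywords.go in package php (illustrative only, not part of the removed sources):

package php

import "testing"

func TestKeywordHelpersSketch(t *testing.T) {
	// snake_case (and space/colon/dash separated) words are title-cased and joined
	if got := Camelize("dino_party"); got != "DinoParty" {
		t.Fatalf("Camelize: got %q", got)
	}

	// "interface" is a reserved PHP keyword, so derived identifiers get a prefix
	if !isReserved("Interface") {
		t.Fatal("expected Interface to be reported as reserved")
	}
	if got := resolveReserved("Interface", ".google.protobuf"); got != "GPBInterface" {
		t.Fatalf("resolveReserved: got %q", got)
	}
	if got := resolveReserved("Interface", ".app"); got != "PBInterface" {
		t.Fatalf("resolveReserved: got %q", got)
	}
}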
diff --git a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/ns.go b/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/ns.go
deleted file mode 100644
index c1dc3898..00000000
--- a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/ns.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package php
-
-import (
- "bytes"
- "fmt"
- "strings"
-
- desc "google.golang.org/protobuf/types/descriptorpb"
- plugin "google.golang.org/protobuf/types/pluginpb"
-)
-
-// manages internal name representation of the package
-type ns struct {
- // Package defines file package.
- Package string
-
- // Root namespace of the package
- Namespace string
-
- // Import declares what namespaces to be imported
- Import map[string]string
-}
-
-// newNamespace creates new work namespace.
-func newNamespace(req *plugin.CodeGeneratorRequest, file *desc.FileDescriptorProto, service *desc.ServiceDescriptorProto) *ns {
- ns := &ns{
- Package: *file.Package,
- Namespace: namespace(file.Package, "\\"),
- Import: make(map[string]string),
- }
-
- if file.Options != nil && file.Options.PhpNamespace != nil {
- ns.Namespace = *file.Options.PhpNamespace
- }
-
- for k := range service.Method {
- ns.importMessage(req, service.Method[k].InputType)
- ns.importMessage(req, service.Method[k].OutputType)
- }
-
- return ns
-}
-
-// importMessage registers new import message namespace (only the namespace).
-func (ns *ns) importMessage(req *plugin.CodeGeneratorRequest, msg *string) {
- if msg == nil {
- return
- }
-
- chunks := strings.Split(*msg, ".")
- pkg := strings.Join(chunks[:len(chunks)-1], ".")
-
- result := bytes.NewBuffer(nil)
- for _, p := range chunks[:len(chunks)-1] {
- result.WriteString(identifier(p, ""))
- result.WriteString(`\`)
- }
-
- if pkg == "."+ns.Package {
- // root package
- return
- }
-
- for _, f := range req.ProtoFile {
- if pkg == "."+*f.Package {
- if f.Options != nil && f.Options.PhpNamespace != nil {
- // custom imported namespace
- ns.Import[pkg] = *f.Options.PhpNamespace
- return
- }
- }
- }
-
- ns.Import[pkg] = strings.Trim(result.String(), `\`)
-}
-
-// resolve message alias
-func (ns *ns) resolve(msg *string) string {
- chunks := strings.Split(*msg, ".")
- pkg := strings.Join(chunks[:len(chunks)-1], ".")
-
- if pkg == "."+ns.Package {
- // root message
- return identifier(chunks[len(chunks)-1], "")
- }
-
- for iPkg, ns := range ns.Import {
- if pkg == iPkg {
- // use last namespace chunk
- nsChunks := strings.Split(ns, `\`)
- identifier := identifier(chunks[len(chunks)-1], "")
-
- return fmt.Sprintf(
- `%s\%s`,
- nsChunks[len(nsChunks)-1],
- resolveReserved(identifier, pkg),
- )
- }
- }
-
-	// fully qualified name (fallback)
- return "\\" + namespace(msg, "\\")
-}
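
The resolve method above picks the shortest valid PHP reference for a message: a bare identifier for the root package, an imported alias when the package was registered via importMessage, and a fully qualified fallback otherwise. A minimal sketch, assuming it sits next to ns.go in package php (illustrative, not part of the removed sources):

package php

import "testing"

func TestResolveSketch(t *testing.T) {
	n := &ns{
		Package:   "app",
		Namespace: `App`,
		Import:    map[string]string{".google.protobuf": `Google\Protobuf`},
	}

	// a message from the root package resolves to a bare identifier
	local := ".app.Ping"
	if got := n.resolve(&local); got != "Ping" {
		t.Fatalf("resolve: got %q", got)
	}

	// an imported package resolves through the last chunk of its namespace
	imported := ".google.protobuf.Timestamp"
	if got := n.resolve(&imported); got != `Protobuf\Timestamp` {
		t.Fatalf("resolve: got %q", got)
	}
}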
diff --git a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/template.go b/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/template.go
deleted file mode 100644
index e00c6fdd..00000000
--- a/plugins/grpc/protoc_plugins/protoc-gen-php-grpc/php/template.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// MIT License
-//
-// Copyright (c) 2018 SpiralScout
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-
-package php
-
-import (
- "bytes"
- "fmt"
- "strings"
- "text/template"
-
- desc "google.golang.org/protobuf/types/descriptorpb"
- plugin "google.golang.org/protobuf/types/pluginpb"
-)
-
-const phpBody = `<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: {{ .File.Name }}
-{{ $ns := .Namespace -}}
-{{if $ns.Namespace}}
-namespace {{ $ns.Namespace }};
-{{end}}
-use Spiral\GRPC;
-{{- range $n := $ns.Import}}
-use {{ $n }};
-{{- end}}
-
-interface {{ .Service.Name | interface }} extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "{{ .File.Package }}.{{ .Service.Name }}";{{ "\n" }}
-{{- range $m := .Service.Method}}
- /**
- * @param GRPC\ContextInterface $ctx
- * @param {{ name $ns $m.InputType }} $in
- * @return {{ name $ns $m.OutputType }}
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function {{ $m.Name }}(GRPC\ContextInterface $ctx, {{ name $ns $m.InputType }} $in): {{ name $ns $m.OutputType }};
-{{end -}}
-}
-`
-
-// generate php filename
-func filename(file *desc.FileDescriptorProto, name *string) string {
- ns := namespace(file.Package, "/")
- if file.Options != nil && file.Options.PhpNamespace != nil {
- ns = strings.ReplaceAll(*file.Options.PhpNamespace, `\`, `/`)
- }
-
- return fmt.Sprintf("%s/%s.php", ns, identifier(*name, "interface"))
-}
-
-// generate php file body
-func body(req *plugin.CodeGeneratorRequest, file *desc.FileDescriptorProto, service *desc.ServiceDescriptorProto) string {
- out := bytes.NewBuffer(nil)
-
- data := struct {
- Namespace *ns
- File *desc.FileDescriptorProto
- Service *desc.ServiceDescriptorProto
- }{
- Namespace: newNamespace(req, file, service),
- File: file,
- Service: service,
- }
-
- tpl := template.Must(template.New("phpBody").Funcs(template.FuncMap{
- "interface": func(name *string) string {
- return identifier(*name, "interface")
- },
- "name": func(ns *ns, name *string) string {
- return ns.resolve(name)
- },
- }).Parse(phpBody))
-
- err := tpl.Execute(out, data)
- if err != nil {
- panic(err)
- }
-
- return out.String()
-}
diff --git a/plugins/grpc/proxy/proxy.go b/plugins/grpc/proxy/proxy.go
deleted file mode 100644
index 074aac85..00000000
--- a/plugins/grpc/proxy/proxy.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package proxy
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
- "sync"
-
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/plugins/grpc/codec"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/known/anypb"
-)
-
-const (
- peerAddr string = ":peer.address"
- peerAuthType string = ":peer.auth-type"
- delimiter string = "|:|"
-)
-
-// base interface for Proxy class
-type proxyService interface {
- // RegisterMethod registers new RPC method.
- RegisterMethod(method string)
-
- // ServiceDesc returns service description for the proxy.
- ServiceDesc() *grpc.ServiceDesc
-}
-
-// carry details about service, method and RPC context to PHP process
-type rpcContext struct {
- Service string `json:"service"`
- Method string `json:"method"`
- Context map[string][]string `json:"context"`
-}
-
-// Proxy manages GRPC/RoadRunner bridge.
-type Proxy struct {
- mu *sync.RWMutex
- grpcPool pool.Pool
- name string
- metadata string
- methods []string
-}
-
-// NewProxy creates new service proxy object.
-func NewProxy(name string, metadata string, grpcPool pool.Pool, mu *sync.RWMutex) *Proxy {
- return &Proxy{
- mu: mu,
- grpcPool: grpcPool,
- name: name,
- metadata: metadata,
- methods: make([]string, 0),
- }
-}
-
-// RegisterMethod registers new RPC method.
-func (p *Proxy) RegisterMethod(method string) {
- p.methods = append(p.methods, method)
-}
-
-// ServiceDesc returns service description for the proxy.
-func (p *Proxy) ServiceDesc() *grpc.ServiceDesc {
- desc := &grpc.ServiceDesc{
- ServiceName: p.name,
- Metadata: p.metadata,
- HandlerType: (*proxyService)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{},
- }
-
- // Registering methods
- for _, m := range p.methods {
- desc.Methods = append(desc.Methods, grpc.MethodDesc{
- MethodName: m,
- Handler: p.methodHandler(m),
- })
- }
-
- return desc
-}
-
-// Generate method handler proxy.
-func (p *Proxy) methodHandler(method string) func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- return func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := codec.RawMessage{}
- if err := dec(&in); err != nil {
- return nil, wrapError(err)
- }
-
- if interceptor == nil {
- return p.invoke(ctx, method, in)
- }
-
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: fmt.Sprintf("/%s/%s", p.name, method),
- }
-
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return p.invoke(ctx, method, req.(codec.RawMessage))
- }
-
- return interceptor(ctx, in, info, handler)
- }
-}
-
-func (p *Proxy) invoke(ctx context.Context, method string, in codec.RawMessage) (interface{}, error) {
- payload, err := p.makePayload(ctx, method, in)
- if err != nil {
- return nil, err
- }
-
- p.mu.RLock()
- resp, err := p.grpcPool.Exec(payload)
- p.mu.RUnlock()
-
- if err != nil {
- return nil, wrapError(err)
- }
-
- md, err := p.responseMetadata(resp)
- if err != nil {
- return nil, err
- }
- ctx = metadata.NewIncomingContext(ctx, md)
- err = grpc.SetHeader(ctx, md)
- if err != nil {
- return nil, err
- }
-
- return codec.RawMessage(resp.Body), nil
-}
-
-// responseMetadata extracts metadata from roadrunner response Payload.Context and converts it to metadata.MD
-func (p *Proxy) responseMetadata(resp *payload.Payload) (metadata.MD, error) {
- var md metadata.MD
- if resp == nil || len(resp.Context) == 0 {
- return md, nil
- }
-
- var rpcMetadata map[string]string
- err := json.Unmarshal(resp.Context, &rpcMetadata)
- if err != nil {
- return md, err
- }
-
- if len(rpcMetadata) > 0 {
- md = metadata.New(rpcMetadata)
- }
-
- return md, nil
-}
-
-// makePayload generates a RoadRunner-compatible payload based on the gRPC message.
-func (p *Proxy) makePayload(ctx context.Context, method string, body codec.RawMessage) (*payload.Payload, error) {
- ctxMD := make(map[string][]string)
-
- if md, ok := metadata.FromIncomingContext(ctx); ok {
- for k, v := range md {
- ctxMD[k] = v
- }
- }
-
- if pr, ok := peer.FromContext(ctx); ok {
- ctxMD[peerAddr] = []string{pr.Addr.String()}
- if pr.AuthInfo != nil {
- ctxMD[peerAuthType] = []string{pr.AuthInfo.AuthType()}
- }
- }
-
- ctxData, err := json.Marshal(rpcContext{Service: p.name, Method: method, Context: ctxMD})
-
- if err != nil {
- return nil, err
- }
-
- return &payload.Payload{Context: ctxData, Body: body}, nil
-}
-
-// wrapError maps the error received from the PHP worker onto a proper gRPC status code
-func wrapError(err error) error {
- // internal agreement
- if strings.Contains(err.Error(), delimiter) {
- chunks := strings.Split(err.Error(), delimiter)
- code := codes.Internal
-
- // protect the slice access
- if len(chunks) < 2 {
- return err
- }
-
- if phpCode, errConv := strconv.ParseUint(chunks[0], 10, 32); errConv == nil {
- code = codes.Code(phpCode)
- }
-
- st := status.New(code, chunks[1]).Proto()
-
- for _, detailsMessage := range chunks[2:] {
- anyDetailsMessage := anypb.Any{}
- errP := proto.Unmarshal([]byte(detailsMessage), &anyDetailsMessage)
- if errP == nil {
- st.Details = append(st.Details, &anyDetailsMessage)
- }
- }
-
- return status.ErrorProto(st)
- }
-
- return status.Error(codes.Internal, err.Error())
-}
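
wrapError relies on an internal agreement with the PHP worker: the error string carries a gRPC code, a message and optional serialized details separated by the "|:|" delimiter. A minimal sketch of that contract, assuming it sits next to proxy.go in package proxy (illustrative, not part of the removed sources):

package proxy

import (
	"errors"
	"testing"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestWrapErrorSketch(t *testing.T) {
	// "3" is codes.InvalidArgument; the second chunk becomes the status message
	err := wrapError(errors.New("3|:|invalid payload"))

	st, ok := status.FromError(err)
	if !ok || st.Code() != codes.InvalidArgument || st.Message() != "invalid payload" {
		t.Fatalf("unexpected status: %v", st)
	}

	// errors without the delimiter are reported as codes.Internal
	st, _ = status.FromError(wrapError(errors.New("plain failure")))
	if st.Code() != codes.Internal {
		t.Fatalf("unexpected code: %v", st.Code())
	}
}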
diff --git a/plugins/grpc/proxy/proxy_test.go b/plugins/grpc/proxy/proxy_test.go
deleted file mode 100644
index 2c024ee3..00000000
--- a/plugins/grpc/proxy/proxy_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package proxy
-
-// import (
-// "testing"
-// "time"
-
-// "github.com/sirupsen/logrus"
-// "github.com/sirupsen/logrus/hooks/test"
-// "github.com/stretchr/testify/assert"
-// "golang.org/x/net/context"
-// "google.golang.org/grpc"
-// "google.golang.org/grpc/codes"
-// "google.golang.org/grpc/metadata"
-// "google.golang.org/grpc/status"
-// )
-
-// const addr = "localhost:9080"
-
-// func Test_Proxy_Error(t *testing.T) {
-// logger, _ := test.NewNullLogger()
-// logger.SetLevel(logrus.DebugLevel)
-
-// c := service.NewContainer(logger)
-// c.Register(ID, &Service{})
-
-// assert.NoError(t, c.Init(&testCfg{
-// grpcCfg: `{
-// "listen": "tcp://:9080",
-// "tls": {
-// "key": "tests/server.key",
-// "cert": "tests/server.crt"
-// },
-// "proto": "tests/test.proto",
-// "workers":{
-// "command": "php tests/worker.php",
-// "relay": "pipes",
-// "pool": {
-// "numWorkers": 1,
-// "allocateTimeout": 10,
-// "destroyTimeout": 10
-// }
-// }
-// }`,
-// }))
-
-// s, st := c.Get(ID)
-// assert.NotNil(t, s)
-// assert.Equal(t, service.StatusOK, st)
-
-// // should do nothing
-// s.(*Service).Stop()
-
-// go func() { assert.NoError(t, c.Serve()) }()
-// time.Sleep(time.Millisecond * 100)
-// defer c.Stop()
-
-// cl, cn := getClient(addr)
-// defer cn.Close()
-
-// _, err := cl.Throw(context.Background(), &tests.Message{Msg: "notFound"})
-
-// assert.Error(t, err)
-// se, _ := status.FromError(err)
-// assert.Equal(t, "nothing here", se.Message())
-// assert.Equal(t, codes.NotFound, se.Code())
-
-// _, errWithDetails := cl.Throw(context.Background(), &tests.Message{Msg: "withDetails"})
-
-// assert.Error(t, errWithDetails)
-// statusWithDetails, _ := status.FromError(errWithDetails)
-// assert.Equal(t, "main exception message", statusWithDetails.Message())
-// assert.Equal(t, codes.InvalidArgument, statusWithDetails.Code())
-
-// details := statusWithDetails.Details()
-
-// detailsMessageForException := details[0].(*tests.DetailsMessageForException)
-
-// assert.Equal(t, detailsMessageForException.Code, uint64(1))
-// assert.Equal(t, detailsMessageForException.Message, "details message")
-// }
-
-// func Test_Proxy_Metadata(t *testing.T) {
-// logger, _ := test.NewNullLogger()
-// logger.SetLevel(logrus.DebugLevel)
-
-// c := service.NewContainer(logger)
-// c.Register(ID, &Service{})
-
-// assert.NoError(t, c.Init(&testCfg{
-// grpcCfg: `{
-// "listen": "tcp://:9080",
-// "tls": {
-// "key": "tests/server.key",
-// "cert": "tests/server.crt"
-// },
-// "proto": "tests/test.proto",
-// "workers":{
-// "command": "php tests/worker.php",
-// "relay": "pipes",
-// "pool": {
-// "numWorkers": 1,
-// "allocateTimeout": 10,
-// "destroyTimeout": 10
-// }
-// }
-// }`,
-// }))
-
-// s, st := c.Get(ID)
-// assert.NotNil(t, s)
-// assert.Equal(t, service.StatusOK, st)
-
-// // should do nothing
-// s.(*Service).Stop()
-
-// go func() { assert.NoError(t, c.Serve()) }()
-// time.Sleep(time.Millisecond * 100)
-// defer c.Stop()
-
-// cl, cn := getClient(addr)
-// defer cn.Close()
-
-// ctx := metadata.AppendToOutgoingContext(context.Background(), "key", "proxy-value")
-// var header metadata.MD
-// out, err := cl.Info(
-// ctx,
-// &tests.Message{Msg: "MD"},
-// grpc.Header(&header),
-// grpc.WaitForReady(true),
-// )
-// assert.Equal(t, []string{"bar"}, header.Get("foo"))
-// assert.NoError(t, err)
-// assert.Equal(t, `["proxy-value"]`, out.Msg)
-// }
diff --git a/plugins/grpc/server.go b/plugins/grpc/server.go
deleted file mode 100644
index 323f73a0..00000000
--- a/plugins/grpc/server.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package grpc
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "os"
- "path"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/plugins/grpc/parser"
- "github.com/spiral/roadrunner/v2/plugins/grpc/proxy"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/keepalive"
-)
-
-func (p *Plugin) createGRPCserver() (*grpc.Server, error) {
- const op = errors.Op("grpc_plugin_create_server")
- opts, err := p.serverOptions()
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- server := grpc.NewServer(opts...)
-
- if p.config.Proto != "" {
- // php proxy services
- services, err := parser.File(p.config.Proto, path.Dir(p.config.Proto))
- if err != nil {
- return nil, err
- }
-
- for _, service := range services {
- p := proxy.NewProxy(fmt.Sprintf("%s.%s", service.Package, service.Name), p.config.Proto, p.gPool, p.mu)
- for _, m := range service.Methods {
- p.RegisterMethod(m.Name)
- }
-
- server.RegisterService(p.ServiceDesc(), p)
- }
- }
-
- // external and native services
- for _, r := range p.services {
- r(server)
- }
-
- return server, nil
-}
-
-func (p *Plugin) interceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- start := time.Now()
- resp, err := handler(ctx, req)
- if err != nil {
- p.events.Push(events.GRPCEvent{
- Event: events.EventUnaryCallErr,
- Info: info,
- Error: err,
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil, err
- }
-
- p.events.Push(events.GRPCEvent{
- Event: events.EventUnaryCallOk,
- Info: info,
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return resp, nil
-}
-
-func (p *Plugin) serverOptions() ([]grpc.ServerOption, error) {
- const op = errors.Op("grpc_plugin_server_options")
-
- var tcreds credentials.TransportCredentials
- var opts []grpc.ServerOption
- var cert tls.Certificate
- var certPool *x509.CertPool
- var rca []byte
- var err error
-
- if p.config.EnableTLS() {
- // if client CA is not empty we combine it with Cert and Key
- if p.config.TLS.RootCA != "" {
- cert, err = tls.LoadX509KeyPair(p.config.TLS.Cert, p.config.TLS.Key)
- if err != nil {
- return nil, err
- }
-
- certPool, err = x509.SystemCertPool()
- if err != nil {
- return nil, err
- }
- if certPool == nil {
- certPool = x509.NewCertPool()
- }
-
- rca, err = os.ReadFile(p.config.TLS.RootCA)
- if err != nil {
- return nil, err
- }
-
- if ok := certPool.AppendCertsFromPEM(rca); !ok {
- return nil, errors.E(op, errors.Str("could not append Certs from PEM"))
- }
-
- tcreds = credentials.NewTLS(&tls.Config{
- MinVersion: tls.VersionTLS12,
- ClientAuth: tls.RequireAndVerifyClientCert,
- Certificates: []tls.Certificate{cert},
- ClientCAs: certPool,
- })
- } else {
- tcreds, err = credentials.NewServerTLSFromFile(p.config.TLS.Cert, p.config.TLS.Key)
- if err != nil {
- return nil, err
- }
- }
-
- serverOptions := []grpc.ServerOption{
- grpc.MaxSendMsgSize(int(p.config.MaxSendMsgSize)),
- grpc.MaxRecvMsgSize(int(p.config.MaxRecvMsgSize)),
- grpc.KeepaliveParams(keepalive.ServerParameters{
- MaxConnectionIdle: p.config.MaxConnectionIdle,
- MaxConnectionAge: p.config.MaxConnectionAge,
- MaxConnectionAgeGrace: p.config.MaxConnectionAge,
- Time: p.config.PingTime,
- Timeout: p.config.Timeout,
- }),
- grpc.MaxConcurrentStreams(uint32(p.config.MaxConcurrentStreams)),
- }
-
- opts = append(opts, grpc.Creds(tcreds))
- opts = append(opts, serverOptions...)
- }
-
- opts = append(opts, p.opts...)
-
- // custom codec is required to bypass protobuf, common interceptor used for debug and stats
- return append(
- opts,
- grpc.UnaryInterceptor(p.interceptor),
- // TODO(rustatian): check deprecation
- // grpc.CustomCodec(&codec{encoding.GetCodec(encCodec)}),
- ), nil
-}
diff --git a/plugins/gzip/plugin.go b/plugins/gzip/plugin.go
deleted file mode 100644
index 05f1eb63..00000000
--- a/plugins/gzip/plugin.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package gzip
-
-import (
- "net/http"
-
- "github.com/klauspost/compress/gzhttp"
-)
-
-const PluginName = "gzip"
-
-type Plugin struct{}
-
-func (g *Plugin) Init() error {
- return nil
-}
-
-func (g *Plugin) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- gzhttp.GzipHandler(next).ServeHTTP(w, r)
- })
-}
-
-// Available interface implementation
-func (g *Plugin) Available() {}
-
-func (g *Plugin) Name() string {
- return PluginName
-}
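
Note that Middleware above rebuilds the gzip wrapper on every request. Since gzhttp.GzipHandler already returns an http.Handler, a hypothetical variant could wrap the chain once (a sketch, not the plugin's actual implementation):

package gzip

import (
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

// middlewareOnce is a hypothetical alternative that wraps the next handler a
// single time instead of re-wrapping it on every request.
func middlewareOnce(next http.Handler) http.Handler {
	return gzhttp.GzipHandler(next)
}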
diff --git a/plugins/headers/config.go b/plugins/headers/config.go
deleted file mode 100644
index 688b4764..00000000
--- a/plugins/headers/config.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package headers
-
-// Config declares headers service configuration.
-type Config struct {
- Headers *struct {
- // CORS settings.
- CORS *CORSConfig
-
-		// Request headers to add to every payload sent to PHP.
- Request map[string]string
-
- // Response headers to add to every payload generated by PHP.
- Response map[string]string
- }
-}
-
-// CORSConfig headers configuration.
-type CORSConfig struct {
- // AllowedOrigin: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin
- AllowedOrigin string `mapstructure:"allowed_origin"`
-
- // AllowedHeaders: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers
- AllowedHeaders string `mapstructure:"allowed_headers"`
-
- // AllowedMethods: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Methods
- AllowedMethods string `mapstructure:"allowed_methods"`
-
- // AllowCredentials https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
- AllowCredentials *bool `mapstructure:"allow_credentials"`
-
- // ExposeHeaders: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
- ExposedHeaders string `mapstructure:"exposed_headers"`
-
-	// MaxAge of CORS headers in seconds.
- MaxAge int `mapstructure:"max_age"`
-}
diff --git a/plugins/headers/plugin.go b/plugins/headers/plugin.go
deleted file mode 100644
index 19c444df..00000000
--- a/plugins/headers/plugin.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package headers
-
-import (
- "net/http"
- "strconv"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-// PluginName contains default service name.
-const PluginName = "headers"
-const RootPluginName = "http"
-
-// Plugin manages request and response headers. Potentially convert into middleware?
-type Plugin struct {
-	// headers service configuration
- cfg *Config
-}
-
-// Init configures the service and must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (s *Plugin) Init(cfg config.Configurer) error {
- const op = errors.Op("headers_plugin_init")
- if !cfg.Has(RootPluginName) {
- return errors.E(op, errors.Disabled)
- }
- err := cfg.UnmarshalKey(RootPluginName, &s.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- if s.cfg.Headers == nil {
- return errors.E(op, errors.Disabled)
- }
-
- return nil
-}
-
-// Middleware is HTTP plugin middleware to serve headers
-func (s *Plugin) Middleware(next http.Handler) http.Handler {
- // Define the http.HandlerFunc
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if s.cfg.Headers.Request != nil {
- for k, v := range s.cfg.Headers.Request {
- r.Header.Add(k, v)
- }
- }
-
- if s.cfg.Headers.Response != nil {
- for k, v := range s.cfg.Headers.Response {
- w.Header().Set(k, v)
- }
- }
-
- if s.cfg.Headers.CORS != nil {
- if r.Method == http.MethodOptions {
- s.preflightRequest(w)
- return
- }
- s.corsHeaders(w)
- }
-
- next.ServeHTTP(w, r)
- })
-}
-
-func (s *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (s *Plugin) Available() {}
-
-// configure OPTIONS response
-func (s *Plugin) preflightRequest(w http.ResponseWriter) {
- headers := w.Header()
-
- headers.Add("Vary", "Origin")
- headers.Add("Vary", "Access-Control-Request-Method")
- headers.Add("Vary", "Access-Control-Request-Headers")
-
- if s.cfg.Headers.CORS.AllowedOrigin != "" {
- headers.Set("Access-Control-Allow-Origin", s.cfg.Headers.CORS.AllowedOrigin)
- }
-
- if s.cfg.Headers.CORS.AllowedHeaders != "" {
- headers.Set("Access-Control-Allow-Headers", s.cfg.Headers.CORS.AllowedHeaders)
- }
-
- if s.cfg.Headers.CORS.AllowedMethods != "" {
- headers.Set("Access-Control-Allow-Methods", s.cfg.Headers.CORS.AllowedMethods)
- }
-
- if s.cfg.Headers.CORS.AllowCredentials != nil {
- headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.Headers.CORS.AllowCredentials))
- }
-
- if s.cfg.Headers.CORS.MaxAge > 0 {
- headers.Set("Access-Control-Max-Age", strconv.Itoa(s.cfg.Headers.CORS.MaxAge))
- }
-
- w.WriteHeader(http.StatusOK)
-}
-
-// configure CORS headers
-func (s *Plugin) corsHeaders(w http.ResponseWriter) {
- headers := w.Header()
-
- headers.Add("Vary", "Origin")
-
- if s.cfg.Headers.CORS.AllowedOrigin != "" {
- headers.Set("Access-Control-Allow-Origin", s.cfg.Headers.CORS.AllowedOrigin)
- }
-
- if s.cfg.Headers.CORS.AllowedHeaders != "" {
- headers.Set("Access-Control-Allow-Headers", s.cfg.Headers.CORS.AllowedHeaders)
- }
-
- if s.cfg.Headers.CORS.ExposedHeaders != "" {
- headers.Set("Access-Control-Expose-Headers", s.cfg.Headers.CORS.ExposedHeaders)
- }
-
- if s.cfg.Headers.CORS.AllowCredentials != nil {
- headers.Set("Access-Control-Allow-Credentials", strconv.FormatBool(*s.cfg.Headers.CORS.AllowCredentials))
- }
-}
diff --git a/plugins/http/attributes/attributes.go b/plugins/http/attributes/attributes.go
deleted file mode 100644
index 201c2d5e..00000000
--- a/plugins/http/attributes/attributes.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package attributes
-
-import (
- "context"
- "errors"
- "net/http"
-)
-
-// contextKey is a value for use with context.WithValue. It's used as
-// a pointer so it fits in an interface{} without allocation.
-type contextKey struct {
- name string
-}
-
-func (k *contextKey) String() string { return k.name }
-
-var (
-	// PsrContextKey is a context key. It is used to store the http attribute bag in the request context
- PsrContextKey = &contextKey{"psr_attributes"}
-)
-
-type attrs map[string]interface{}
-
-func (v attrs) get(key string) interface{} {
- if v == nil {
- return ""
- }
-
- return v[key]
-}
-
-func (v attrs) set(key string, value interface{}) {
- v[key] = value
-}
-
-func (v attrs) del(key string) {
- delete(v, key)
-}
-
-// Init is idempotent; it returns the request with a new context and attribute bag attached.
-func Init(r *http.Request) *http.Request {
- // do not overwrite psr attributes
- if val := r.Context().Value(PsrContextKey); val == nil {
- return r.WithContext(context.WithValue(r.Context(), PsrContextKey, attrs{}))
- }
- return r
-}
-
-// All returns all context attributes.
-func All(r *http.Request) map[string]interface{} {
- v := r.Context().Value(PsrContextKey)
- if v == nil {
- return attrs{}
- }
-
- return v.(attrs)
-}
-
-// Get returns the value stored in the request context under the given key,
-// or nil when no attribute bag is attached.
-func Get(r *http.Request, key string) interface{} {
- v := r.Context().Value(PsrContextKey)
- if v == nil {
- return nil
- }
-
- return v.(attrs).get(key)
-}
-
-// Set sets the key to value. It replaces any existing
-// values. Context specific.
-func Set(r *http.Request, key string, value interface{}) error {
- v := r.Context().Value(PsrContextKey)
- if v == nil {
- return errors.New("unable to find `psr:attributes` context key")
- }
-
- v.(attrs).set(key, value)
- return nil
-}
-
-// Delete deletes values associated with attribute key.
-func (v attrs) Delete(key string) {
- if v == nil {
- return
- }
-
- v.del(key)
-}
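
The attribute bag is what lets middleware pass per-request values down to the PSR-7 worker. A minimal usage sketch (the middleware name and the "request-id" key are hypothetical; the import path mirrors the pre-move layout shown in this diff):

package main

import (
	"fmt"
	"net/http"

	"github.com/spiral/roadrunner/v2/plugins/http/attributes"
)

// tagRequest stores a value in the attribute bag so later handlers can read it.
func tagRequest(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r = attributes.Init(r)                      // attach the bag once, idempotent
		_ = attributes.Set(r, "request-id", "abc1") // safe after Init
		next.ServeHTTP(w, r)
	})
}

func main() {
	h := tagRequest(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, attributes.Get(r, "request-id"))
	}))

	_ = http.ListenAndServe(":8080", h)
}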
diff --git a/plugins/http/config/fcgi.go b/plugins/http/config/fcgi.go
deleted file mode 100644
index 3d4acbe1..00000000
--- a/plugins/http/config/fcgi.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package config
-
-// FCGI for FastCGI server.
-type FCGI struct {
- // Address and port to handle as http server.
-	// Address and port to handle as FastCGI server.
-}
diff --git a/plugins/http/config/http.go b/plugins/http/config/http.go
deleted file mode 100644
index f06adc49..00000000
--- a/plugins/http/config/http.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package config
-
-import (
- "net"
- "runtime"
- "strings"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/pool"
-)
-
-// HTTP configures RoadRunner HTTP server.
-type HTTP struct {
- // Host and port to handle as http server.
- Address string
-
- // InternalErrorCode used to override default 500 (InternalServerError) http code
- InternalErrorCode uint64 `mapstructure:"internal_error_code"`
-
- // SSLConfig defines https server options.
- SSLConfig *SSL `mapstructure:"ssl"`
-
- // FCGIConfig configuration. You can use FastCGI without HTTP server.
- FCGIConfig *FCGI `mapstructure:"fcgi"`
-
- // HTTP2Config configuration
- HTTP2Config *HTTP2 `mapstructure:"http2"`
-
-	// MaxRequestSize specifies the max size for the payload body in megabytes; set 0 for unlimited.
- MaxRequestSize uint64 `mapstructure:"max_request_size"`
-
-	// TrustedSubnets declares the IP subnets which are allowed to set the client IP using X-Real-Ip and X-Forwarded-For
- TrustedSubnets []string `mapstructure:"trusted_subnets"`
-
-	// Uploads defines the file upload configuration.
- Uploads *Uploads `mapstructure:"uploads"`
-
- // Pool configures worker pool.
- Pool *pool.Config `mapstructure:"pool"`
-
- // Env is environment variables passed to the http pool
- Env map[string]string
-
- // List of the middleware names (order will be preserved)
- Middleware []string
-
- // slice of net.IPNet
- Cidrs Cidrs
-}
-
-// EnableHTTP is true when http server must run.
-func (c *HTTP) EnableHTTP() bool {
- return c.Address != ""
-}
-
-// EnableTLS returns true if pool must listen TLS connections.
-func (c *HTTP) EnableTLS() bool {
- return c.SSLConfig.Key != "" || c.SSLConfig.Cert != ""
-}
-
-// EnableH2C when HTTP/2 extension must be enabled on TCP.
-func (c *HTTP) EnableH2C() bool {
- return c.HTTP2Config.H2C
-}
-
-// EnableFCGI is true when FastCGI server must be enabled.
-func (c *HTTP) EnableFCGI() bool {
- return c.FCGIConfig.Address != ""
-}
-
-// InitDefaults must populate HTTP values using given HTTP source. Must return error if HTTP is not valid.
-func (c *HTTP) InitDefaults() error {
- if c.Pool == nil {
- // default pool
- c.Pool = &pool.Config{
- Debug: false,
- NumWorkers: uint64(runtime.NumCPU()),
- MaxJobs: 0,
- AllocateTimeout: time.Second * 60,
- DestroyTimeout: time.Second * 60,
- Supervisor: nil,
- }
- }
-
- if c.InternalErrorCode == 0 {
- c.InternalErrorCode = 500
- }
-
- if c.HTTP2Config == nil {
- c.HTTP2Config = &HTTP2{}
- }
-
- if c.FCGIConfig == nil {
- c.FCGIConfig = &FCGI{}
- }
-
- if c.Uploads == nil {
- c.Uploads = &Uploads{}
- }
-
- if c.SSLConfig == nil {
- c.SSLConfig = &SSL{}
- }
-
- if c.SSLConfig.Address == "" {
- c.SSLConfig.Address = "127.0.0.1:443"
- }
-
- err := c.HTTP2Config.InitDefaults()
- if err != nil {
- return err
- }
- err = c.Uploads.InitDefaults()
- if err != nil {
- return err
- }
-
- if c.TrustedSubnets == nil {
- // @see https://en.wikipedia.org/wiki/Reserved_IP_addresses
- c.TrustedSubnets = []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- }
- }
-
- cidrs, err := ParseCIDRs(c.TrustedSubnets)
- if err != nil {
- return err
- }
- c.Cidrs = cidrs
-
- return c.Valid()
-}
-
-// ParseCIDRs parses the given subnets and returns them as a slice of net.IPNet addresses
-func ParseCIDRs(subnets []string) (Cidrs, error) {
- c := make(Cidrs, 0, len(subnets))
- for _, cidr := range subnets {
- _, cr, err := net.ParseCIDR(cidr)
- if err != nil {
- return nil, err
- }
-
- c = append(c, cr)
- }
-
- return c, nil
-}
-
-// Valid validates the configuration.
-func (c *HTTP) Valid() error {
- const op = errors.Op("validation")
- if c.Uploads == nil {
- return errors.E(op, errors.Str("malformed uploads config"))
- }
-
- if c.HTTP2Config == nil {
- return errors.E(op, errors.Str("malformed http2 config"))
- }
-
- if c.Pool == nil {
- return errors.E(op, "malformed pool config")
- }
-
- if !c.EnableHTTP() && !c.EnableTLS() && !c.EnableFCGI() {
- return errors.E(op, errors.Str("unable to run http service, no method has been specified (http, https, http/2 or FastCGI)"))
- }
-
- if c.Address != "" && !strings.Contains(c.Address, ":") {
- return errors.E(op, errors.Str("malformed http server address"))
- }
-
- if c.EnableTLS() {
- err := c.SSLConfig.Valid()
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- return nil
-}
diff --git a/plugins/http/config/http2.go b/plugins/http/config/http2.go
deleted file mode 100644
index b1e109e9..00000000
--- a/plugins/http/config/http2.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package config
-
-// HTTP2 HTTP/2 server customizations.
-type HTTP2 struct {
- // h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
- // that should be h2c traffic. There are two ways to begin a h2c connection
- // (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
- // works by starting an h2c connection with a string of bytes that is valid
- // HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
- // h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
- // h2c. When either of those situations occur we hijack the HTTP/1 connection,
- // convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
-
- // H2C enables HTTP/2 over TCP
- H2C bool
-
- // MaxConcurrentStreams defaults to 128.
- MaxConcurrentStreams uint32 `mapstructure:"max_concurrent_streams"`
-}
-
-// InitDefaults sets default values for HTTP/2 configuration.
-func (cfg *HTTP2) InitDefaults() error {
- if cfg.MaxConcurrentStreams == 0 {
- cfg.MaxConcurrentStreams = 128
- }
-
- return nil
-}
diff --git a/plugins/http/config/ip.go b/plugins/http/config/ip.go
deleted file mode 100644
index c4981f74..00000000
--- a/plugins/http/config/ip.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package config
-
-import "net"
-
-// Cidrs is a slice of IPNet addresses
-type Cidrs []*net.IPNet
-
-// IsTrusted checks whether the IP address belongs to one of the subnets provided in the config
-func (c *Cidrs) IsTrusted(ip string) bool {
- if len(*c) == 0 {
- return false
- }
-
- i := net.ParseIP(ip)
- if i == nil {
- return false
- }
-
-	for _, cidr := range *c {
-		if cidr.Contains(i) {
- return true
- }
- }
-
- return false
-}
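
IsTrusted is the check behind the trusted_subnets option: only requests originating from one of the configured CIDR ranges may override the client IP. A minimal usage sketch (the import path mirrors the pre-move layout shown in this diff):

package main

import (
	"fmt"

	httpConfig "github.com/spiral/roadrunner/v2/plugins/http/config"
)

func main() {
	cidrs, err := httpConfig.ParseCIDRs([]string{"10.0.0.0/8", "127.0.0.0/8"})
	if err != nil {
		panic(err)
	}

	fmt.Println(cidrs.IsTrusted("10.1.2.3"))  // true: inside 10.0.0.0/8
	fmt.Println(cidrs.IsTrusted("8.8.8.8"))   // false: not in any listed subnet
	fmt.Println(cidrs.IsTrusted("not-an-ip")) // false: unparsable address
}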
diff --git a/plugins/http/config/ssl.go b/plugins/http/config/ssl.go
deleted file mode 100644
index 0e3c0caf..00000000
--- a/plugins/http/config/ssl.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package config
-
-import (
- "os"
- "strconv"
- "strings"
-
- "github.com/spiral/errors"
-)
-
-// SSL defines https server configuration.
-type SSL struct {
-	// Address to listen as HTTPS server, defaults to 127.0.0.1:443.
- Address string
-
- // Redirect when enabled forces all http connections to switch to https.
- Redirect bool
-
- // Key defined private server key.
- Key string
-
- // Cert is https certificate.
- Cert string
-
- // Root CA file
- RootCA string `mapstructure:"root_ca"`
-
- // internal
- host string
- Port int
-}
-
-func (s *SSL) Valid() error {
- const op = errors.Op("ssl_valid")
-
- parts := strings.Split(s.Address, ":")
- switch len(parts) {
- // :443 form
- // 127.0.0.1:443 form
-	// when the host part is omitted, 127.0.0.1 is used as the host
- case 2:
- if parts[0] == "" {
- s.host = "127.0.0.1"
- } else {
- s.host = parts[0]
- }
-
- port, err := strconv.Atoi(parts[1])
- if err != nil {
- return errors.E(op, err)
- }
- s.Port = port
- default:
- return errors.E(op, errors.Errorf("unknown format, accepted format is [:<port> or <host>:<port>], provided: %s", s.Address))
- }
-
- if _, err := os.Stat(s.Key); err != nil {
- if os.IsNotExist(err) {
-			return errors.E(op, errors.Errorf("key file '%s' does not exist", s.Key))
- }
-
- return err
- }
-
- if _, err := os.Stat(s.Cert); err != nil {
- if os.IsNotExist(err) {
-			return errors.E(op, errors.Errorf("cert file '%s' does not exist", s.Cert))
- }
-
- return err
- }
-
- // RootCA is optional, but if provided - check it
- if s.RootCA != "" {
- if _, err := os.Stat(s.RootCA); err != nil {
- if os.IsNotExist(err) {
-				return errors.E(op, errors.Errorf("root ca path provided, but path '%s' does not exist", s.RootCA))
- }
- return err
- }
- }
-
- return nil
-}
diff --git a/plugins/http/config/ssl_config_test.go b/plugins/http/config/ssl_config_test.go
deleted file mode 100644
index 8f6cf40e..00000000
--- a/plugins/http/config/ssl_config_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package config
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestSSL_Valid1(t *testing.T) {
- conf := &SSL{
- Address: "",
- Redirect: false,
- Key: "",
- Cert: "",
- RootCA: "",
- host: "",
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
-
-func TestSSL_Valid2(t *testing.T) {
- conf := &SSL{
- Address: ":hello",
- Redirect: false,
- Key: "",
- Cert: "",
- RootCA: "",
- host: "",
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
-
-func TestSSL_Valid3(t *testing.T) {
- conf := &SSL{
- Address: ":555",
- Redirect: false,
- Key: "",
- Cert: "",
- RootCA: "",
- host: "",
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
-
-func TestSSL_Valid4(t *testing.T) {
- conf := &SSL{
- Address: ":555",
- Redirect: false,
- Key: "../../../tests/plugins/http/fixtures/server.key",
- Cert: "../../../tests/plugins/http/fixtures/server.crt",
- RootCA: "",
- host: "",
- // private
- Port: 0,
- }
-
- err := conf.Valid()
- assert.NoError(t, err)
-}
-
-func TestSSL_Valid5(t *testing.T) {
- conf := &SSL{
- Address: "a:b:c",
- Redirect: false,
- Key: "../../../tests/plugins/http/fixtures/server.key",
- Cert: "../../../tests/plugins/http/fixtures/server.crt",
- RootCA: "",
- host: "",
- // private
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
-
-func TestSSL_Valid6(t *testing.T) {
- conf := &SSL{
- Address: ":",
- Redirect: false,
- Key: "../../../tests/plugins/http/fixtures/server.key",
- Cert: "../../../tests/plugins/http/fixtures/server.crt",
- RootCA: "",
- host: "",
- // private
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
-
-func TestSSL_Valid7(t *testing.T) {
- conf := &SSL{
- Address: "127.0.0.1:555:1",
- Redirect: false,
- Key: "../../../tests/plugins/http/fixtures/server.key",
- Cert: "../../../tests/plugins/http/fixtures/server.crt",
- RootCA: "",
- host: "",
- // private
- Port: 0,
- }
-
- err := conf.Valid()
- assert.Error(t, err)
-}
diff --git a/plugins/http/config/uploads_config.go b/plugins/http/config/uploads_config.go
deleted file mode 100644
index 5edb0ab7..00000000
--- a/plugins/http/config/uploads_config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package config
-
-import (
- "os"
- "path"
- "strings"
-)
-
-// Uploads describes file location and controls access to them.
-type Uploads struct {
- // Dir contains name of directory to control access to.
- Dir string
-
- // Forbid specifies list of file extensions which are forbidden for access.
-	// Example: .php, .exe, .bat, .htaccess, etc.
- Forbid []string
-}
-
-// InitDefaults sets missing values to their default values.
-func (cfg *Uploads) InitDefaults() error {
- cfg.Forbid = []string{".php", ".exe", ".bat"}
- cfg.Dir = os.TempDir()
- return nil
-}
-
-// TmpDir returns temporary directory.
-func (cfg *Uploads) TmpDir() string {
- if cfg.Dir != "" {
- return cfg.Dir
- }
-
- return os.TempDir()
-}
-
-// Forbids returns true if the file extension is not allowed for upload.
-func (cfg *Uploads) Forbids(filename string) bool {
- ext := strings.ToLower(path.Ext(filename))
-
- for _, v := range cfg.Forbid {
- if ext == v {
- return true
- }
- }
-
- return false
-}
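
InitDefaults and Forbids together decide where uploads are buffered and which extensions are rejected; the extension check is case-insensitive. A minimal usage sketch (the import path mirrors the pre-move layout shown in this diff):

package main

import (
	"fmt"

	httpConfig "github.com/spiral/roadrunner/v2/plugins/http/config"
)

func main() {
	up := &httpConfig.Uploads{}
	_ = up.InitDefaults() // Dir = os.TempDir(), Forbid = [".php", ".exe", ".bat"]

	fmt.Println(up.Forbids("avatar.png")) // false: extension is allowed
	fmt.Println(up.Forbids("shell.PHP"))  // true: extension is lower-cased before matching
	fmt.Println(up.TmpDir())              // configured upload directory
}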
diff --git a/plugins/http/metrics.go b/plugins/http/metrics.go
deleted file mode 100644
index d7a9110b..00000000
--- a/plugins/http/metrics.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package http
-
-import (
- "strconv"
-
- "github.com/prometheus/client_golang/prometheus"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
-)
-
-func (p *Plugin) MetricsCollector() []prometheus.Collector {
- // p - implements Exporter interface (workers)
- // other - request duration and count
- return []prometheus.Collector{p, p.requestsExporter.requestDuration, p.requestsExporter.requestCounter}
-}
-
-func (p *Plugin) metricsCallback(event interface{}) {
- switch e := event.(type) {
- case handler.ResponseEvent:
- p.requestsExporter.requestCounter.With(prometheus.Labels{
- "status": strconv.Itoa(e.Response.Status),
- }).Inc()
-
- p.requestsExporter.requestDuration.With(prometheus.Labels{
- "status": strconv.Itoa(e.Response.Status),
- }).Observe(e.Elapsed().Seconds())
- case handler.ErrorEvent:
- p.requestsExporter.requestCounter.With(prometheus.Labels{
- "status": "500",
- }).Inc()
-
- p.requestsExporter.requestDuration.With(prometheus.Labels{
- "status": "500",
- }).Observe(e.Elapsed().Seconds())
- }
-}
-
-type workersExporter struct {
- wm *prometheus.Desc
- workersMemory uint64
-}
-
-func newWorkersExporter() *workersExporter {
- return &workersExporter{
- wm: prometheus.NewDesc("rr_http_workers_memory_bytes", "Memory usage by HTTP workers.", nil, nil),
- workersMemory: 0,
- }
-}
-
-func (p *Plugin) Describe(d chan<- *prometheus.Desc) {
- // send description
- d <- p.workersExporter.wm
-}
-
-func (p *Plugin) Collect(ch chan<- prometheus.Metric) {
- // get the copy of the processes
- workers := p.Workers()
-
- // cumulative RSS memory in bytes
- var cum uint64
-
- // collect the memory
- for i := 0; i < len(workers); i++ {
- cum += workers[i].MemoryUsage
- }
-
- // send the values to the prometheus
-	// send the values to Prometheus
-}
-
-type requestsExporter struct {
- requestCounter *prometheus.CounterVec
- requestDuration *prometheus.HistogramVec
-}
-
-func newRequestsExporter() *requestsExporter {
- return &requestsExporter{
- requestCounter: prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Name: "rr_http_request_total",
- Help: "Total number of handled http requests after server restart.",
- },
- []string{"status"},
- ),
- requestDuration: prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Name: "rr_http_request_duration_seconds",
- Help: "HTTP request duration.",
- },
- []string{"status"},
- ),
- }
-}
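
The exporters above are what MetricsCollector hands to the metrics plugin. A minimal sketch of registering and feeding the request metrics outside the full plugin lifecycle, assuming it sits next to metrics.go in package http (illustrative, not part of the removed sources):

package http

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

func TestRequestsExporterSketch(t *testing.T) {
	reg := prometheus.NewRegistry()
	re := newRequestsExporter()

	// the same two vectors MetricsCollector exposes for requests
	reg.MustRegister(re.requestCounter, re.requestDuration)

	// simulate one successful and one failed request
	re.requestCounter.With(prometheus.Labels{"status": "200"}).Inc()
	re.requestCounter.With(prometheus.Labels{"status": "500"}).Inc()

	mfs, err := reg.Gather()
	if err != nil || len(mfs) == 0 {
		t.Fatalf("expected gathered metric families, got %d (err: %v)", len(mfs), err)
	}
}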
diff --git a/plugins/http/plugin.go b/plugins/http/plugin.go
deleted file mode 100644
index dc887f87..00000000
--- a/plugins/http/plugin.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package http
-
-import (
- "context"
- "fmt"
- "log"
- "net/http"
- "sync"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/http/attributes"
- httpConfig "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/status"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/h2c"
-)
-
-const (
- // PluginName declares plugin name.
- PluginName = "http"
-
-	// RrMode is the RR_MODE env variable key (internal), set when the HTTP plugin is present
- RrMode = "RR_MODE"
-
- HTTPSScheme = "https"
-)
-
-// Middleware interface
-type Middleware interface {
- Middleware(f http.Handler) http.Handler
-}
-
-type middleware map[string]Middleware
-
-// Plugin manages pool, http servers. The main http plugin structure
-type Plugin struct {
- sync.RWMutex
-
- // plugins
- server server.Server
- log logger.Logger
- // stdlog passed to the http/https/fcgi servers to log their internal messages
- stdLog *log.Logger
-
- // http configuration
- cfg *httpConfig.HTTP `mapstructure:"http"`
-
- // middlewares to chain
- mdwr middleware
-
- // Pool which attached to all servers
- pool pool.Pool
-
- // servers RR handler
- handler *handler.Handler
-
- // metrics
- workersExporter *workersExporter
- requestsExporter *requestsExporter
-
- // servers
- http *http.Server
- https *http.Server
- fcgi *http.Server
-}
-
-// Init configures the service and must return an error in case of
-// misconfiguration. Services must not be used without proper configuration pushed first.
-func (p *Plugin) Init(cfg config.Configurer, rrLogger logger.Logger, server server.Server) error {
- const op = errors.Op("http_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &p.cfg)
- if err != nil {
- return errors.E(op, err)
- }
-
- err = p.cfg.InitDefaults()
- if err != nil {
- return errors.E(op, err)
- }
-
- // rr logger (via plugin)
- p.log = rrLogger
- // use time and date in UTC format
- p.stdLog = log.New(logger.NewStdAdapter(p.log), "http_plugin: ", log.Ldate|log.Ltime|log.LUTC)
-
- p.mdwr = make(map[string]Middleware)
-
- if !p.cfg.EnableHTTP() && !p.cfg.EnableTLS() && !p.cfg.EnableFCGI() {
- return errors.E(op, errors.Disabled)
- }
-
- // init if nil
- if p.cfg.Env == nil {
- p.cfg.Env = make(map[string]string)
- }
-
- // initialize workersExporter
- p.workersExporter = newWorkersExporter()
- // initialize requests exporter
- p.requestsExporter = newRequestsExporter()
-
- p.cfg.Env[RrMode] = "http"
- p.server = server
-
- return nil
-}
-
-func (p *Plugin) logCallback(event interface{}) {
- if ev, ok := event.(handler.ResponseEvent); ok {
- p.log.Debug(fmt.Sprintf("%d %s %s", ev.Response.Status, ev.Request.Method, ev.Request.URI),
- "remote", ev.Request.RemoteAddr,
- "elapsed", ev.Elapsed().String(),
- )
- }
-}
-
-// Serve serves the svc.
-func (p *Plugin) Serve() chan error {
- errCh := make(chan error, 2)
- // run whole process in the goroutine
- go func() {
- // protect http initialization
- p.Lock()
- p.serve(errCh)
- p.Unlock()
- }()
-
- return errCh
-}
-
-func (p *Plugin) serve(errCh chan error) {
- var err error
- const op = errors.Op("http_plugin_serve")
- p.pool, err = p.server.NewWorkerPool(context.Background(), &pool.Config{
- Debug: p.cfg.Pool.Debug,
- NumWorkers: p.cfg.Pool.NumWorkers,
- MaxJobs: p.cfg.Pool.MaxJobs,
- AllocateTimeout: p.cfg.Pool.AllocateTimeout,
- DestroyTimeout: p.cfg.Pool.DestroyTimeout,
- Supervisor: p.cfg.Pool.Supervisor,
- }, p.cfg.Env, p.logCallback)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- p.handler, err = handler.NewHandler(
- p.cfg.MaxRequestSize,
- p.cfg.InternalErrorCode,
- *p.cfg.Uploads,
- p.cfg.Cidrs,
- p.pool,
- )
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- p.handler.AddListener(p.logCallback, p.metricsCallback)
-
- if p.cfg.EnableHTTP() {
- if p.cfg.EnableH2C() {
- p.http = &http.Server{
- Handler: h2c.NewHandler(p, &http2.Server{}),
- ErrorLog: p.stdLog,
- }
- } else {
- p.http = &http.Server{
- Handler: p,
- ErrorLog: p.stdLog,
- }
- }
- }
-
- if p.cfg.EnableTLS() {
- p.https = p.initSSL()
- if p.cfg.SSLConfig.RootCA != "" {
- err = p.appendRootCa()
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
- }
-
- // if HTTP2Config not nil
- if p.cfg.HTTP2Config != nil {
- if err := p.initHTTP2(); err != nil {
- errCh <- errors.E(op, err)
- return
- }
- }
- }
-
- if p.cfg.EnableFCGI() {
- p.fcgi = &http.Server{Handler: p, ErrorLog: p.stdLog}
- }
-
- // start http, https and fcgi servers if requested in the config
- go func() {
- p.serveHTTP(errCh)
- }()
-
- go func() {
- p.serveHTTPS(errCh)
- }()
-
- go func() {
- p.serveFCGI(errCh)
- }()
-}
-
-// Stop stops the http.
-func (p *Plugin) Stop() error {
- p.Lock()
- defer p.Unlock()
-
- if p.fcgi != nil {
- err := p.fcgi.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
- p.log.Error("fcgi shutdown", "error", err)
- }
- }
-
- if p.https != nil {
- err := p.https.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
- p.log.Error("https shutdown", "error", err)
- }
- }
-
- if p.http != nil {
- err := p.http.Shutdown(context.Background())
- if err != nil && err != http.ErrServerClosed {
- p.log.Error("http shutdown", "error", err)
- }
- }
-
- // check for safety
- if p.pool != nil {
- p.pool.Destroy(context.Background())
- }
-
- return nil
-}
-
-// ServeHTTP handles connection using set of middleware and pool PSR-7 server.
-func (p *Plugin) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- defer func() {
- err := r.Body.Close()
- if err != nil {
- p.log.Error("body close", "error", err)
- }
- }()
- if headerContainsUpgrade(r) {
- http.Error(w, "server does not support upgrade header", http.StatusInternalServerError)
- return
- }
-
- if p.https != nil && r.TLS == nil && p.cfg.SSLConfig.Redirect {
- p.redirect(w, r)
- return
- }
-
- if p.https != nil && r.TLS != nil {
- w.Header().Add("Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
- }
-
- r = attributes.Init(r)
-	// protect against the case when the user sends Reset and we are replacing the handler and pool
- p.RLock()
- p.handler.ServeHTTP(w, r)
- p.RUnlock()
-}
-
-// Workers returns slice with the process states for the workers
-func (p *Plugin) Workers() []*process.State {
- p.RLock()
- defer p.RUnlock()
-
- workers := p.workers()
-
- ps := make([]*process.State, 0, len(workers))
- for i := 0; i < len(workers); i++ {
- state, err := process.WorkerProcessState(workers[i])
- if err != nil {
- return nil
- }
- ps = append(ps, state)
- }
-
- return ps
-}
-
-// internal
-func (p *Plugin) workers() []worker.BaseProcess {
- return p.pool.Workers()
-}
-
-// Name returns endure.Named interface implementation
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// Reset destroys the old pool and replaces it with new one, waiting for old pool to die
-func (p *Plugin) Reset() error {
- p.Lock()
- defer p.Unlock()
- const op = errors.Op("http_plugin_reset")
- p.log.Info("HTTP plugin got restart request. Restarting...")
- p.pool.Destroy(context.Background())
- p.pool = nil
-
- var err error
- p.pool, err = p.server.NewWorkerPool(context.Background(), &pool.Config{
- Debug: p.cfg.Pool.Debug,
- NumWorkers: p.cfg.Pool.NumWorkers,
- MaxJobs: p.cfg.Pool.MaxJobs,
- AllocateTimeout: p.cfg.Pool.AllocateTimeout,
- DestroyTimeout: p.cfg.Pool.DestroyTimeout,
- Supervisor: p.cfg.Pool.Supervisor,
- }, p.cfg.Env, p.logCallback)
- if err != nil {
- return errors.E(op, err)
- }
-
- p.log.Info("HTTP workers Pool successfully restarted")
-
- p.handler, err = handler.NewHandler(
- p.cfg.MaxRequestSize,
- p.cfg.InternalErrorCode,
- *p.cfg.Uploads,
- p.cfg.Cidrs,
- p.pool,
- )
-
- if err != nil {
- return errors.E(op, err)
- }
-
- p.log.Info("HTTP handler listeners successfully re-added")
- p.handler.AddListener(p.logCallback, p.metricsCallback)
-
- p.log.Info("HTTP plugin successfully restarted")
- return nil
-}
-
-// Collects declares the HTTP middlewares to be collected
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.AddMiddleware,
- }
-}
-
-// AddMiddleware registers a middleware under its plugin name (endure.Named and Middleware are the base requirements)
-func (p *Plugin) AddMiddleware(name endure.Named, m Middleware) {
- p.mdwr[name.Name()] = m
-}
-
-// Status return status of the particular plugin
-func (p *Plugin) Status() status.Status {
- p.RLock()
- defer p.RUnlock()
-
- workers := p.workers()
- for i := 0; i < len(workers); i++ {
- if workers[i].State().IsActive() {
- return status.Status{
- Code: http.StatusOK,
- }
- }
- }
-	// if there are no active workers, treat this as an error
- return status.Status{
- Code: http.StatusServiceUnavailable,
- }
-}
-
-// Ready return readiness status of the particular plugin
-func (p *Plugin) Ready() status.Status {
- p.RLock()
- defer p.RUnlock()
-
- workers := p.workers()
- for i := 0; i < len(workers); i++ {
-		// If at least one worker is in the ready state,
-		// we assume that the plugin's worker pool is ready
- if workers[i].State().Value() == worker.StateReady {
- return status.Status{
- Code: http.StatusOK,
- }
- }
- }
-	// if there are no ready workers, treat this as a service-unavailable error
- return status.Status{
- Code: http.StatusServiceUnavailable,
- }
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {}
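For reference, a middleware collected through AddMiddleware above is expected to expose a plugin name (endure.Named) and a Middleware method that wraps an http.Handler; the exact interface shape is inferred from how applyMiddlewares uses it in serve.go below. A minimal sketch of such a hypothetical middleware plugin (the plugin name and header are made up for illustration):

```go
package headers_example

import "net/http"

const PluginName = "hypothetical_headers"

type Plugin struct{}

// Name satisfies endure.Named, so the http plugin can register this
// middleware under the returned key in its mdwr map.
func (p *Plugin) Name() string {
	return PluginName
}

// Middleware wraps the next handler; the signature mirrors the way
// serve.go applies it: server.Handler = mdwr.Middleware(server.Handler).
func (p *Plugin) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Example", "middleware")
		next.ServeHTTP(w, r)
	})
}
```

When the middleware name is listed in the configured order, applyMiddlewares wraps the server handler with it before the listeners start.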
diff --git a/plugins/http/serve.go b/plugins/http/serve.go
deleted file mode 100644
index 6d3f2228..00000000
--- a/plugins/http/serve.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package http
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "net/http"
- "net/http/fcgi"
- "net/url"
- "os"
- "strings"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
- "golang.org/x/net/http2"
- "golang.org/x/sys/cpu"
-)
-
-func (p *Plugin) serveHTTP(errCh chan error) {
- if p.http == nil {
- return
- }
- const op = errors.Op("serveHTTP")
-
- if len(p.mdwr) > 0 {
- applyMiddlewares(p.http, p.mdwr, p.cfg.Middleware, p.log)
- }
- l, err := utils.CreateListener(p.cfg.Address)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- err = p.http.Serve(l)
- if err != nil && err != http.ErrServerClosed {
- errCh <- errors.E(op, err)
- return
- }
-}
-
-func (p *Plugin) serveHTTPS(errCh chan error) {
- if p.https == nil {
- return
- }
- const op = errors.Op("serveHTTPS")
- if len(p.mdwr) > 0 {
- applyMiddlewares(p.https, p.mdwr, p.cfg.Middleware, p.log)
- }
- l, err := utils.CreateListener(p.cfg.SSLConfig.Address)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- err = p.https.ServeTLS(
- l,
- p.cfg.SSLConfig.Cert,
- p.cfg.SSLConfig.Key,
- )
-
- if err != nil && err != http.ErrServerClosed {
- errCh <- errors.E(op, err)
- return
- }
-}
-
-// serveFCGI starts FastCGI server.
-func (p *Plugin) serveFCGI(errCh chan error) {
- if p.fcgi == nil {
- return
- }
- const op = errors.Op("serveFCGI")
-
- if len(p.mdwr) > 0 {
- applyMiddlewares(p.fcgi, p.mdwr, p.cfg.Middleware, p.log)
- }
-
- l, err := utils.CreateListener(p.cfg.FCGIConfig.Address)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- err = fcgi.Serve(l, p.fcgi.Handler)
- if err != nil && err != http.ErrServerClosed {
- errCh <- errors.E(op, err)
- return
- }
-}
-
-func (p *Plugin) redirect(w http.ResponseWriter, r *http.Request) {
- target := &url.URL{
- Scheme: HTTPSScheme,
- // host or host:port
- Host: p.tlsAddr(r.Host, false),
- Path: r.URL.Path,
- RawQuery: r.URL.RawQuery,
- }
-
- http.Redirect(w, r, target.String(), http.StatusPermanentRedirect)
-}
-
-// https://golang.org/pkg/net/http/#Hijacker
-//go:inline
-func headerContainsUpgrade(r *http.Request) bool {
- if _, ok := r.Header["Upgrade"]; ok {
- return true
- }
- return false
-}
-
-// append RootCA to the https server TLS config
-func (p *Plugin) appendRootCa() error {
- const op = errors.Op("http_plugin_append_root_ca")
- rootCAs, err := x509.SystemCertPool()
- if err != nil {
- return nil
- }
- if rootCAs == nil {
- rootCAs = x509.NewCertPool()
- }
-
- CA, err := os.ReadFile(p.cfg.SSLConfig.RootCA)
- if err != nil {
- return err
- }
-
- // should append our CA cert
- ok := rootCAs.AppendCertsFromPEM(CA)
- if !ok {
- return errors.E(op, errors.Str("could not append Certs from PEM"))
- }
- // disable "G402 (CWE-295): TLS MinVersion too low. (Confidence: HIGH, Severity: HIGH)"
- // #nosec G402
- cfg := &tls.Config{
- InsecureSkipVerify: false,
- RootCAs: rootCAs,
- }
- p.http.TLSConfig = cfg
-
- return nil
-}
-
-// Init https server
-func (p *Plugin) initSSL() *http.Server {
- var topCipherSuites []uint16
- var defaultCipherSuitesTLS13 []uint16
-
- hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
-
- if hasGCMAsm {
-		// If AES-GCM hardware support is available, prioritize the AES-GCM
-		// cipher suites.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- } else {
- // Without AES-GCM hardware, we put the ChaCha20-Poly1305
- // cipher suites first.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- }
-
- DefaultCipherSuites := make([]uint16, 0, 22)
- DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
- DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
-
- sslServer := &http.Server{
- Addr: p.tlsAddr(p.cfg.Address, true),
- Handler: p,
- ErrorLog: p.stdLog,
- TLSConfig: &tls.Config{
- CurvePreferences: []tls.CurveID{
- tls.CurveP256,
- tls.CurveP384,
- tls.CurveP521,
- tls.X25519,
- },
- CipherSuites: DefaultCipherSuites,
- MinVersion: tls.VersionTLS12,
- PreferServerCipherSuites: true,
- },
- }
-
- return sslServer
-}
-
-// init http/2 server
-func (p *Plugin) initHTTP2() error {
- return http2.ConfigureServer(p.https, &http2.Server{
- MaxConcurrentStreams: p.cfg.HTTP2Config.MaxConcurrentStreams,
- })
-}
-
-// tlsAddr replaces the listen/host port with the port configured in SSLConfig.
-func (p *Plugin) tlsAddr(host string, forcePort bool) string {
-	// strip the current port first
- host = strings.Split(host, ":")[0]
-
- if forcePort || p.cfg.SSLConfig.Port != 443 {
- host = fmt.Sprintf("%s:%v", host, p.cfg.SSLConfig.Port)
- }
-
- return host
-}
-
-// static plugin name
-const static string = "static"
-
-func applyMiddlewares(server *http.Server, middlewares map[string]Middleware, order []string, log logger.Logger) {
- for i := len(order) - 1; i >= 0; i-- {
-		// skip static here; it is applied last, as the outermost wrapper
- if order[i] == static {
- continue
- }
- if mdwr, ok := middlewares[order[i]]; ok {
- server.Handler = mdwr.Middleware(server.Handler)
- } else {
- log.Warn("requested middleware does not exist", "requested", order[i])
- }
- }
-
- // set static if exists
- if mdwr, ok := middlewares[static]; ok {
- server.Handler = mdwr.Middleware(server.Handler)
- }
-}
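To make the wrapping order of applyMiddlewares concrete: the configured order is walked in reverse, so the first listed middleware becomes the outer wrapper among the listed ones, and static (when present) wraps everything and therefore runs first on each request. A standalone sketch using plain http.Handler wrappers rather than the plugin types:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrap returns a handler that logs its name before calling next,
// mimicking how each Middleware wraps server.Handler.
func wrap(name string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("enter:", name)
		next.ServeHTTP(w, r)
	})
}

func main() {
	var h http.Handler = http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
		fmt.Println("worker handler")
	})

	order := []string{"gzip", "headers"} // configured middleware order
	for i := len(order) - 1; i >= 0; i-- {
		h = wrap(order[i], h)
	}
	h = wrap("static", h) // static wraps last, so it runs first

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	h.ServeHTTP(httptest.NewRecorder(), req)
	// Prints: enter: static, enter: gzip, enter: headers, worker handler
}
```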
diff --git a/plugins/informer/interface.go b/plugins/informer/interface.go
deleted file mode 100644
index 9277b85b..00000000
--- a/plugins/informer/interface.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package informer
-
-import (
- "context"
-
- "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
-)
-
-/*
-The Informer plugin should not receive any other plugin in Init or via Collects,
-because an Availabler implementation should be present in every plugin.
-*/
-
-// Statistic interfaces ==============
-
-// Informer is used to get workers from a particular plugin or a set of plugins
-type Informer interface {
- Workers() []*process.State
-}
-
-// JobsStat interface provides statistics for the jobs plugin
-type JobsStat interface {
- // JobsState returns slice with the attached drivers information
- JobsState(ctx context.Context) ([]*job.State, error)
-}
-
-// Statistic interfaces end ============
-
-// Availabler interface should be implemented by every plugin that wishes to report to the PHP worker that it is available in the RR runtime
-type Availabler interface {
-	// Available method is needed to collect all plugins which are available in the runtime.
- Available()
-}
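Any plugin that wants to appear in the informer output only has to satisfy these interfaces. A minimal sketch of a hypothetical plugin implementing Informer and Availabler (plus endure.Named, which the informer uses as the map key when collecting); the import path follows the pre-move layout shown in this diff:

```go
package myplugin

import (
	"github.com/spiral/roadrunner/v2/pkg/state/process"
)

const PluginName = "hypothetical_plugin"

type Plugin struct {
	// states would normally be built from the plugin's worker pool
	states []*process.State
}

// Workers satisfies the informer.Informer interface.
func (p *Plugin) Workers() []*process.State {
	return p.states
}

// Available satisfies the informer.Availabler interface, so the plugin
// shows up in the informer's List RPC.
func (p *Plugin) Available() {}

// Name satisfies endure.Named.
func (p *Plugin) Name() string {
	return PluginName
}
```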
diff --git a/plugins/informer/plugin.go b/plugins/informer/plugin.go
deleted file mode 100644
index 87180be5..00000000
--- a/plugins/informer/plugin.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package informer
-
-import (
- "context"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const PluginName = "informer"
-
-type Plugin struct {
- log logger.Logger
-
- withJobs map[string]JobsStat
- withWorkers map[string]Informer
- available map[string]Availabler
-}
-
-func (p *Plugin) Init(log logger.Logger) error {
- p.available = make(map[string]Availabler)
- p.withWorkers = make(map[string]Informer)
- p.withJobs = make(map[string]JobsStat)
-
- p.log = log
- return nil
-}
-
-// Workers provides BaseProcess slice with workers for the requested plugin
-func (p *Plugin) Workers(name string) []*process.State {
- svc, ok := p.withWorkers[name]
- if !ok {
- return nil
- }
-
- return svc.Workers()
-}
-
-// Jobs provides information about jobs for a registered plugin that uses the jobs plugin
-func (p *Plugin) Jobs(name string) []*job.State {
- svc, ok := p.withJobs[name]
- if !ok {
- return nil
- }
-
- st, err := svc.JobsState(context.Background())
- if err != nil {
- p.log.Info("jobs stat", "error", err)
- // skip errors here
- return nil
- }
-
- return st
-}
-
-// Collects declares services to be collected.
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.CollectPlugins,
- p.CollectWorkers,
- p.CollectJobs,
- }
-}
-
-// CollectPlugins collects all RR plugins
-func (p *Plugin) CollectPlugins(name endure.Named, l Availabler) {
- p.available[name.Name()] = l
-}
-
-// CollectWorkers obtains plugins with workers inside.
-func (p *Plugin) CollectWorkers(name endure.Named, r Informer) {
- p.withWorkers[name.Name()] = r
-}
-
-func (p *Plugin) CollectJobs(name endure.Named, j JobsStat) {
- p.withJobs[name.Name()] = j
-}
-
-// Name of the service.
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// RPC returns associated rpc service.
-func (p *Plugin) RPC() interface{} {
- return &rpc{srv: p}
-}
diff --git a/plugins/informer/rpc.go b/plugins/informer/rpc.go
deleted file mode 100644
index 478d3227..00000000
--- a/plugins/informer/rpc.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package informer
-
-import (
- "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
-)
-
-type rpc struct {
- srv *Plugin
-}
-
-// WorkerList contains list of workers.
-type WorkerList struct {
-	// Workers is the list of workers.
- Workers []*process.State `json:"workers"`
-}
-
-// List returns all available plugins.
-func (rpc *rpc) List(_ bool, list *[]string) error {
- *list = make([]string, 0, len(rpc.srv.withWorkers))
-
- // append all plugin names to the output result
- for name := range rpc.srv.available {
- *list = append(*list, name)
- }
- return nil
-}
-
-// Workers state of a given service.
-func (rpc *rpc) Workers(service string, list *WorkerList) error {
- workers := rpc.srv.Workers(service)
- if workers == nil {
- return nil
- }
-
- // write actual processes
- list.Workers = workers
-
- return nil
-}
-
-func (rpc *rpc) Jobs(service string, out *[]*job.State) error {
- *out = rpc.srv.Jobs(service)
- return nil
-}
-
-// sort.Sort
-
-func (w *WorkerList) Len() int {
- return len(w.Workers)
-}
-
-func (w *WorkerList) Less(i, j int) bool {
- return w.Workers[i].Pid < w.Workers[j].Pid
-}
-
-func (w *WorkerList) Swap(i, j int) {
- w.Workers[i], w.Workers[j] = w.Workers[j], w.Workers[i]
-}
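WorkerList also satisfies sort.Interface via the Len/Less/Swap methods above, so callers of the Workers RPC can order the result by PID. A short sketch, assuming the pre-move import paths from this diff and the exported Pid field used in Less:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/spiral/roadrunner/v2/pkg/state/process"
	"github.com/spiral/roadrunner/v2/plugins/informer"
)

func main() {
	wl := &informer.WorkerList{
		Workers: []*process.State{
			{Pid: 3021},
			{Pid: 1007},
			{Pid: 2115},
		},
	}

	// WorkerList implements sort.Interface, so sort.Sort orders by PID ascending.
	sort.Sort(wl)
	fmt.Println(wl.Workers[0].Pid) // 1007
}
```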
diff --git a/plugins/jobs/config.go b/plugins/jobs/config.go
deleted file mode 100644
index 454256b9..00000000
--- a/plugins/jobs/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package jobs
-
-import (
- "runtime"
-
- poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
-)
-
-const (
- // name used to set pipeline name
- pipelineName string = "name"
-)
-
-// Config defines settings for job broker, workers and job-pipeline mapping.
-type Config struct {
-	// NumPollers configures the number of priority queue pollers
-	// Should be no more than 255
-	// Default: the number of logical cores
- NumPollers uint8 `mapstructure:"num_pollers"`
-
-	// PipelineSize is the limit of the main jobs queue, which consumes Items from the driver pipelines
-	// A driver pipeline might be much larger than the main jobs queue
- PipelineSize uint64 `mapstructure:"pipeline_size"`
-
-	// Timeout in seconds is the per-push limit for putting a job into the queue
- Timeout int `mapstructure:"timeout"`
-
- // Pool configures roadrunner workers pool.
- Pool *poolImpl.Config `mapstructure:"Pool"`
-
- // Pipelines defines mapping between PHP job pipeline and associated job broker.
- Pipelines map[string]*pipeline.Pipeline `mapstructure:"pipelines"`
-
- // Consuming specifies names of pipelines to be consumed on service start.
- Consume []string `mapstructure:"consume"`
-}
-
-func (c *Config) InitDefaults() {
- if c.Pool == nil {
- c.Pool = &poolImpl.Config{}
- }
-
- if c.PipelineSize == 0 {
- c.PipelineSize = 1_000_000
- }
-
- if c.NumPollers == 0 {
- c.NumPollers = uint8(runtime.NumCPU())
- }
-
- for k := range c.Pipelines {
- // set the pipeline name
- c.Pipelines[k].With(pipelineName, k)
- }
-
- if c.Timeout == 0 {
- c.Timeout = 60
- }
-
- c.Pool.InitDefaults()
-}
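As a quick illustration of InitDefaults: an almost empty jobs Config ends up with a 1,000,000-item pipeline buffer, one poller per logical CPU, a 60-second push timeout, and each pipeline name injected from its map key. A minimal sketch (import paths as they appear in this diff; the pipeline contents are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/jobs"
	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
)

func main() {
	cfg := &jobs.Config{
		Pipelines: map[string]*pipeline.Pipeline{
			"test-local": {"driver": "ephemeral"},
		},
	}

	cfg.InitDefaults()

	// PipelineSize: 1000000, Timeout: 60, NumPollers: number of logical CPUs
	fmt.Println(cfg.PipelineSize, cfg.Timeout, cfg.NumPollers)
	// the pipeline name is injected from the map key
	fmt.Println(cfg.Pipelines["test-local"].Name()) // test-local
}
```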
diff --git a/plugins/jobs/doc/jobs_arch.drawio b/plugins/jobs/doc/jobs_arch.drawio
deleted file mode 100644
index 824e1a83..00000000
--- a/plugins/jobs/doc/jobs_arch.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2021-08-21T12:35:37.051Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.6.13 Chrome/91.0.4472.164 Electron/13.2.1 Safari/537.36" etag="L_bYn0v_jW4MOvWLd2St" version="14.6.13" type="device"><diagram id="AFQlLRRq6yGg9IpTzkrs" name="Page-1">7R1pc5s4+9d4Jt2ZZJDE+TFXs23Txo2Tttkv72AjxzTYuICTuL/+lYTAIMkOjgE7R3a2iQXI4rkvPeqg4/HjWeROR19DDwcdqHmPHXTSgdBxoEF+0ZF5OmLYppaO3Ea+l46BxUDP/4v5YHbbzPdwXLoxCcMg8aflwUE4meBBUhpzoyh8KN82DIPyt07dWywN9AZuII/+9L1klK2O/Cyu/Iv921EiXRq72f18IB65XvhQGEKnHXQchWGS/jV+PMYBBWAGmvS5j0uu5muL8CSp8kCvG5s3x1/n7udQH4Hbb99O76J9nS/u3g1m/KUvLy6uyMjni6Me+dU9vz779I2/QTLPIBOFs4mH6cxaBx09jPwE96bugF59IMRAxkbJOCCfAPlzGE6Sj+7YDygd9D6Smb6Gk5BccAP/dkLGohSCR4Hbx0E3jP3ED+l4gId0+B5HiU+wci5cTsJp4eohn6wfJkk4pl/rB8FxGIQRWzIaDrE5GJDxOInCO1y44llOX6PvIYOUQ5l+B34sDHEQn+FwjJNoTm7hV5GG4AGne072wKCf6MDDgoigqadjoyL56A4nXk64t/n8C8SSPzhu1Xj+NZk8/gvIc/d/k4l19h3/pw32oYRB7BFS5x/DKBmFt+HEDU4Xo0dlHC/uOQ8p3Blmf+MkmXO+dWdJWMY7gWI0/0WfJxDhH2+K104e+eTppzn/tJReluInDmfRAK96fw6AxI1ucbLiRj4hhc5KdEc4cBP/viwoVGjjj3ZDn6w5JxPHhiUasTQ7o5psknSp/LkiX4tTWVZ5KihNlYJHmuowitx54bYpvSGWaC1/6+eTH9oK+T36SYH6yKebjNzI3wvaox/mRULcDZIFFSlWb4liBTLTnaoUWxcRZbK4oKq6ZCZtn6qpWTwik9JfR24yGG2osMqawzOw7ekqzWHDPjJNjv/CuMZ+nkUX1VWNLWDEMHKMFBVNZpAUFY1uN6RnwDunr8/pelXdZLbF6uaB7ZiGASyoA2Q5JTJDRoOMf/G3i76M0P/unX0TJWc9jZifGeNviaaKFJXTl5qmRIvTHmC1xdm3Dd1YaXHWRjSZ5dM40WiCGQJgRTJZboaov8kyBU1k6fyblplI4hOLtS0oNV1FrQpLlxTWGaarmvpTHPgTXKuSwoCoKUtFbI5pIXdbSsoBArKUSgpAhZJCqCElZUh46UAzoJ7mtIQP88+MOudHY0K3PnEtD8lVbfpI/mWw0dLxfeaG0mt64RoBT7LP/Vt6bUBAiKPyZQ8PwshNnVl6D0V+lBJG9tXkr1v+my2wnw18DvsELIe9JJoNklmEsxsIQPriQ2RsKo6NInHkWe+eeuj0ok0vissmq2RPnLC5I39ym79I/v1ddx6ErrfkPtXam1jpJw8zibF6rRdTiq04vy//rFqswNxinCKNXmQhEB7qCMldw4CFiSh/c+bkSgrAVcy6XDQMDfqfSjSY7EcWDelPPRIg5+15ZpAimf9NBf8DqDckAMxXIAAy0tt5GdDNtd1WGbwb+WHkJ/N8GTNiItAgnMjjJ8QOmi9Z7OKuwyTB42myEAXM4Gj3lS4pM5SXW1hFX8JuW+u68sc4nCXPWdS7zFTKTGRrFWUmakpmZnbcu2e/hpOWeexPu/b2Lrj20Gw9pier4tRHGhDdNhszhbnnReSFow/s7W/9mKhRTM3FYUivJiNccqm0/rz8eeKO34SnZTvllAKCSk/LUkgNszGh4bwLjfWFhl1RaGRKYrtCAzQZD1wJoILQ+IrJN5Kh38zhDHM37cEnhC/IjYJs4DceULTxGcJJQEFGjbu5PGGNYmQ4HMIl+WizbxpbEyNGOWADgTKroKvEiNaQGIFylQLN9OxRTHbgcYomqh+IJCBfT/4/oiCNasWX52J7qMSXObBxf7gjYh9osKrYN8ym8CWn6pYghLrBZahnxj33l+X6jrHveamewLH/1+2zqSiUedyYzGscdYwTOhdRDTH3AyTETcIJbhg3jiNUgtiaAjeGobDjraZQAyXUSEghvhuVj8d5VZUm0zb57yP92qPbyPV8vLiWgbXMPPntShx4bjzKuXJZtU9e17PaKczpRihAEv3BzAahX+nG0/RFh/4jXUdKSjg6vccpRTHqGblT+sD48ZaWux24D7F+MIvZdzVKRMguEZFpqxjcsmUismrI8v41h94Dsj/qd+PDm7PZY9dy/s3KObYSP1MExtwxJYBA/sSQOKTyPRsuP1KOrEmhiTyIdlxwRvpMkH3YUqxNCSopKrMKjuwuOTpG1Wlq6/wTs3hifPA51ak4iqijVXMIqakXueSW3l7Byvsn+/sgi0W+vPfiJPjaXuucIGuPLjvV2fIrvZg3aSHs2hjzu0SNFSkrDXp/eDEvcIlF3lj5BoK5kyn2iNkARbtBjiyrXIalyn+nQ8qGJYeUkcpLAHYNeXilGfEeHFoZHFLCzJBjQ8r7ao8nb4RpueKie937d0P3vEGbG5gCtzi6KgOjKq1ENQRBVmG0AMM/B79pFQixjUcucdWCXYYnEOCJHAU8lVVANcAzvjzpa7/vfv+07r733F/HP82r66wI4AVJH1CUPQVR1Ij0uXSuz2Pjvtezg/9ufsHY/emfq2rSlbBtpiZ93UJB0XMGUNeLZCM9YJho0wdsXSDMzeoKV2GhIAm+HV71NuT+Z+W41GbPkihoDXLEtkV4Z6mrohgBCjGiNyVG7G2KEVAUIk8UJy+MmDbFiBJmWxMjG2H65ZmrrSqMjTCNdkJhAFG8aDpcKf8BgBs/Aax6VYYaD9ruEC94wcS7Sh/vGvEaxmrjRSbF9Z+o295R42G7G4AOrNdCvcr74E5QLwQiLWpP0KK2+RPIaYN6JWu9UIr8/fr0+rRWy910NW04VFnuyNK042PZQB+yn6YjAJLkUFnuqgBAY5b7DhW0VhQqrYYfVwmLF2a5Q4kFv55+vbi8qZXxXorLjKxtu8zG7jDeakeqAQZC22KgeiJomrVax+qSP9SGdyPXoxx+/d59k9wNjG1zt7zruYGOUkJppq1pFlIh59DQNF1RvtaKzYP0KjaPahtPY8gBsir8NJ4ScUqRMRhhxVaJV16iKeSlFFlxkHU2qLtAU40hWZgRcAS0NoG8ahTe+x6O5Er0140lwxQKaXP4F/GEzGbwpOycJ7t1gG4jWODKZXNwoVdA3LIyIc+/F4fiqTvJxiCbPZ3008RPfILfv+mGBWonEFhSePmMmdNtDvwr6VImdCDG7Bevbe3wavl
0/1T3e6FUpvi1hWHFAp9YM6Jr7rKdoH6+ZK8MjuIqxu60k+/ZuMPzOEUcu+zTFdO9XDGbLX9mEE6G/i35Y88d/5mmuwH62J3EiRvcpR9xMviQQ4EhjM6xz7CVF3YWa0XXAURlPhSaGbbNhYIC/Mh+avLtpcijaUrc6TgKKarXsPFMyZ2ymisbIb3Tyx+nb1XnEZND0Hq2QppaqE1puuUQ7+bBmEX4pYlgjBJoCl9SeR9HWvvBmFWrLrAmZ8aNHIKnWszmfFtjj651JKSwqw4ACGWWU9Y/NcVyO1T/9GI4TtF0bRWN7wjHyR75LmQg2rdSRB7UkFONB+twx1dJ5iJmLhRxq1E47s/itRFBQxwDTYWI4fCpnamS3KwFA0LHUuLGyVLQbhMBclEtefv15WJ1cz/fxUicsUPaop7OOcWTdITLTft5OHtu8vn58tCqWR5yQsmc6w37FkDNNMRe7FJXwZqaUkJN1+TvgsUpKz0DdCAQ9WbxciU6HInq51gOKdVJ9uke3SZpfk3joHmaz4LuLRM9cIApEhVCwjT1NehQv/q7bbk+XWX08jRhGTtlXWbrLkiTH4fnG9uUlQ2TFU3Zl7Rxb8SYRHrFepY6OrSpESEHvbohDagSWcu7TFKaSGYRa40zDuM0pZC7AumDbywWZtjSeS3AksNhCKjOa2ksHCYHSX66fvJGcQQ1A1TBkd0qimSv+keKG81PUxQeJjaOx5IuNAshZ378IV0/61GHWVrk8PhLNjKgmQfN9egwQ/wTiSPFZuzle9SLeY1C+iVf/ytPhdQhNhxR/kNLznlAoOjK0ljOw4DvVtf6VpdiQ7U6iFb74RubiR95S/XPi8svp5esxObi4rwt80sRc7KVvhwykYO8+swvUSPo2aE2TxpgRlM4kcN5p79Oj6+vaKZx78+BF7JWJ8Xk9SyKOixrH3x4Y0odOFDyU3VN0WmwqfZoahTKAcET1n2WqvQeju5Zc5G3mDSGNionjYEGsnNXSixnKHRec/jaoU2Zu1jEry6OqBppQM1UIUsxKwRMXTLwnWqB2rpiVooeoynnd/Jqke0o1FYS1AhoMgZUB3W0mqLOKfrFcvcWLFoIK3J3/e2jN0O2Inwlpal548ZJP56qXM6N0taaSw+rUjAaPLHM1VmIZdXlDXGmBhVVyq0mrqGcQnrDASoEbME2yqv6txWeQnIEsevGMQszsRMSaJfsvBT4bWELSruaEFBasqBNhO1Qzqxie5Ht6zpUuSBrt3JmSA4e006omEUGsh726bEn+7Q3tT/0B2TkzwwrZGtDdqjn04aYae/tBxwnssas0BG/KdPUURQsqxRgHUcSqMOBbzfYuv7p2BsweNXwbFbKtysMruh4efnp4vLT1c3CmNUabIL5lFtZ0dpt0ahFJqxo1NZwbIUaaXL89hIPMCEWj4tkbtyme40OTvC9n+67Uuf32PlUny9oH+O9MR6H0fxDeiu74E4oQAbhhIKNGUwNJ/kIr7Ep8tMi09f58J7lW06mlriD11Zk+VRdkxvL8iE5Qk2b93eylEJGonwXXaG8o5DpnT5V//3qPQDBX0OWnDxCdqv2v+xRs1pwbRiF405xn2aj+oIzraAt+sDzhspKcqBZyMHNaQXDggew4oHhunQQWX3m3vIjTyhIM+n4Oe3H/OCzGfsMYxfd7ulJp5T243sRp8GMdstfyNniVG+NJfMCqxzztsySMDvsqhWW1GUvrfrxB+kpBtri320cXPMpwVR27BXkPfMhGzu+Rn1eQ5qk0FaCJx2RjmU/YSnz5x4ZXfdysoA0W1T5AOntLeoo9OaFM1T68+QZp6bUvahjIivIlGxde8WFsQ359IyaZxwvUvciDwd3exucmVP3cr65T69HUAqNnV3yZFpyq4eZIFOw35SHHkJNqS0aUxfyWcYckaLF9mr38xlIaa21uqVPl7349z19neohN11xZvnF3y76MkL/u3f2TZSc9bTz67PKbQXq3tQntGwCRlb4WvuWPtFLzL5p+YY+8QmTxwsa3c6nyyGB9/18r4XegQOEbKUlOjw10TtwgCDOTfBER2VpbaZlNE/vWV622O8xZpaTWB4iOkjprgrml0/CfbY5asiCsXFICzp5yu2tdVcDpihSTVUhLlQU4oobS+vbzCD73pl5/I7UKkilm8sF7eUoYtftIlUR4xQiE/Rrg1vZXts+supgNMsUdw1BpKo8UTVsrSPM9Rg/XM1+3P9B5871zUVXsz7/HK84zXlZ0mm9E1GLSSXWIzE7o7fIm9rUn/rs9M64s14+qqmlpg0X0waB6frW3Q/X1MpYW8VewiTdIF/gnhcyCUj1s4cDnKyd2WtqvTprA8ngV1jvIia5E4s0skXyE2QLIdM9Dw/dWUABSxmTRrDYue8UzkNyt7crgDYPlplAded2X5DQhUAXy2eRrYoWGQqh69QQpVA6E1ASuqdprEhju4JpxEKLZ4MRlzwpUb5G9IgqUdcV+lBXoAY2hRpVA+a8TSztGsv2d7vMtUsVGasPkQ3TwnnUPBu/KOtbA5nt1FHUYd5oQjwQGaqwrLLjeR3HvCqxuUkOr+oR5qLE5uxXuuzhQci7cLB7aNI+4ifcLztKPk875YTUTNbuiXdeHM1uq9IWtGuzccRTdMbJIndpHXWsk0ppDHV4SWAAOaNB/ScebgJwlT+1Iqa+U4eyL8tjKFrxLlpi1M4ymaex+2eaLjtZYxFF7NQfNVfCTFGnqoZt7QezLwtFW2XSQqZxoCFdt6Fl2CbUqsUQnxGyWwWeghTmjbipamUViaEiQrNWaVMjGS5H6JSNUMVapDoKz5WQlJOMpW3p7uu0EoGJxF4BKknptGknynnGE7Vpty7h1lhgv0lHM9MC1Wi9Mdsta3Ky+4pomcKBVQ7+XOtsqCc1kV1RE21tR9SqVRf94SntshG5wbr8tAykjSgI8SgFqHZ51GzTnAWnv3TGaZ9vnBfJN3JGRXn03S6zDCKuTNUSb2g1xzNb3YBbB89Y7TONqmntC+AaRc/ao9PDb72rw/MvL4p3dKMq4zTGNrLm5qeU0c0QuXu3y1Clbl5ZIunaQeXdiPaBtT5syccopFG0hZNNQDD6GnqY3vF/</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/jobs/doc/response_protocol.md b/plugins/jobs/doc/response_protocol.md
deleted file mode 100644
index e195c407..00000000
--- a/plugins/jobs/doc/response_protocol.md
+++ /dev/null
@@ -1,54 +0,0 @@
-The response protocol is used to communicate between the worker and RR. When a worker completes its job, it should send a typed
-response. The response should contain:
-
-1. A `type` field with the message type. It can be treated as an enum.
-2. A `data` field with the dynamic response related to the type.
-
-Types are:
-
-```
-0 - NO_ERROR
-1 - ERROR
-2 - ...
-```
-
-- `NO_ERROR`: contains only the `type` field and an empty `data` object.
-- `ERROR`: contains `type`: 1 and a `data` field with: a `message` describing the error, a `requeue` flag to requeue the
-  job, `delay_seconds` to delay the job for the provided number of seconds, and `headers` - the job's headers represented as a hashmap
-  with string keys and arrays of strings as values.
-
-For example:
-
-`NO_ERROR`:
-
-```json
-{
- "type": 0,
- "data": {}
-}
-
-```
-
-`ERROR`:
-
-```json
-{
- "type": 1,
- "data": {
- "message": "internal worker error",
- "requeue": true,
- "headers": [
- {
- "test": [
- "1",
- "2",
- "3"
- ]
- }
- ],
- "delay_seconds": 10
- }
-}
-```
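This envelope is decoded on the RR side by handleResponse in plugins/jobs/protocol.go further down in this diff; note that protocol.go expects `headers` as a map of string to string array, while the JSON sample above wraps it in an array. A minimal Go sketch of producing a conforming ERROR response (a real worker would emit this from PHP; the struct names here are stand-ins):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the documented response protocol: a numeric type plus
// a type-specific data payload.
type envelope struct {
	Type int             `json:"type"`
	Data json.RawMessage `json:"data"`
}

type errorData struct {
	Message      string              `json:"message"`
	Requeue      bool                `json:"requeue"`
	DelaySeconds int64               `json:"delay_seconds"`
	Headers      map[string][]string `json:"headers"`
}

func main() {
	data, _ := json.Marshal(errorData{
		Message:      "internal worker error",
		Requeue:      true,
		DelaySeconds: 10,
		Headers:      map[string][]string{"test": {"1", "2", "3"}},
	})

	resp, _ := json.Marshal(envelope{Type: 1, Data: data})
	fmt.Println(string(resp))
}
```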
diff --git a/plugins/jobs/job/job.go b/plugins/jobs/job/job.go
deleted file mode 100644
index adab2a0a..00000000
--- a/plugins/jobs/job/job.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package job
-
-import (
- "time"
-)
-
-// constant keys to pack/unpack messages from different drivers
-const (
- RRID string = "rr_id"
- RRJob string = "rr_job"
- RRHeaders string = "rr_headers"
- RRPipeline string = "rr_pipeline"
- RRDelay string = "rr_delay"
- RRPriority string = "rr_priority"
-)
-
-// Job carries information about single job.
-type Job struct {
-	// Job contains the name of the job (usually the PHP class name).
- Job string `json:"job"`
-
-	// Ident is the unique identifier of the job; it should be provided from the outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
- // Headers with key-value pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle given job.
-type Options struct {
-	// Priority is the job priority; the default is 10.
-	// A zero value means the priority is inherited from the pipeline.
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-}
-
-// DelayDuration returns delay duration in a form of time.Duration.
-func (o *Options) DelayDuration() time.Duration {
- return time.Second * time.Duration(o.Delay)
-}
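A small usage sketch of the Job and Options types above, showing how the integer delay is converted by DelayDuration (all values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/jobs/job"
)

func main() {
	j := &job.Job{
		Job:     `App\Jobs\SendEmail`, // illustrative PHP class name
		Ident:   "10e6b83e-example",
		Payload: `{"to":"[email protected]"}`,
		Headers: map[string][]string{"attempt": {"1"}},
		Options: &job.Options{
			Priority: 10,
			Pipeline: "test-local",
			Delay:    30,
		},
	}

	// DelayDuration converts the integer delay (seconds) to time.Duration.
	fmt.Println(j.Options.DelayDuration()) // 30s
}
```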
diff --git a/plugins/jobs/job/job_test.go b/plugins/jobs/job/job_test.go
deleted file mode 100644
index 4a95e27d..00000000
--- a/plugins/jobs/job/job_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package job
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestOptions_DelayDuration(t *testing.T) {
- opts := &Options{Delay: 0}
- assert.Equal(t, time.Duration(0), opts.DelayDuration())
-}
-
-func TestOptions_DelayDuration2(t *testing.T) {
- opts := &Options{Delay: 1}
- assert.Equal(t, time.Second, opts.DelayDuration())
-}
diff --git a/plugins/jobs/metrics.go b/plugins/jobs/metrics.go
deleted file mode 100644
index 38d0bcfb..00000000
--- a/plugins/jobs/metrics.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package jobs
-
-import (
- "sync/atomic"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/plugins/informer"
-)
-
-func (p *Plugin) MetricsCollector() []prometheus.Collector {
-	// statsExporter implements the prometheus.Collector interface
-	// (workers memory plus push/jobs counters)
- return []prometheus.Collector{p.statsExporter}
-}
-
-const (
- namespace = "rr_jobs"
-)
-
-type statsExporter struct {
- workers informer.Informer
- workersMemory uint64
- jobsOk uint64
- pushOk uint64
- jobsErr uint64
- pushErr uint64
-}
-
-var (
- worker = prometheus.NewDesc("workers_memory_bytes", "Memory usage by JOBS workers.", nil, nil)
- pushOk = prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "push_ok"), "Number of job push.", nil, nil)
- pushErr = prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "push_err"), "Number of jobs push which was failed.", nil, nil)
- jobsErr = prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "jobs_err"), "Number of jobs error while processing in the worker.", nil, nil)
- jobsOk = prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "jobs_ok"), "Number of successfully processed jobs.", nil, nil)
-)
-
-func newStatsExporter(stats informer.Informer) *statsExporter {
- return &statsExporter{
- workers: stats,
- workersMemory: 0,
- jobsOk: 0,
- pushOk: 0,
- jobsErr: 0,
- pushErr: 0,
- }
-}
-
-func (se *statsExporter) metricsCallback(event interface{}) {
- if jev, ok := event.(events.JobEvent); ok {
- switch jev.Event { //nolint:exhaustive
- case events.EventJobOK:
- atomic.AddUint64(&se.jobsOk, 1)
- case events.EventPushOK:
- atomic.AddUint64(&se.pushOk, 1)
- case events.EventPushError:
- atomic.AddUint64(&se.pushErr, 1)
- case events.EventJobError:
- atomic.AddUint64(&se.jobsErr, 1)
- }
- }
-}
-
-func (se *statsExporter) Describe(d chan<- *prometheus.Desc) {
- // send description
- d <- worker
- d <- pushErr
- d <- pushOk
- d <- jobsErr
- d <- jobsOk
-}
-
-func (se *statsExporter) Collect(ch chan<- prometheus.Metric) {
- // get the copy of the processes
- workers := se.workers.Workers()
-
- // cumulative RSS memory in bytes
- var cum uint64
-
- // collect the memory
- for i := 0; i < len(workers); i++ {
- cum += workers[i].MemoryUsage
- }
-
- // send the values to the prometheus
- ch <- prometheus.MustNewConstMetric(worker, prometheus.GaugeValue, float64(cum))
- // send the values to the prometheus
- ch <- prometheus.MustNewConstMetric(jobsOk, prometheus.GaugeValue, float64(atomic.LoadUint64(&se.jobsOk)))
- ch <- prometheus.MustNewConstMetric(jobsErr, prometheus.GaugeValue, float64(atomic.LoadUint64(&se.jobsErr)))
- ch <- prometheus.MustNewConstMetric(pushOk, prometheus.GaugeValue, float64(atomic.LoadUint64(&se.pushOk)))
- ch <- prometheus.MustNewConstMetric(pushErr, prometheus.GaugeValue, float64(atomic.LoadUint64(&se.pushErr)))
-}
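The statsExporter above is a plain prometheus.Collector: Describe announces the descriptors and Collect turns the atomics and the cumulative worker memory into constant metrics on every scrape. A standalone sketch of the same pattern with a registry (the exporter itself is unexported, so this stand-in collector only illustrates the mechanics, not a call into the jobs package):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// counterCollector is a minimal stand-in for the jobs statsExporter:
// it exposes one gauge built from an in-memory value on every scrape.
type counterCollector struct {
	desc *prometheus.Desc
	read func() float64
}

func (c *counterCollector) Describe(d chan<- *prometheus.Desc) { d <- c.desc }

func (c *counterCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.read())
}

func main() {
	col := &counterCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("rr_jobs", "", "jobs_ok"),
			"Number of successfully processed jobs.", nil, nil,
		),
		read: func() float64 { return 42 }, // stand-in for atomic.LoadUint64
	}

	reg := prometheus.NewRegistry()
	reg.MustRegister(col)

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetGauge().GetValue())
	}
}
```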
diff --git a/plugins/jobs/pipeline/pipeline.go b/plugins/jobs/pipeline/pipeline.go
deleted file mode 100644
index 8a8c1462..00000000
--- a/plugins/jobs/pipeline/pipeline.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package pipeline
-
-import (
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-// Pipeline defines pipeline options.
-type Pipeline map[string]interface{}
-
-const (
- priority string = "priority"
- driver string = "driver"
- name string = "name"
-)
-
-// With sets a pipeline value
-func (p *Pipeline) With(name string, value interface{}) {
- (*p)[name] = value
-}
-
-// Name returns pipeline name.
-func (p Pipeline) Name() string {
- return p.String(name, "")
-}
-
-// Driver associated with the pipeline.
-func (p Pipeline) Driver() string {
- return p.String(driver, "")
-}
-
-// Has checks if a value is present in the pipeline.
-func (p Pipeline) Has(name string) bool {
- if _, ok := p[name]; ok {
- return true
- }
-
- return false
-}
-
-// String must return option value as string or return default value.
-func (p Pipeline) String(name string, d string) string {
- if value, ok := p[name]; ok {
- if str, ok := value.(string); ok {
- return str
- }
- }
-
- return d
-}
-
-// Int must return option value as int or return default value.
-func (p Pipeline) Int(name string, d int) int {
- if value, ok := p[name]; ok {
- if i, ok := value.(int); ok {
- return i
- }
- }
-
- return d
-}
-
-// Bool must return option value as bool or return default value.
-func (p Pipeline) Bool(name string, d bool) bool {
- if value, ok := p[name]; ok {
- if i, ok := value.(bool); ok {
- return i
- }
- }
-
- return d
-}
-
-// Map must return a nested map value or leave the output unchanged.
-// Here might be SQS attributes or tags, for example
-func (p Pipeline) Map(name string, out map[string]string) error {
- if value, ok := p[name]; ok {
- if m, ok := value.(string); ok {
- err := json.Unmarshal(utils.AsBytes(m), &out)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// Priority returns default pipeline priority
-func (p Pipeline) Priority() int64 {
- if value, ok := p[priority]; ok {
- if v, ok := value.(int64); ok {
- return v
- }
- }
-
- return 10
-}
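Since Pipeline is a plain map with typed accessors, declaring and querying one looks like the sketch below; the `queue` key is a hypothetical driver-specific option, and the import path follows the pre-move layout in this diff:

```go
package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
)

func main() {
	pipe := &pipeline.Pipeline{
		"driver":   "amqp",
		"priority": int64(5),
		"queue":    "emails", // hypothetical driver-specific option
	}
	pipe.With("name", "amqp-emails")

	fmt.Println(pipe.Name())               // amqp-emails
	fmt.Println(pipe.Driver())             // amqp
	fmt.Println(pipe.Priority())           // 5
	fmt.Println(pipe.Has("queue"))         // true
	fmt.Println(pipe.String("queue", "-")) // emails
}
```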
diff --git a/plugins/jobs/pipeline/pipeline_test.go b/plugins/jobs/pipeline/pipeline_test.go
deleted file mode 100644
index 4482c70d..00000000
--- a/plugins/jobs/pipeline/pipeline_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package pipeline
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestPipeline_String(t *testing.T) {
- pipe := Pipeline{"value": "value"}
-
- assert.Equal(t, "value", pipe.String("value", ""))
- assert.Equal(t, "value", pipe.String("other", "value"))
-}
-
-func TestPipeline_Has(t *testing.T) {
- pipe := Pipeline{"options": map[string]interface{}{"ttl": 10}}
-
- assert.Equal(t, true, pipe.Has("options"))
- assert.Equal(t, false, pipe.Has("other"))
-}
diff --git a/plugins/jobs/plugin.go b/plugins/jobs/plugin.go
deleted file mode 100644
index 3aec6acc..00000000
--- a/plugins/jobs/plugin.go
+++ /dev/null
@@ -1,719 +0,0 @@
-package jobs
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-const (
- // RrMode env variable
- RrMode string = "RR_MODE"
- RrModeJobs string = "jobs"
-
- PluginName string = "jobs"
- pipelines string = "pipelines"
-)
-
-type Plugin struct {
- sync.RWMutex
-
- // Jobs plugin configuration
- cfg *Config `structure:"jobs"`
- log logger.Logger
- workersPool pool.Pool
- server server.Server
-
- jobConstructors map[string]jobs.Constructor
- consumers sync.Map // map[string]jobs.Consumer
-
- // events handler
- events events.Handler
-
- // priority queue implementation
- queue priorityqueue.Queue
-
-	// parent config for broker options. Keys are pipeline names, values are pointers to the associated pipeline
- pipelines sync.Map
-
- // initial set of the pipelines to consume
- consume map[string]struct{}
-
- // signal channel to stop the pollers
- stopCh chan struct{}
-
- // internal payloads pool
- pldPool sync.Pool
- statsExporter *statsExporter
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger, server server.Server) error {
- const op = errors.Op("jobs_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &p.cfg)
- if err != nil {
- return errors.E(op, err)
- }
-
- p.cfg.InitDefaults()
-
- p.server = server
-
- p.events = events.NewEventsHandler()
- p.events.AddListener(p.collectJobsEvents)
-
- p.jobConstructors = make(map[string]jobs.Constructor)
- p.consume = make(map[string]struct{})
- p.stopCh = make(chan struct{}, 1)
-
- p.pldPool = sync.Pool{New: func() interface{} {
- // with nil fields
- return &payload.Payload{}
- }}
-
- // initial set of pipelines
- for i := range p.cfg.Pipelines {
- p.pipelines.Store(i, p.cfg.Pipelines[i])
- }
-
- if len(p.cfg.Consume) > 0 {
- for i := 0; i < len(p.cfg.Consume); i++ {
- p.consume[p.cfg.Consume[i]] = struct{}{}
- }
- }
-
- // initialize priority queue
- p.queue = priorityqueue.NewBinHeap(p.cfg.PipelineSize)
- p.log = log
-
- // metrics
- p.statsExporter = newStatsExporter(p)
- p.events.AddListener(p.statsExporter.metricsCallback)
-
- return nil
-}
-
-func (p *Plugin) Serve() chan error { //nolint:gocognit
- errCh := make(chan error, 1)
- const op = errors.Op("jobs_plugin_serve")
-
- // register initial pipelines
- p.pipelines.Range(func(key, value interface{}) bool {
- t := time.Now()
- // pipeline name (ie test-local, sqs-aws, etc)
- name := key.(string)
-
- // pipeline associated with the name
- pipe := value.(*pipeline.Pipeline)
- // driver for the pipeline (ie amqp, ephemeral, etc)
- dr := pipe.Driver()
-
- // jobConstructors contains constructors for the drivers
- // we need here to initialize these drivers for the pipelines
- if _, ok := p.jobConstructors[dr]; ok {
- // config key for the particular sub-driver jobs.pipelines.test-local
- configKey := fmt.Sprintf("%s.%s.%s", PluginName, pipelines, name)
-
- // init the driver
- initializedDriver, err := p.jobConstructors[dr].JobsConstruct(configKey, p.events, p.queue)
- if err != nil {
- errCh <- errors.E(op, err)
- return false
- }
-
- // add driver to the set of the consumers (name - pipeline name, value - associated driver)
- p.consumers.Store(name, initializedDriver)
-
- // register pipeline for the initialized driver
- err = initializedDriver.Register(context.Background(), pipe)
- if err != nil {
- errCh <- errors.E(op, errors.Errorf("pipe register failed for the driver: %s with pipe name: %s", pipe.Driver(), pipe.Name()))
- return false
- }
-
- // if pipeline initialized to be consumed, call Run on it
- if _, ok := p.consume[name]; ok {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- defer cancel()
- err = initializedDriver.Run(ctx, pipe)
- if err != nil {
- errCh <- errors.E(op, err)
- return false
- }
- return true
- }
-
- return true
- }
-
- p.events.Push(events.JobEvent{
- Event: events.EventDriverReady,
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Start: t,
- Elapsed: t.Sub(t),
- })
-
- return true
- })
-
- // do not continue processing, immediately stop if channel contains an error
- if len(errCh) > 0 {
- return errCh
- }
-
- var err error
- p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: RrModeJobs})
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // start listening
- go func() {
- for i := uint8(0); i < p.cfg.NumPollers; i++ {
- go func() {
- for {
- select {
- case <-p.stopCh:
- p.log.Info("------> job poller stopped <------")
- return
- default:
- // get prioritized JOB from the queue
- jb := p.queue.ExtractMin()
-
- // parse the context
- // for each job, context contains:
- /*
- 1. Job class
- 2. Job ID provided from the outside
- 3. Job Headers map[string][]string
- 4. Timeout in seconds
- 5. Pipeline name
- */
-
- start := time.Now()
- p.events.Push(events.JobEvent{
- Event: events.EventJobStart,
- ID: jb.ID(),
- Start: start,
- Elapsed: 0,
- })
-
- ctx, err := jb.Context()
- if err != nil {
- p.events.Push(events.JobEvent{
- Event: events.EventJobError,
- Error: err,
- ID: jb.ID(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- errNack := jb.Nack()
- if errNack != nil {
- p.log.Error("negatively acknowledge failed", "error", errNack)
- }
- p.log.Error("job marshal context", "error", err)
- continue
- }
-
- // get payload from the sync.Pool
- exec := p.getPayload(jb.Body(), ctx)
-
- // protect from the pool reset
- p.RLock()
- resp, err := p.workersPool.Exec(exec)
- p.RUnlock()
- if err != nil {
- p.events.Push(events.JobEvent{
- Event: events.EventJobError,
- ID: jb.ID(),
- Error: err,
- Start: start,
- Elapsed: time.Since(start),
- })
- // RR protocol level error, Nack the job
- errNack := jb.Nack()
- if errNack != nil {
- p.log.Error("negatively acknowledge failed", "error", errNack)
- }
-
- p.log.Error("job execute failed", "error", err)
-
- p.putPayload(exec)
- continue
- }
-
- // if response is nil or body is nil, just acknowledge the job
- if resp == nil || resp.Body == nil {
- p.putPayload(exec)
- err = jb.Ack()
- if err != nil {
- p.events.Push(events.JobEvent{
- Event: events.EventJobError,
- ID: jb.ID(),
- Error: err,
- Start: start,
- Elapsed: time.Since(start),
- })
- p.log.Error("acknowledge error, job might be missed", "error", err)
- continue
- }
-
- p.events.Push(events.JobEvent{
- Event: events.EventJobOK,
- ID: jb.ID(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- continue
- }
-
- // handle the response protocol
- err = handleResponse(resp.Body, jb, p.log)
- if err != nil {
- p.events.Push(events.JobEvent{
- Event: events.EventJobError,
- ID: jb.ID(),
- Start: start,
- Error: err,
- Elapsed: time.Since(start),
- })
- p.putPayload(exec)
- errNack := jb.Nack()
- if errNack != nil {
- p.log.Error("negatively acknowledge failed, job might be lost", "root error", err, "error nack", errNack)
- continue
- }
-
- p.log.Error("job negatively acknowledged", "error", err)
- continue
- }
-
- p.events.Push(events.JobEvent{
- Event: events.EventJobOK,
- ID: jb.ID(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- // return payload
- p.putPayload(exec)
- }
- }
- }()
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin) Stop() error {
- // range over all consumers and call stop
- p.consumers.Range(func(key, value interface{}) bool {
- consumer := value.(jobs.Consumer)
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- err := consumer.Stop(ctx)
- if err != nil {
- cancel()
- p.log.Error("stop job driver", "driver", key)
- return true
- }
- cancel()
- return true
- })
-
-	// this goroutine can block forever if the pollers never read from stopCh; that is acceptable,
-	// because the pollers usually get a chance to exit, and if they do not, it is not a problem.
-	// The main goal is to stop the drivers
- go func() {
- for i := uint8(0); i < p.cfg.NumPollers; i++ {
- // stop jobs plugin pollers
- p.stopCh <- struct{}{}
- }
- }()
-
-	// give the pollers up to 5 seconds to stop before exiting
- time.Sleep(time.Second * 5)
-
- p.Lock()
- p.workersPool.Destroy(context.Background())
- p.Unlock()
-
- return nil
-}
-
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.CollectMQBrokers,
- }
-}
-
-func (p *Plugin) CollectMQBrokers(name endure.Named, c jobs.Constructor) {
- p.jobConstructors[name.Name()] = c
-}
-
-func (p *Plugin) Workers() []*process.State {
- p.RLock()
- wrk := p.workersPool.Workers()
- p.RUnlock()
-
- ps := make([]*process.State, len(wrk))
-
- for i := 0; i < len(wrk); i++ {
- st, err := process.WorkerProcessState(wrk[i])
- if err != nil {
- p.log.Error("jobs workers state", "error", err)
- return nil
- }
-
- ps[i] = st
- }
-
- return ps
-}
-
-func (p *Plugin) JobsState(ctx context.Context) ([]*jobState.State, error) {
- const op = errors.Op("jobs_plugin_drivers_state")
- jst := make([]*jobState.State, 0, 2)
- var err error
- p.consumers.Range(func(key, value interface{}) bool {
- consumer := value.(jobs.Consumer)
- newCtx, cancel := context.WithTimeout(ctx, time.Second*time.Duration(p.cfg.Timeout))
-
- var state *jobState.State
- state, err = consumer.State(newCtx)
- if err != nil {
- cancel()
- return false
- }
-
- jst = append(jst, state)
- cancel()
- return true
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
- return jst, nil
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-func (p *Plugin) Reset() error {
- p.Lock()
- defer p.Unlock()
-
- const op = errors.Op("jobs_plugin_reset")
- p.log.Info("JOBS plugin received restart request. Restarting...")
- p.workersPool.Destroy(context.Background())
- p.workersPool = nil
-
- var err error
- p.workersPool, err = p.server.NewWorkerPool(context.Background(), p.cfg.Pool, map[string]string{RrMode: RrModeJobs}, p.collectJobsEvents, p.statsExporter.metricsCallback)
- if err != nil {
- return errors.E(op, err)
- }
-
- p.log.Info("JOBS workers pool successfully restarted")
-
- return nil
-}
-
-func (p *Plugin) Push(j *job.Job) error {
- const op = errors.Op("jobs_plugin_push")
-
- start := time.Now()
- // get the pipeline for the job
- pipe, ok := p.pipelines.Load(j.Options.Pipeline)
- if !ok {
- return errors.E(op, errors.Errorf("no such pipeline, requested: %s", j.Options.Pipeline))
- }
-
- // type conversion
- ppl := pipe.(*pipeline.Pipeline)
-
- d, ok := p.consumers.Load(ppl.Name())
- if !ok {
- return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver()))
- }
-
- // if job has no priority, inherit it from the pipeline
- if j.Options.Priority == 0 {
- j.Options.Priority = ppl.Priority()
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- defer cancel()
-
- err := d.(jobs.Consumer).Push(ctx, j)
- if err != nil {
- p.events.Push(events.JobEvent{
- Event: events.EventPushError,
- ID: j.Ident,
- Pipeline: ppl.Name(),
- Driver: ppl.Driver(),
- Error: err,
- Start: start,
- Elapsed: time.Since(start),
- })
- return errors.E(op, err)
- }
-
- p.events.Push(events.JobEvent{
- Event: events.EventPushOK,
- ID: j.Ident,
- Pipeline: ppl.Name(),
- Driver: ppl.Driver(),
- Error: err,
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (p *Plugin) PushBatch(j []*job.Job) error {
- const op = errors.Op("jobs_plugin_push")
- start := time.Now()
-
- for i := 0; i < len(j); i++ {
- // get the pipeline for the job
- pipe, ok := p.pipelines.Load(j[i].Options.Pipeline)
- if !ok {
- return errors.E(op, errors.Errorf("no such pipeline, requested: %s", j[i].Options.Pipeline))
- }
-
- ppl := pipe.(*pipeline.Pipeline)
-
- d, ok := p.consumers.Load(ppl.Name())
- if !ok {
- return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver()))
- }
-
- // if job has no priority, inherit it from the pipeline
- if j[i].Options.Priority == 0 {
- j[i].Options.Priority = ppl.Priority()
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- err := d.(jobs.Consumer).Push(ctx, j[i])
- if err != nil {
- cancel()
- p.events.Push(events.JobEvent{
- Event: events.EventPushError,
- ID: j[i].Ident,
- Pipeline: ppl.Name(),
- Driver: ppl.Driver(),
- Start: start,
- Elapsed: time.Since(start),
- Error: err,
- })
- return errors.E(op, err)
- }
-
- cancel()
- }
-
- return nil
-}
-
-func (p *Plugin) Pause(pp string) {
- pipe, ok := p.pipelines.Load(pp)
-
-	if !ok {
-		p.log.Error("no such pipeline", "requested", pp)
-		return
-	}
-
- ppl := pipe.(*pipeline.Pipeline)
-
- d, ok := p.consumers.Load(ppl.Name())
- if !ok {
- p.log.Warn("driver for the pipeline not found", "pipeline", pp)
- return
- }
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- defer cancel()
- // redirect call to the underlying driver
- d.(jobs.Consumer).Pause(ctx, ppl.Name())
-}
-
-func (p *Plugin) Resume(pp string) {
- pipe, ok := p.pipelines.Load(pp)
-	if !ok {
-		p.log.Error("no such pipeline", "requested", pp)
-		return
-	}
-
- ppl := pipe.(*pipeline.Pipeline)
-
- d, ok := p.consumers.Load(ppl.Name())
- if !ok {
- p.log.Warn("driver for the pipeline not found", "pipeline", pp)
- return
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- defer cancel()
- // redirect call to the underlying driver
- d.(jobs.Consumer).Resume(ctx, ppl.Name())
-}
-
-// Declare a pipeline.
-func (p *Plugin) Declare(pipeline *pipeline.Pipeline) error {
- const op = errors.Op("jobs_plugin_declare")
- // driver for the pipeline (ie amqp, ephemeral, etc)
- dr := pipeline.Driver()
- if dr == "" {
- return errors.E(op, errors.Errorf("no associated driver with the pipeline, pipeline name: %s", pipeline.Name()))
- }
-
- // jobConstructors contains constructors for the drivers
- // we need here to initialize these drivers for the pipelines
- if _, ok := p.jobConstructors[dr]; ok {
- // init the driver from pipeline
- initializedDriver, err := p.jobConstructors[dr].FromPipeline(pipeline, p.events, p.queue)
- if err != nil {
- return errors.E(op, err)
- }
-
- // register pipeline for the initialized driver
- err = initializedDriver.Register(context.Background(), pipeline)
- if err != nil {
- return errors.E(op, errors.Errorf("pipe register failed for the driver: %s with pipe name: %s", pipeline.Driver(), pipeline.Name()))
- }
-
- // if pipeline initialized to be consumed, call Run on it
- // but likely for the dynamic pipelines it should be started manually
- if _, ok := p.consume[pipeline.Name()]; ok {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- defer cancel()
- err = initializedDriver.Run(ctx, pipeline)
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- // add driver to the set of the consumers (name - pipeline name, value - associated driver)
- p.consumers.Store(pipeline.Name(), initializedDriver)
- // save the pipeline
- p.pipelines.Store(pipeline.Name(), pipeline)
- }
-
- return nil
-}
-
-// Destroy pipeline and release all associated resources.
-func (p *Plugin) Destroy(pp string) error {
- const op = errors.Op("jobs_plugin_destroy")
- pipe, ok := p.pipelines.Load(pp)
- if !ok {
- return errors.E(op, errors.Errorf("no such pipeline, requested: %s", pp))
- }
-
- // type conversion
- ppl := pipe.(*pipeline.Pipeline)
-
- // delete consumer
- d, ok := p.consumers.LoadAndDelete(ppl.Name())
- if !ok {
- return errors.E(op, errors.Errorf("consumer not registered for the requested driver: %s", ppl.Driver()))
- }
-
- // delete old pipeline
- p.pipelines.LoadAndDelete(pp)
-
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(p.cfg.Timeout))
- err := d.(jobs.Consumer).Stop(ctx)
- if err != nil {
- cancel()
- return errors.E(op, err)
- }
-
- cancel()
- return nil
-}
-
-func (p *Plugin) List() []string {
- out := make([]string, 0, 10)
-
- p.pipelines.Range(func(key, _ interface{}) bool {
- // we can safely convert value here as we know that we store keys as strings
- out = append(out, key.(string))
- return true
- })
-
- return out
-}
-
-func (p *Plugin) RPC() interface{} {
- return &rpc{
- log: p.log,
- p: p,
- }
-}
-
-func (p *Plugin) collectJobsEvents(event interface{}) {
- if jev, ok := event.(events.JobEvent); ok {
- switch jev.Event {
- case events.EventPipePaused:
- p.log.Info("pipeline paused", "pipeline", jev.Pipeline, "driver", jev.Driver, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventJobStart:
- p.log.Info("job processing started", "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventJobOK:
- p.log.Info("job processed without errors", "ID", jev.ID, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventPushOK:
- p.log.Info("job pushed to the queue", "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventPushError:
- p.log.Error("job push error, job might be lost", "error", jev.Error, "pipeline", jev.Pipeline, "ID", jev.ID, "driver", jev.Driver, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventJobError:
- p.log.Error("job processed with errors", "error", jev.Error, "ID", jev.ID, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventPipeActive:
- p.log.Info("pipeline active", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventPipeStopped:
- p.log.Warn("pipeline stopped", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventPipeError:
- p.log.Error("pipeline error", "pipeline", jev.Pipeline, "error", jev.Error, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- case events.EventDriverReady:
- p.log.Info("driver ready", "pipeline", jev.Pipeline, "start", jev.Start.UTC(), "elapsed", jev.Elapsed)
- }
- }
-}
-
-func (p *Plugin) getPayload(body, context []byte) *payload.Payload {
- pld := p.pldPool.Get().(*payload.Payload)
- pld.Body = body
- pld.Context = context
- return pld
-}
-
-func (p *Plugin) putPayload(pld *payload.Payload) {
- pld.Body = nil
- pld.Context = nil
- p.pldPool.Put(pld)
-}
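The getPayload/putPayload pair above is a standard sync.Pool recycling pattern: fields are zeroed before the payload is returned so the pool never keeps references to job data. A self-contained sketch of the same pattern (the Payload struct here is a stand-in, not the roadrunner payload package):

```go
package main

import (
	"fmt"
	"sync"
)

// Payload is a stand-in for pkg/payload.Payload: two byte slices reused across jobs.
type Payload struct {
	Context []byte
	Body    []byte
}

var pldPool = sync.Pool{New: func() interface{} {
	// new objects start with nil fields
	return &Payload{}
}}

func getPayload(body, ctx []byte) *Payload {
	pld := pldPool.Get().(*Payload)
	pld.Body = body
	pld.Context = ctx
	return pld
}

func putPayload(pld *Payload) {
	// zero the fields so the pool does not hold references to job data
	pld.Body = nil
	pld.Context = nil
	pldPool.Put(pld)
}

func main() {
	p := getPayload([]byte(`{"job":"data"}`), []byte(`{"id":"1"}`))
	fmt.Println(string(p.Body), string(p.Context))
	putPayload(p)
}
```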
diff --git a/plugins/jobs/protocol.go b/plugins/jobs/protocol.go
deleted file mode 100644
index 9d769fdf..00000000
--- a/plugins/jobs/protocol.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package jobs
-
-import (
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- pq "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type Type uint32
-
-const (
- NoError Type = iota
- Error
-)
-
-// internal worker protocol (jobs mode)
-type protocol struct {
- // message type, see Type
- T Type `json:"type"`
- // Payload
- Data json.RawMessage `json:"data"`
-}
-
-type errorResp struct {
- Msg string `json:"message"`
- Requeue bool `json:"requeue"`
- Delay int64 `json:"delay_seconds"`
- Headers map[string][]string `json:"headers"`
-}
-
-func handleResponse(resp []byte, jb pq.Item, log logger.Logger) error {
- const op = errors.Op("jobs_handle_response")
- // TODO(rustatian) to sync.Pool
- p := &protocol{}
-
- err := json.Unmarshal(resp, p)
- if err != nil {
- return errors.E(op, err)
- }
-
- switch p.T {
- // likely case
- case NoError:
- err = jb.Ack()
- if err != nil {
- return errors.E(op, err)
- }
- case Error:
- // TODO(rustatian) to sync.Pool
- er := &errorResp{}
-
- err = json.Unmarshal(p.Data, er)
- if err != nil {
- return errors.E(op, err)
- }
-
- log.Error("jobs protocol error", "error", er.Msg, "delay", er.Delay, "requeue", er.Requeue)
-
- if er.Requeue {
- err = jb.Requeue(er.Headers, er.Delay)
- if err != nil {
- return errors.E(op, err)
- }
- return nil
- }
-
- return errors.E(op, errors.Errorf("jobs response error: %v", er.Msg))
-
- default:
- err = jb.Ack()
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- return nil
-}
diff --git a/plugins/jobs/rpc.go b/plugins/jobs/rpc.go
deleted file mode 100644
index d7b93bd1..00000000
--- a/plugins/jobs/rpc.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package jobs
-
-import (
- "context"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
-)
-
-type rpc struct {
- log logger.Logger
- p *Plugin
-}
-
-func (r *rpc) Push(j *jobsv1beta.PushRequest, _ *jobsv1beta.Empty) error {
- const op = errors.Op("rpc_push")
-
-	// convert the transport entity into the domain entity
-
- if j.GetJob().GetId() == "" {
- return errors.E(op, errors.Str("empty ID field not allowed"))
- }
-
- err := r.p.Push(from(j.GetJob()))
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (r *rpc) PushBatch(j *jobsv1beta.PushBatchRequest, _ *jobsv1beta.Empty) error {
- const op = errors.Op("rpc_push_batch")
-
- l := len(j.GetJobs())
-
- batch := make([]*job.Job, l)
-
- for i := 0; i < l; i++ {
-		// convert the transport entity into the domain entity
- batch[i] = from(j.GetJobs()[i])
- }
-
- err := r.p.PushBatch(batch)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (r *rpc) Pause(req *jobsv1beta.Pipelines, _ *jobsv1beta.Empty) error {
- for i := 0; i < len(req.GetPipelines()); i++ {
- r.p.Pause(req.GetPipelines()[i])
- }
-
- return nil
-}
-
-func (r *rpc) Resume(req *jobsv1beta.Pipelines, _ *jobsv1beta.Empty) error {
- for i := 0; i < len(req.GetPipelines()); i++ {
- r.p.Resume(req.GetPipelines()[i])
- }
-
- return nil
-}
-
-func (r *rpc) List(_ *jobsv1beta.Empty, resp *jobsv1beta.Pipelines) error {
- resp.Pipelines = r.p.List()
- return nil
-}
-
-// Declare pipeline used to dynamically declare any type of the pipeline
-// Mandatory fields:
-// 1. Driver
-// 2. Pipeline name
-// 3. Options related to the particular pipeline
-func (r *rpc) Declare(req *jobsv1beta.DeclareRequest, _ *jobsv1beta.Empty) error {
- const op = errors.Op("rpc_declare_pipeline")
- pipe := &pipeline.Pipeline{}
-
- for i := range req.GetPipeline() {
- (*pipe)[i] = req.GetPipeline()[i]
- }
-
- err := r.p.Declare(pipe)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (r *rpc) Destroy(req *jobsv1beta.Pipelines, resp *jobsv1beta.Pipelines) error {
-	const op = errors.Op("rpc_destroy_pipeline")
-
- var destroyed []string //nolint:prealloc
- for i := 0; i < len(req.GetPipelines()); i++ {
- err := r.p.Destroy(req.GetPipelines()[i])
- if err != nil {
- return errors.E(op, err)
- }
- destroyed = append(destroyed, req.GetPipelines()[i])
- }
-
- // return destroyed pipelines
- resp.Pipelines = destroyed
-
- return nil
-}
-
-func (r *rpc) Stat(_ *jobsv1beta.Empty, resp *jobsv1beta.Stats) error {
- const op = errors.Op("rpc_stats")
- state, err := r.p.JobsState(context.Background())
- if err != nil {
- return errors.E(op, err)
- }
-
- for i := 0; i < len(state); i++ {
- resp.Stats = append(resp.Stats, &jobsv1beta.Stat{
- Pipeline: state[i].Pipeline,
- Driver: state[i].Driver,
- Queue: state[i].Queue,
- Active: state[i].Active,
- Delayed: state[i].Delayed,
- Reserved: state[i].Reserved,
- Ready: state[i].Ready,
- })
- }
-
- return nil
-}
-
-// from converts from transport entity to domain
-func from(j *jobsv1beta.Job) *job.Job {
- headers := make(map[string][]string, len(j.GetHeaders()))
-
- for k, v := range j.GetHeaders() {
- headers[k] = v.GetValue()
- }
-
- jb := &job.Job{
- Job: j.GetJob(),
- Headers: headers,
- Ident: j.GetId(),
- Payload: j.GetPayload(),
- Options: &job.Options{
- Priority: j.GetOptions().GetPriority(),
- Pipeline: j.GetOptions().GetPipeline(),
- Delay: j.GetOptions().GetDelay(),
- },
- }
-
- return jb
-}
diff --git a/plugins/kv/config.go b/plugins/kv/config.go
deleted file mode 100644
index 09ba79cd..00000000
--- a/plugins/kv/config.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package kv
-
-// Config represents general storage configuration with keys as the user defined kv-names and values as the constructors
-type Config struct {
- Data map[string]interface{} `mapstructure:"kv"`
-}
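The Data map mirrors the `kv:` section of the YAML config: every key is a user-defined storage name and every value must itself be a map carrying at least a `driver` field, which is what the Serve loop of the kv plugin below checks. A hypothetical shape expressed as a Go literal (the nested config keys are illustrative, not the drivers' real option names):

```go
package main

import "fmt"

func main() {
	// hypothetical equivalent of a `kv:` YAML section; keys are user-defined
	// storage names, values must contain at least the "driver" field.
	data := map[string]interface{}{
		"boltdb-north": map[string]interface{}{
			"driver": "boltdb",
			"config": map[string]interface{}{"file": "north.db"},
		},
		"memcached": map[string]interface{}{
			"driver": "memcached",
			"config": map[string]interface{}{"addr": []string{"127.0.0.1:11211"}},
		},
	}

	for name, v := range data {
		section, ok := v.(map[string]interface{})
		if !ok {
			continue // malformed entries are skipped, like in Serve
		}
		fmt.Println(name, "driver:", section["driver"])
	}
}
```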
diff --git a/plugins/kv/doc/kv.drawio b/plugins/kv/doc/kv.drawio
deleted file mode 100644
index 04470e4a..00000000
--- a/plugins/kv/doc/kv.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2021-04-22T21:31:28.320Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.5.1 Chrome/89.0.4389.128 Electron/12.0.5 Safari/537.36" etag="PMNN2QoTRBeugwC1WCGf" version="14.5.1" type="device"><diagram id="2us8W0xnLog_cmX3fgYy" name="Page-1">7V1Zc6O4Fv41rp55sAsQi3lMHKeTmaQ7lXRmpp9uySDbTLDlBjmJ59dficWAJO8GvLmrYyMkAeccfTqLdGiAzujzawAnw0fsIr+hKe5nA9w0NE1TTI1+sZJZXKJqqhGXDALPTcqyghfvP5QUKknp1HNRWKhIMPaJNykWOng8Rg4plMEgwB/Fan3sF686gQMkFLw40BdL//ZcMoxLdbOdO3GHvMEwuTQASnLnI5jWTgrCIXTxR64IdBugE2BM4l+jzw7yGflSwsTtbhecnd9AgMZknQb62+30l3L1+b8/vk0ME0B/YipNI+kmJLP0kZFLKZAc4oAM8QCPod/NSq8DPB27iHWr0KOszgPGE1qo0sJ/ESGzhJ1wSjAtGpKRn5xFnx75J/f7J+uqZSRHN59Jz9HBLD0Yk2D2T1aRHf5M+2AHWbPoKG0XEhiQKyYKtGCMxygtu/V8f97CTWs4PgxDz4kLkyrsEn08JsnzqDo7pqc62MdBRDRgKtBWrajrAL+h3BntxjIVdpGY0Iy6CxmYMgNPAyep9T57/HDIt/G/Duh13bfO+633q5mOBBgMEFnCXXUuZnSEIjxClDC0XYB8SLz34n3AZKQM5vUyWaI/EnHaRLS0OkUrE6efeWmSitaZiIi0nrGjiERNKWXgLFdhgr0xCXM9P7ECWiGZGFTVTkAxmRfaBoddfAPQXtqA/ojvIRPX+cPsIMHgIsGHL8HgICTYAhtKMNdgZwleRsN36E8TQvz5Fz1+enj9ev9NEO6i6H4MPYJeJjBi0gfV74piysmGC1G778hkw3TaqNeXSNdC0XhHAUGfuSKRu3M26EWq2gkuKB+ZtqamKtkwp6il7fY+7bVrxYxtFKqm0lKUuRoVN1WNuZK1QK+Kjp5Q4FGyoUDEmASFMoARcCpDoTxUqftFEbCmqrQriuwkM6p9fFp4UV5WSUsJE1T5giKH1HZFkrLsLnOAfv394cfNdTmgjlTXQNIJ3zYtAM2SQB1oRVA3dQmoaxJQN8sC9VrH5/agDrhBatnbgPqaoL0K+/c4Vo2KQH0r1TA1TeaKnr1cM+Trq5pulW/bqOax2DY5xw+w9YLrp6Xoq9w/qyX6IGejdSWcYm6NeotWq4vnordsIik5n3MNeosh6C2P3cfOVeeue3PSqotl1q266LV6+JWWtgXSc8aoVv0oZUzmRUrtQRVpMpFSFLN7dbvfcW2vq+NUNa7l0mUKA1s+JzzAHvKLMgJ9bzBm7KC0YVPzNRtnngP9q+TEyHPdWBxR6P0He1F/jJeJCkY7N64bxo2EdQsG7jxQmHTWmEfn8ixaMo4Wjnyqb6sAgMLg1+OjzdRNQT8ESss0Cv02Nb2lmMV+cL8fUjnh+bkHTVGv1QtenOStbWb5A1ERjwhSrDrsJg1whlCbD2ivbFBFUEi3BcTTTxTxFpjPGeIZtm0WkWk/kGe3Uq0ltYmNlm2VAHjLhkmOwc9PtCfl+vVlryrqGuE0xuRceT/6lKO6agqvutprqq6lhVKMw5l4yrEu94jtqsQOXBwSrMsMVEU7sBUErRmk5NdMn973dS8osNz8NWULpCKJb4YR065oBVWffEYETM/TXwMyJ2lOZDYbk3R4aY40jOmaPdMoyWy0zTWimGUNvSVRjgKj9op9/baD5HTutQ3dUPZDV9VSDi2SkC5sPKJQgiQ8bLWPL5KweIXcwUYSOOm1VyjEfH0VJMO43EiCVatEbxdJ2GgJ6eHHEHaSbXXXNYC7xRAOyL1wBjGEnSRl0XRb0SoZQSV57t7cvxxY/EBigQssWT9+0F7XCCsvfnA06E4PxPVo2yH+HsfW2uuKakVhcV1Rao+53vtaBplBDTKJNRZ3wirTSn3oFDvpwNCBrgfzLbe91tt7dDa+ICVCfM3ifdDi6HmKpUfziPQbjhhujXvhJO6ph33i9ppUDBk8nN/zS4rcgA6XIO4nJs+FKpQqXkKSrAOCQmZ+3AZBk9Gp0PPZ04vqA4gnWBC0ImG6kCnDn6nzxuY1nlAXKuWLJigYeVSrx+Mw7syyrAtdqFZE/LgTXQlPnh5z97bwYKf5vDyzR2jkQGeI3OY0vKgqgqoyJ8+FMNTAcd1UXTGuC1OLjx3oD3HI5hxV1VS1cFMsNn7q5Dt3HAmQ610ARASQmC4XokTgcRGQhUVN+v9LHkZNYNlf6iOWFMoKF+MckOEQTlDkSyfM27jCsduDztsg8kh+nxLfG6Ok3IXB23fayiOR16+lMA9hukLLR/3oObiotcH+NWSbnKNPQ1ixE3/mD7GTc9hWistAVUWyTEADEucwKG2Jjipf7VuLezg9s7t7OHMIrxHR2T6OXWZEp6rsCbtJjxjSWYUsTjy2ovlu0PuN5S9ia/Oy799jkOMRSF24XqgUxF5yuUP21Z0Fdav27J0FUSv2A54FTUv3Gp4FFXfwMdand0o1g92WFhhqcWnB/Di/tECmPdqlKY9i1PkZOYipHfTJ/OnAG3+hLFPwx7gR7WPoe4P4mYnAG0oY0ihtswVTQcOEHyWxBwCOPbKVH5aEPVpp7NH3oZ0pGaYoGaxoymJk2QusxC1b9HPWY14vmoxmWxQqU2YwGmXJlLgB4CJTxy1T7ZSndclUCoC171XYja78XoX6UwdoMvP8+Alb/5pKTZxZnQBBwvQeijjKGH005r79g9F09H0BCOfHtKw1FdHSNB1NnJWW8INOMOwvgs4wp5aeGpd0g1Md1nU2l8clMdfEOY0aznyzldpHjXUa0wNHWJmgVzs9gKPZEnP4W1u0ZCPrykCILZeSijZBidtt47yezZfvrz/u6Jmb5/u/us9Vj68ygMzis3vWr+eKGR9OAcikelW1QFbr5qHL3r4sP9ihA6AujrjDTzKVS0xXz5totCpTStUrXVttoFdtzqneTiy9hW9pEBqseDGJptSSg0pUF+Qq4/HnoNKWznl0CFoqlxwvocSOKahUrWVxOajMlq0Xuykv6V4qhrKEqU2Wh4pfOne6+qEliVlUrMaciB90HQ9CtYQ9wgxftb99bt/6oXYM+mF6l5LkD6eLfLZshUa1A1R0uT52n792adHVwwP9++OuO2cBY8bf9z/uWCYO5fXb/e19lNx7cfLE2nyy5p4Y1rY4V4bEdajLGFaaTxYc0OunAPdZDrDLlmfXnw9claBExZaWVRFOb2VpaQaXwjDNRVaqHQREx93be+sOhtJBcPTGEFjw6qlcCnLD4BLyJvPmjtZQUwXtFtDs7NMuXibNF1lBO
nIxdi5/w8EZsNsCls3xYTd2V2DYikHD11ASr03XSE1H/pVDcJ5ZEWOf6NxOPMyY1sOE4JGEmwRzah6OF1B15i8Ll+3C4ZZQ7UFPEMBRllpZFrotL6mX6F54gjMfQze3bXKDTLzCIjm1RU+9UL6x96tzW/+FJdq8L0Njje8JGtGv397QLF7iF99r9JMQ//cNtMmEkwcd36fqBCcjQKL8t6sUEkMUkmcUTvA4RBsQv1xVfjn0bkB+4Y3UsqikKiE/2Jz8bBsXZsMlA1ZqkQ4fscsmrO7/AQ==</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/kv/plugin.go b/plugins/kv/plugin.go
deleted file mode 100644
index 86bd982f..00000000
--- a/plugins/kv/plugin.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package kv
-
-import (
- "fmt"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
-	// PluginName is linked to the memory, boltdb, memcached and redis plugins. DO NOT change w/o sync.
-	PluginName string = "kv"
-	// driver is the mandatory field which should be present in every storage
- driver string = "driver"
- // config key used to detect local configuration for the driver
- cfg string = "config"
-)
-
-// Plugin for the unified storage
-type Plugin struct {
- log logger.Logger
- // constructors contains general storage constructors, such as boltdb, memory, memcached, redis.
- constructors map[string]kv.Constructor
- // storages contains user-defined storages, such as boltdb-north, memcached-us and so on.
- storages map[string]kv.Storage
- // KV configuration
- cfg Config
- cfgPlugin config.Configurer
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("kv_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &p.cfg.Data)
- if err != nil {
- return errors.E(op, err)
- }
- p.constructors = make(map[string]kv.Constructor, 5)
- p.storages = make(map[string]kv.Storage, 5)
- p.log = log
- p.cfgPlugin = cfg
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- errCh := make(chan error, 1)
- const op = errors.Op("kv_plugin_serve")
- // key - storage name in the config
- // value - storage
-	// For this config we should have 3 constructors (memory, boltdb and memcached) but 4 KV storages: default, boltdb-south, boltdb-north and memcached.
-	// When a user requests, for example, boltdb-south, we should provide that particular preconfigured storage.
-
- for k, v := range p.cfg.Data {
-		// for example, if the key is not properly formatted (yaml)
- if v == nil {
- continue
- }
-
- // check type of the v
- // should be a map[string]interface{}
- switch t := v.(type) {
- // correct type
- case map[string]interface{}:
- if _, ok := t[driver]; !ok {
- errCh <- errors.E(op, errors.Errorf("could not find mandatory driver field in the %s storage", k))
- return errCh
- }
- default:
- p.log.Warn("wrong type detected in the configuration, please, check yaml indentation")
- continue
- }
-
- // config key for the particular sub-driver kv.memcached.config
- configKey := fmt.Sprintf("%s.%s.%s", PluginName, k, cfg)
-		// at this point we know that the driver field is present in the configuration
- drName := v.(map[string]interface{})[driver]
-
- // driver name should be a string
- if drStr, ok := drName.(string); ok {
- switch {
- // local configuration section key
- case p.cfgPlugin.Has(configKey):
- if _, ok := p.constructors[drStr]; !ok {
- p.log.Warn("no constructors registered", "requested constructor", drStr, "registered", p.constructors)
- continue
- }
-
- storage, err := p.constructors[drStr].KVConstruct(configKey)
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- // save the storage
- p.storages[k] = storage
- // try global then
- case p.cfgPlugin.Has(k):
- if _, ok := p.constructors[drStr]; !ok {
- p.log.Warn("no constructors registered", "requested constructor", drStr, "registered", p.constructors)
- continue
- }
-
- // use only key for the driver registration, for example rr-boltdb should be globally available
- storage, err := p.constructors[drStr].KVConstruct(k)
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- // save the storage
- p.storages[k] = storage
- default:
- p.log.Error("can't find local or global configuration, this section will be skipped", "local: ", configKey, "global: ", k)
- continue
- }
- }
- continue
- }
-
- return errCh
-}
-
-func (p *Plugin) Stop() error {
- // stop all attached storages
- for k := range p.storages {
- p.storages[k].Stop()
- }
- return nil
-}
-
-// Collects declares the collector methods used to gather all plugins that implement the kv.Constructor interface
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.GetAllStorageDrivers,
- }
-}
-
-func (p *Plugin) GetAllStorageDrivers(name endure.Named, constructor kv.Constructor) {
- // save the storage constructor
- p.constructors[name.Name()] = constructor
-}
-
-// RPC returns associated rpc service.
-func (p *Plugin) RPC() interface{} {
- return &rpc{srv: p, log: p.log, storages: p.storages}
-}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {}
diff --git a/plugins/kv/rpc.go b/plugins/kv/rpc.go
deleted file mode 100644
index ad4aefa9..00000000
--- a/plugins/kv/rpc.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package kv
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
-)
-
-// rpc is the RPC wrapper for the kv plugin
-type rpc struct {
- // all available storages
- storages map[string]kv.Storage
-	// srv is the parent kv plugin which collects the Storage implementations
- srv *Plugin
- // Logger
- log logger.Logger
-}
-
-// Has accepts a proto payload with Storage and Items and reports which keys exist
-func (r *rpc) Has(in *kvv1.Request, out *kvv1.Response) error {
- const op = errors.Op("rpc_has")
-
- if in.GetStorage() == "" {
- return errors.E(op, errors.Str("no storage provided"))
- }
-
- keys := make([]string, 0, len(in.GetItems()))
-
- for i := 0; i < len(in.GetItems()); i++ {
- keys = append(keys, in.Items[i].Key)
- }
-
- if st, ok := r.storages[in.GetStorage()]; ok {
- ret, err := st.Has(keys...)
- if err != nil {
- return errors.E(op, err)
- }
-
- // update the value in the pointer
- // save the result
- out.Items = make([]*kvv1.Item, 0, len(ret))
- for k := range ret {
- out.Items = append(out.Items, &kvv1.Item{
- Key: k,
- })
- }
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// Set accepts a proto payload with Storage and Items
-func (r *rpc) Set(in *kvv1.Request, _ *kvv1.Response) error {
- const op = errors.Op("rpc_set")
-
- if st, exists := r.storages[in.GetStorage()]; exists {
- err := st.Set(in.GetItems()...)
- if err != nil {
- return errors.E(op, err)
- }
-
- // save the result
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// MGet accepts a proto payload with Storage and Items
-func (r *rpc) MGet(in *kvv1.Request, out *kvv1.Response) error {
- const op = errors.Op("rpc_mget")
-
- keys := make([]string, 0, len(in.GetItems()))
-
- for i := 0; i < len(in.GetItems()); i++ {
- keys = append(keys, in.Items[i].Key)
- }
-
- if st, exists := r.storages[in.GetStorage()]; exists {
- ret, err := st.MGet(keys...)
- if err != nil {
- return errors.E(op, err)
- }
-
- out.Items = make([]*kvv1.Item, 0, len(ret))
- for k := range ret {
- out.Items = append(out.Items, &kvv1.Item{
- Key: k,
- Value: ret[k],
- })
- }
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// MExpire accepts a proto payload with Storage and Items
-func (r *rpc) MExpire(in *kvv1.Request, _ *kvv1.Response) error {
- const op = errors.Op("rpc_mexpire")
-
- if st, exists := r.storages[in.GetStorage()]; exists {
- err := st.MExpire(in.GetItems()...)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// TTL accepts a proto payload with Storage and Items
-func (r *rpc) TTL(in *kvv1.Request, out *kvv1.Response) error {
- const op = errors.Op("rpc_ttl")
- keys := make([]string, 0, len(in.GetItems()))
-
- for i := 0; i < len(in.GetItems()); i++ {
- keys = append(keys, in.Items[i].Key)
- }
-
- if st, exists := r.storages[in.GetStorage()]; exists {
- ret, err := st.TTL(keys...)
- if err != nil {
- return errors.E(op, err)
- }
-
- out.Items = make([]*kvv1.Item, 0, len(ret))
- for k := range ret {
- out.Items = append(out.Items, &kvv1.Item{
- Key: k,
- Timeout: ret[k],
- })
- }
-
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// Delete accepts a proto payload with Storage and Items
-func (r *rpc) Delete(in *kvv1.Request, _ *kvv1.Response) error {
-	const op = errors.Op("rpc_delete")
-
- keys := make([]string, 0, len(in.GetItems()))
-
- for i := 0; i < len(in.GetItems()); i++ {
- keys = append(keys, in.Items[i].Key)
- }
- if st, exists := r.storages[in.GetStorage()]; exists {
- err := st.Delete(keys...)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
-
-// Clear cleans the whole storage
-func (r *rpc) Clear(in *kvv1.Request, _ *kvv1.Response) error {
-	const op = errors.Op("rpc_clear")
-
- if st, exists := r.storages[in.GetStorage()]; exists {
- err := st.Clear()
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
- }
-
- return errors.E(op, errors.Errorf("no such storage: %s", in.GetStorage()))
-}
diff --git a/plugins/logger/config.go b/plugins/logger/config.go
deleted file mode 100644
index 6ef56661..00000000
--- a/plugins/logger/config.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package logger
-
-import (
- "os"
- "strings"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "gopkg.in/natefinch/lumberjack.v2"
-)
-
-// ChannelConfig configures loggers per channel.
-type ChannelConfig struct {
-	// Dedicated channels per logger. By default, the logger is allocated via the named logger.
- Channels map[string]Config `mapstructure:"channels"`
-}
-
-// FileLoggerConfig structure represents configuration for the file logger
-type FileLoggerConfig struct {
-	// LogOutput is the file to write logs to. Backup log files will be retained
- // in the same directory. It uses <processname>-lumberjack.log in
- // os.TempDir() if empty.
- LogOutput string `mapstructure:"log_output"`
-
- // MaxSize is the maximum size in megabytes of the log file before it gets
- // rotated. It defaults to 100 megabytes.
- MaxSize int `mapstructure:"max_size"`
-
- // MaxAge is the maximum number of days to retain old log files based on the
- // timestamp encoded in their filename. Note that a day is defined as 24
- // hours and may not exactly correspond to calendar days due to daylight
- // savings, leap seconds, etc. The default is not to remove old log files
- // based on age.
- MaxAge int `mapstructure:"max_age"`
-
- // MaxBackups is the maximum number of old log files to retain. The default
- // is to retain all old log files (though MaxAge may still cause them to get
- // deleted.)
- MaxBackups int `mapstructure:"max_backups"`
-
- // Compress determines if the rotated log files should be compressed
- // using gzip. The default is not to perform compression.
- Compress bool `mapstructure:"compress"`
-}
-
-func (fl *FileLoggerConfig) InitDefaults() *FileLoggerConfig {
- if fl.LogOutput == "" {
- fl.LogOutput = os.TempDir()
- }
-
- if fl.MaxSize == 0 {
- fl.MaxSize = 100
- }
-
- if fl.MaxAge == 0 {
- fl.MaxAge = 24
- }
-
- if fl.MaxBackups == 0 {
- fl.MaxBackups = 10
- }
-
- return fl
-}
-
-type Config struct {
- // Mode configures logger based on some default template (development, production, off).
- Mode Mode `mapstructure:"mode"`
-
- // Level is the minimum enabled logging level. Note that this is a dynamic
- // level, so calling ChannelConfig.Level.SetLevel will atomically change the log
- // level of all loggers descended from this config.
- Level string `mapstructure:"level"`
-
-	// Encoding sets the logger's encoding. Valid values are "json" and
- // "console", as well as any third-party encodings registered via
- // RegisterEncoder.
- Encoding string `mapstructure:"encoding"`
-
- // Output is a list of URLs or file paths to write logging output to.
- // See Open for details.
- Output []string `mapstructure:"output"`
-
- // ErrorOutput is a list of URLs to write internal logger errors to.
- // The default is standard error.
- //
- // Note that this setting only affects internal errors; for sample code that
- // sends error-level logs to a different location from info- and debug-level
- // logs, see the package-level AdvancedConfiguration example.
- ErrorOutput []string `mapstructure:"errorOutput"`
-
- // File logger options
- FileLogger *FileLoggerConfig `mapstructure:"file_logger_options"`
-}
-
-// BuildLogger converts config into Zap configuration.
-func (cfg *Config) BuildLogger() (*zap.Logger, error) {
- var zCfg zap.Config
- switch Mode(strings.ToLower(string(cfg.Mode))) {
- case off, none:
- return zap.NewNop(), nil
- case production:
- zCfg = zap.NewProductionConfig()
- case development:
- zCfg = zap.Config{
- Level: zap.NewAtomicLevelAt(zap.DebugLevel),
- Development: true,
- Encoding: "console",
- EncoderConfig: zapcore.EncoderConfig{
- // Keys can be anything except the empty string.
- TimeKey: "T",
- LevelKey: "L",
- NameKey: "N",
- CallerKey: "C",
- FunctionKey: zapcore.OmitKey,
- MessageKey: "M",
- StacktraceKey: "S",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: ColoredLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
- EncodeDuration: zapcore.StringDurationEncoder,
- EncodeCaller: zapcore.ShortCallerEncoder,
- EncodeName: ColoredNameEncoder,
- },
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
- }
- case raw:
- zCfg = zap.Config{
- Level: zap.NewAtomicLevelAt(zap.InfoLevel),
- Encoding: "console",
- EncoderConfig: zapcore.EncoderConfig{
- MessageKey: "message",
- },
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
- }
- default:
- zCfg = zap.Config{
- Level: zap.NewAtomicLevelAt(zap.DebugLevel),
- Encoding: "console",
- EncoderConfig: zapcore.EncoderConfig{
- MessageKey: "message",
- LevelKey: "level",
- TimeKey: "time",
- NameKey: "name",
- EncodeName: ColoredHashedNameEncoder,
- EncodeLevel: ColoredLevelEncoder,
- EncodeTime: UTCTimeEncoder,
- EncodeCaller: zapcore.ShortCallerEncoder,
- },
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
- }
- }
-
- if cfg.Level != "" {
- level := zap.NewAtomicLevel()
- if err := level.UnmarshalText([]byte(cfg.Level)); err == nil {
- zCfg.Level = level
- }
- }
-
- if cfg.Encoding != "" {
- zCfg.Encoding = cfg.Encoding
- }
-
- if len(cfg.Output) != 0 {
- zCfg.OutputPaths = cfg.Output
- }
-
- if len(cfg.ErrorOutput) != 0 {
- zCfg.ErrorOutputPaths = cfg.ErrorOutput
- }
-
- // if we also have a file logger specified in the config
- // init it
- // otherwise - return standard config
- if cfg.FileLogger != nil {
- // init absent options
- cfg.FileLogger.InitDefaults()
-
- w := zapcore.AddSync(
- &lumberjack.Logger{
- Filename: cfg.FileLogger.LogOutput,
- MaxSize: cfg.FileLogger.MaxSize,
- MaxAge: cfg.FileLogger.MaxAge,
- MaxBackups: cfg.FileLogger.MaxBackups,
- Compress: cfg.FileLogger.Compress,
- },
- )
-
- core := zapcore.NewCore(
- zapcore.NewJSONEncoder(zCfg.EncoderConfig),
- w,
- zCfg.Level,
- )
- return zap.New(core), nil
- }
-
- return zCfg.Build()
-}
-
-// InitDefault initializes the default logger configuration (development mode, debug level)
-func (cfg *Config) InitDefault() {
- if cfg.Mode == "" {
- cfg.Mode = development
- }
- if cfg.Level == "" {
- cfg.Level = "debug"
- }
-}
diff --git a/plugins/logger/encoder.go b/plugins/logger/encoder.go
deleted file mode 100644
index 4ff583c4..00000000
--- a/plugins/logger/encoder.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package logger
-
-import (
- "hash/fnv"
- "strings"
- "time"
-
- "github.com/fatih/color"
- "go.uber.org/zap/zapcore"
-)
-
-var colorMap = []func(string, ...interface{}) string{
- color.HiYellowString,
- color.HiGreenString,
- color.HiBlueString,
- color.HiRedString,
- color.HiCyanString,
- color.HiMagentaString,
-}
-
-// ColoredLevelEncoder colorizes log levels.
-func ColoredLevelEncoder(level zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
- switch level {
- case zapcore.DebugLevel:
- enc.AppendString(color.HiWhiteString(level.CapitalString()))
- case zapcore.InfoLevel:
- enc.AppendString(color.HiCyanString(level.CapitalString()))
- case zapcore.WarnLevel:
- enc.AppendString(color.HiYellowString(level.CapitalString()))
- case zapcore.ErrorLevel, zapcore.DPanicLevel:
- enc.AppendString(color.HiRedString(level.CapitalString()))
- case zapcore.PanicLevel, zapcore.FatalLevel:
- enc.AppendString(color.HiMagentaString(level.CapitalString()))
- }
-}
-
-// ColoredNameEncoder colorizes service names.
-func ColoredNameEncoder(s string, enc zapcore.PrimitiveArrayEncoder) {
- if len(s) < 12 {
- s += strings.Repeat(" ", 12-len(s))
- }
-
- enc.AppendString(color.HiGreenString(s))
-}
-
-// ColoredHashedNameEncoder colorizes service names and assigns different colors to different names.
-func ColoredHashedNameEncoder(s string, enc zapcore.PrimitiveArrayEncoder) {
- if len(s) < 12 {
- s += strings.Repeat(" ", 12-len(s))
- }
-
- colorID := stringHash(s, len(colorMap))
- enc.AppendString(colorMap[colorID](s))
-}
-
-// UTCTimeEncoder encodes time into short UTC specific timestamp.
-func UTCTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
- enc.AppendString(t.UTC().Format("2006/01/02 15:04:05"))
-}
-
-// stringHash returns a hash of the name bounded by base
-func stringHash(name string, base int) int {
- h := fnv.New32a()
- _, _ = h.Write([]byte(name))
- return int(h.Sum32()) % base
-}
diff --git a/plugins/logger/enums.go b/plugins/logger/enums.go
deleted file mode 100644
index 803eace0..00000000
--- a/plugins/logger/enums.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package logger
-
-// Mode represents available logger modes
-type Mode string
-
-const (
- none Mode = "none"
- off Mode = "off"
- production Mode = "production"
- development Mode = "development"
- raw Mode = "raw"
-)
diff --git a/plugins/logger/interface.go b/plugins/logger/interface.go
deleted file mode 100644
index 827f9821..00000000
--- a/plugins/logger/interface.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package logger
-
-// Logger is a general RR log interface
-type Logger interface {
- Debug(msg string, keyvals ...interface{})
- Info(msg string, keyvals ...interface{})
- Warn(msg string, keyvals ...interface{})
- Error(msg string, keyvals ...interface{})
-}
-
-// WithLogger creates a child logger and adds structured context to it
-type WithLogger interface {
- With(keyvals ...interface{}) Logger
-}
diff --git a/plugins/logger/plugin.go b/plugins/logger/plugin.go
deleted file mode 100644
index ffbf7f5e..00000000
--- a/plugins/logger/plugin.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package logger
-
-import (
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "go.uber.org/zap"
-)
-
-// PluginName declares plugin name.
-const PluginName = "logs"
-
-// ZapLogger manages zap logger.
-type ZapLogger struct {
- base *zap.Logger
- cfg *Config
- channels ChannelConfig
-}
-
-// Init logger service.
-func (z *ZapLogger) Init(cfg config.Configurer) error {
-	const op = errors.Op("logger_plugin_init")
- var err error
- // if not configured, configure with default params
- if !cfg.Has(PluginName) {
- z.cfg = &Config{}
- z.cfg.InitDefault()
-
- z.base, err = z.cfg.BuildLogger()
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- return nil
- }
-
- err = cfg.UnmarshalKey(PluginName, &z.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- err = cfg.UnmarshalKey(PluginName, &z.channels)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- z.base, err = z.cfg.BuildLogger()
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
- return nil
-}
-
-// NamedLogger returns a logger dedicated to the specific channel. Similar to Named(), but also reads the per-channel config params.
-func (z *ZapLogger) NamedLogger(name string) (Logger, error) {
- if cfg, ok := z.channels.Channels[name]; ok {
- l, err := cfg.BuildLogger()
- if err != nil {
- return nil, err
- }
- return NewZapAdapter(l.Named(name)), nil
- }
-
- return NewZapAdapter(z.base.Named(name)), nil
-}
-
-// ServiceLogger returns a logger dedicated to the specific service. Similar to Named(), but also reads the per-channel config params.
-func (z *ZapLogger) ServiceLogger(n endure.Named) (Logger, error) {
- return z.NamedLogger(n.Name())
-}
-
-// Provides declares factory methods.
-func (z *ZapLogger) Provides() []interface{} {
- return []interface{}{
- z.ServiceLogger,
- }
-}
-
-// Name returns user-friendly plugin name
-func (z *ZapLogger) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (z *ZapLogger) Available() {
-}
diff --git a/plugins/logger/std_log_adapter.go b/plugins/logger/std_log_adapter.go
deleted file mode 100644
index 479aa565..00000000
--- a/plugins/logger/std_log_adapter.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package logger
-
-import (
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-// StdLogAdapter can be passed to the http.Server or any place which requires a standard logger to redirect output
-// to the logger plugin
-type StdLogAdapter struct {
- log Logger
-}
-
-// Write io.Writer interface implementation
-func (s *StdLogAdapter) Write(p []byte) (n int, err error) {
- s.log.Error("server internal error", "message", utils.AsString(p))
- return len(p), nil
-}
-
-// NewStdAdapter constructs StdLogAdapter
-func NewStdAdapter(log Logger) *StdLogAdapter {
- logAdapter := &StdLogAdapter{
- log: log,
- }
-
- return logAdapter
-}
diff --git a/plugins/logger/zap_adapter.go b/plugins/logger/zap_adapter.go
deleted file mode 100644
index 1c68cf25..00000000
--- a/plugins/logger/zap_adapter.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package logger
-
-import (
- "fmt"
-
- "go.uber.org/zap"
- core "go.uber.org/zap/zapcore"
-)
-
-type ZapAdapter struct {
- zl *zap.Logger
-}
-
-// NewZapAdapter constructs a ZapAdapter which exposes zap through the general log interface
-func NewZapAdapter(zapLogger *zap.Logger) *ZapAdapter {
- return &ZapAdapter{
- zl: zapLogger.WithOptions(zap.AddCallerSkip(1)),
- }
-}
-
-func separateFields(keyVals []interface{}) ([]zap.Field, []interface{}) {
- var fields []zap.Field
- var pairedKeyVals []interface{}
-
- for key := range keyVals {
- switch value := keyVals[key].(type) {
- case zap.Field:
- fields = append(fields, value)
- case core.ObjectMarshaler:
- fields = append(fields, zap.Inline(value))
- default:
- pairedKeyVals = append(pairedKeyVals, value)
- }
- }
- return fields, pairedKeyVals
-}
-
-func (log *ZapAdapter) fields(keyvals []interface{}) []zap.Field {
- // separate any zap fields from other structs
- zapFields, keyvals := separateFields(keyvals)
-
-	// we should have an even number of keys and values
- if len(keyvals)%2 != 0 {
- return []zap.Field{zap.Error(fmt.Errorf("odd number of keyvals pairs: %v", keyvals))}
- }
-
- fields := make([]zap.Field, 0, len(keyvals)/2+len(zapFields))
- for i := 0; i < len(keyvals); i += 2 {
- key, ok := keyvals[i].(string)
- if !ok {
- key = fmt.Sprintf("%v", keyvals[i])
- }
- fields = append(fields, zap.Any(key, keyvals[i+1]))
- }
- // add all the fields
- fields = append(fields, zapFields...)
-
- return fields
-}
-
-func (log *ZapAdapter) Debug(msg string, keyvals ...interface{}) {
- log.zl.Debug(msg, log.fields(keyvals)...)
-}
-
-func (log *ZapAdapter) Info(msg string, keyvals ...interface{}) {
- log.zl.Info(msg, log.fields(keyvals)...)
-}
-
-func (log *ZapAdapter) Warn(msg string, keyvals ...interface{}) {
- log.zl.Warn(msg, log.fields(keyvals)...)
-}
-
-func (log *ZapAdapter) Error(msg string, keyvals ...interface{}) {
- log.zl.Error(msg, log.fields(keyvals)...)
-}
-
-func (log *ZapAdapter) With(keyvals ...interface{}) Logger {
- return NewZapAdapter(log.zl.With(log.fields(keyvals)...))
-}
diff --git a/plugins/memcached/memcachedkv/config.go b/plugins/memcached/memcachedkv/config.go
deleted file mode 100644
index 569e2573..00000000
--- a/plugins/memcached/memcachedkv/config.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package memcachedkv
-
-type Config struct {
-	// Addr is the list of memcached addresses; port 11211 is used by default
- Addr []string
-}
-
-func (s *Config) InitDefaults() {
- if s.Addr == nil {
- s.Addr = []string{"127.0.0.1:11211"} // default url for memcached
- }
-}
diff --git a/plugins/memcached/memcachedkv/driver.go b/plugins/memcached/memcachedkv/driver.go
deleted file mode 100644
index dcb071b4..00000000
--- a/plugins/memcached/memcachedkv/driver.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package memcachedkv
-
-import (
- "strings"
- "time"
-
- "github.com/bradfitz/gomemcache/memcache"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
-)
-
-type Driver struct {
- client *memcache.Client
- log logger.Logger
- cfg *Config
-}
-
-// NewMemcachedDriver returns a memcache client using the provided server(s)
-// with equal weight. If a server is listed multiple times,
-// it gets a proportional amount of weight.
-func NewMemcachedDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) {
- const op = errors.Op("new_memcached_driver")
-
- s := &Driver{
- log: log,
- }
-
- err := cfgPlugin.UnmarshalKey(key, &s.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- if s.cfg == nil {
- return nil, errors.E(op, errors.Errorf("config not found by provided key: %s", key))
- }
-
- s.cfg.InitDefaults()
-
- m := memcache.New(s.cfg.Addr...)
- s.client = m
-
- return s, nil
-}
-
-// Has checks the key for existence
-func (d *Driver) Has(keys ...string) (map[string]bool, error) {
- const op = errors.Op("memcached_plugin_has")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
- m := make(map[string]bool, len(keys))
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- exist, err := d.client.Get(keys[i])
-
- if err != nil {
- // ErrCacheMiss means that a Get failed because the item wasn't present.
- if err == memcache.ErrCacheMiss {
- continue
- }
- return nil, errors.E(op, err)
- }
- if exist != nil {
- m[keys[i]] = true
- }
- }
- return m, nil
-}
-
-// Get gets the item for the given key. ErrCacheMiss is returned for a
-// memcache cache miss. The key must be at most 250 bytes in length.
-func (d *Driver) Get(key string) ([]byte, error) {
- const op = errors.Op("memcached_plugin_get")
-	// catch whitespace-only keys like " "
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- data, err := d.client.Get(key)
- if err != nil {
- // ErrCacheMiss means that a Get failed because the item wasn't present.
- if err == memcache.ErrCacheMiss {
- return nil, nil
- }
- return nil, errors.E(op, err)
- }
- if data != nil {
- // return the value by the key
- return data.Value, nil
- }
-	// data is nil for some reason and the error is also nil
- return nil, nil
-}
-
-// MGet returns a map with the key as a string
-// and the value as []byte
-func (d *Driver) MGet(keys ...string) (map[string][]byte, error) {
- const op = errors.Op("memcached_plugin_mget")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string][]byte, len(keys))
- for i := range keys {
-		// gomemcache also provides GetMulti, which could be used here
- data, err := d.client.Get(keys[i])
- if err != nil {
- // ErrCacheMiss means that a Get failed because the item wasn't present.
- if err == memcache.ErrCacheMiss {
- continue
- }
- return nil, errors.E(op, err)
- }
- if data != nil {
- m[keys[i]] = data.Value
- }
- }
-
- return m, nil
-}
-
-// Set sets the KV pairs. Keys should be 250 bytes maximum
-// TTL:
-// Expiration is the cache expiration time, in seconds: either a relative
-// time from now (up to 1 month), or an absolute Unix epoch time.
-// Zero means the Item has no expiration time.
-func (d *Driver) Set(items ...*kvv1.Item) error {
- const op = errors.Op("memcached_plugin_set")
- if items == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- for i := range items {
- if items[i] == nil {
- return errors.E(op, errors.EmptyItem)
- }
-
- // pre-allocate item
- memcachedItem := &memcache.Item{
- Key: items[i].Key,
- // unsafe convert
- Value: items[i].Value,
- Flags: 0,
- }
-
-		// set the expiration if the TTL isn't empty
- if items[i].Timeout != "" {
- // verify the TTL
- t, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return err
- }
- memcachedItem.Expiration = int32(t.Unix())
- }
-
- err := d.client.Set(memcachedItem)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// MExpire Expiration is the cache expiration time, in seconds: either a relative
-// time from now (up to 1 month), or an absolute Unix epoch time.
-// Zero means the Item has no expiration time.
-func (d *Driver) MExpire(items ...*kvv1.Item) error {
- const op = errors.Op("memcached_plugin_mexpire")
- for i := range items {
- if items[i] == nil {
- continue
- }
- if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" {
- return errors.E(op, errors.Str("should set timeout and at least one key"))
- }
-
- // verify provided TTL
- t, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return errors.E(op, err)
- }
-
- // Touch updates the expiry for the given key. The seconds parameter is either
- // a Unix timestamp or, if seconds is less than 1 month, the number of seconds
- // into the future at which time the item will expire. Zero means the item has
- // no expiration time. ErrCacheMiss is returned if the key is not in the cache.
- // The key must be at most 250 bytes in length.
- err = d.client.Touch(items[i].Key, int32(t.Unix()))
- if err != nil {
- return errors.E(op, err)
- }
- }
- return nil
-}
-
-// TTL returns the time in seconds (int32) for the given keys
-func (d *Driver) TTL(_ ...string) (map[string]string, error) {
- const op = errors.Op("memcached_plugin_ttl")
- return nil, errors.E(op, errors.Str("not valid request for memcached, see https://github.com/memcached/memcached/issues/239"))
-}
-
-func (d *Driver) Delete(keys ...string) error {
-	const op = errors.Op("memcached_plugin_delete")
- if keys == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return errors.E(op, errors.EmptyKey)
- }
- }
-
- for i := range keys {
- err := d.client.Delete(keys[i])
-		if err != nil {
-			// ErrCacheMiss means the item wasn't present, which is fine for Delete
- if err == memcache.ErrCacheMiss {
- continue
- }
- return errors.E(op, err)
- }
- }
- return nil
-}
-
-func (d *Driver) Clear() error {
- err := d.client.DeleteAll()
- if err != nil {
- d.log.Error("flush_all operation failed", "error", err)
- return err
- }
-
- return nil
-}
-
-func (d *Driver) Stop() {}
diff --git a/plugins/memcached/plugin.go b/plugins/memcached/plugin.go
deleted file mode 100644
index 47bca0e2..00000000
--- a/plugins/memcached/plugin.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package memcached
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/memcached/memcachedkv"
-)
-
-const (
- PluginName string = "memcached"
- RootPluginName string = "kv"
-)
-
-type Plugin struct {
- // config plugin
- cfgPlugin config.Configurer
- // logger
- log logger.Logger
-}
-
-func (s *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- if !cfg.Has(RootPluginName) {
- return errors.E(errors.Disabled)
- }
-
- s.cfgPlugin = cfg
- s.log = log
- return nil
-}
-
-// Name returns plugin user-friendly name
-func (s *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (s *Plugin) Available() {}
-
-func (s *Plugin) KVConstruct(key string) (kv.Storage, error) {
-	const op = errors.Op("memcached_plugin_provide")
- st, err := memcachedkv.NewMemcachedDriver(s.log, key, s.cfgPlugin)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return st, nil
-}
diff --git a/plugins/memory/memoryjobs/consumer.go b/plugins/memory/memoryjobs/consumer.go
deleted file mode 100644
index 79246063..00000000
--- a/plugins/memory/memoryjobs/consumer.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package memoryjobs
-
-import (
- "context"
- "sync/atomic"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const (
- prefetch string = "prefetch"
- goroutinesMax uint64 = 1000
-)
-
-type Config struct {
- Prefetch uint64 `mapstructure:"prefetch"`
-}
-
-type consumer struct {
- cfg *Config
- log logger.Logger
- eh events.Handler
- pipeline atomic.Value
- pq priorityqueue.Queue
- localPrefetch chan *Item
-
-	// max number of in-flight time.Sleep goroutines
- goroutines uint64
-
- delayed *int64
- active *int64
-
- listeners uint32
- stopCh chan struct{}
-}
-
-func NewJobBroker(configKey string, log logger.Logger, cfg config.Configurer, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_ephemeral_pipeline")
-
- jb := &consumer{
- log: log,
- pq: pq,
- eh: eh,
- goroutines: 0,
- active: utils.Int64(0),
- delayed: utils.Int64(0),
- stopCh: make(chan struct{}),
- }
-
- err := cfg.UnmarshalKey(configKey, &jb.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- if jb.cfg == nil {
- return nil, errors.E(op, errors.Errorf("config not found by provided key: %s", configKey))
- }
-
- if jb.cfg.Prefetch == 0 {
- jb.cfg.Prefetch = 100_000
- }
-
- // initialize a local queue
- jb.localPrefetch = make(chan *Item, jb.cfg.Prefetch)
-
- return jb, nil
-}
-
-func FromPipeline(pipeline *pipeline.Pipeline, log logger.Logger, eh events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- return &consumer{
- log: log,
- pq: pq,
- eh: eh,
- localPrefetch: make(chan *Item, pipeline.Int(prefetch, 100_000)),
- goroutines: 0,
- active: utils.Int64(0),
- delayed: utils.Int64(0),
- stopCh: make(chan struct{}),
- }, nil
-}
-
-func (c *consumer) Push(ctx context.Context, jb *job.Job) error {
- const op = errors.Op("ephemeral_push")
-
- // check if the pipeline registered
- _, ok := c.pipeline.Load().(*pipeline.Pipeline)
- if !ok {
- return errors.E(op, errors.Errorf("no such pipeline: %s", jb.Options.Pipeline))
- }
-
- err := c.handleItem(ctx, fromJob(jb))
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (c *consumer) State(_ context.Context) (*jobState.State, error) {
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- return &jobState.State{
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Queue: pipe.Name(),
- Active: atomic.LoadInt64(c.active),
- Delayed: atomic.LoadInt64(c.delayed),
- Ready: ready(atomic.LoadUint32(&c.listeners)),
- }, nil
-}
-
-func (c *consumer) Register(_ context.Context, pipeline *pipeline.Pipeline) error {
- c.pipeline.Store(pipeline)
- return nil
-}
-
-func (c *consumer) Run(_ context.Context, pipe *pipeline.Pipeline) error {
- const op = errors.Op("memory_jobs_run")
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: time.Now(),
- })
-
- l := atomic.LoadUint32(&c.listeners)
- // listener already active
- if l == 1 {
- c.log.Warn("listener already in the active state")
- return errors.E(op, errors.Str("listener already in the active state"))
- }
-
- c.consume()
- atomic.StoreUint32(&c.listeners, 1)
-
- return nil
-}
-
-func (c *consumer) Pause(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested pause on: ", p)
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 0 {
- c.log.Warn("no active listeners, nothing to pause")
- return
- }
-
- atomic.AddUint32(&c.listeners, ^uint32(0))
-
- // stop the consumer
- c.stopCh <- struct{}{}
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipePaused,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Resume(_ context.Context, p string) {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested resume on: ", p)
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // listener already active
- if l == 1 {
- c.log.Warn("listener already in the active state")
- return
- }
-
- // resume the consumer on the same channel
- c.consume()
-
- atomic.StoreUint32(&c.listeners, 1)
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Stop(_ context.Context) error {
- start := time.Now()
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- select {
- case c.stopCh <- struct{}{}:
- default:
- break
- }
-
- for i := 0; i < len(c.localPrefetch); i++ {
- // drain all jobs from the channel
- <-c.localPrefetch
- }
-
- c.localPrefetch = nil
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeStopped,
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (c *consumer) handleItem(ctx context.Context, msg *Item) error {
- const op = errors.Op("ephemeral_handle_request")
-	// handle delayed jobs
-	// theoretically, a bad user may send millions of requests with a delay and produce a billion (for example)
-	// goroutines here, so we should limit the number of goroutines
- if msg.Options.Delay > 0 {
- // if we have 1000 goroutines waiting on the delay - reject 1001
- if atomic.LoadUint64(&c.goroutines) >= goroutinesMax {
- return errors.E(op, errors.Str("max concurrency number reached"))
- }
-
- go func(jj *Item) {
- atomic.AddUint64(&c.goroutines, 1)
- atomic.AddInt64(c.delayed, 1)
-
- time.Sleep(jj.Options.DelayDuration())
-
- select {
- case c.localPrefetch <- jj:
- atomic.AddUint64(&c.goroutines, ^uint64(0))
- default:
- c.log.Warn("can't push job", "error", "local queue closed or full")
- }
- }(msg)
-
- return nil
- }
-
- // increase number of the active jobs
- atomic.AddInt64(c.active, 1)
-
- // insert to the local, limited pipeline
- select {
- case c.localPrefetch <- msg:
- return nil
- case <-ctx.Done():
- return errors.E(op, errors.Errorf("local pipeline is full, consider to increase prefetch number, current limit: %d, context error: %v", c.cfg.Prefetch, ctx.Err()))
- }
-}
-
-func (c *consumer) consume() {
- go func() {
-		// redirect items from the local prefetch queue into the priority queue
- for {
- select {
- case item, ok := <-c.localPrefetch:
- if !ok {
- c.log.Warn("ephemeral local prefetch queue closed")
- return
- }
-
-				// set the requeue callback and counters
- item.Options.requeueFn = c.handleItem
- item.Options.active = c.active
- item.Options.delayed = c.delayed
-
- c.pq.Insert(item)
- case <-c.stopCh:
- return
- }
- }
- }()
-}
-
-func ready(r uint32) bool {
- return r > 0
-}
diff --git a/plugins/memory/memoryjobs/item.go b/plugins/memory/memoryjobs/item.go
deleted file mode 100644
index f4d62ada..00000000
--- a/plugins/memory/memoryjobs/item.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package memoryjobs
-
-import (
- "context"
- "sync/atomic"
- "time"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type Item struct {
- // Job contains name of job broker (usually PHP class).
- Job string `json:"job"`
-
-	// Ident is a unique identifier of the job; it should be provided from outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
-	// Headers with key-value pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle given job.
-type Options struct {
-	// Priority is the job priority, default is 10
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-
- // private
- requeueFn func(context.Context, *Item) error
- active *int64
- delayed *int64
-}
-
-// DelayDuration returns delay duration in a form of time.Duration.
-func (o *Options) DelayDuration() time.Duration {
- return time.Second * time.Duration(o.Delay)
-}
-
-func (i *Item) ID() string {
- return i.Ident
-}
-
-func (i *Item) Priority() int64 {
- return i.Options.Priority
-}
-
-// Body packs job payload into binary payload.
-func (i *Item) Body() []byte {
- return utils.AsBytes(i.Payload)
-}
-
-// Context packs job context (job, id) into binary payload.
-func (i *Item) Context() ([]byte, error) {
- ctx, err := json.Marshal(
- struct {
- ID string `json:"id"`
- Job string `json:"job"`
- Headers map[string][]string `json:"headers"`
- Pipeline string `json:"pipeline"`
- }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
- )
-
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-func (i *Item) Ack() error {
- i.atomicallyReduceCount()
- return nil
-}
-
-func (i *Item) Nack() error {
- i.atomicallyReduceCount()
- return nil
-}
-
-func (i *Item) Requeue(headers map[string][]string, delay int64) error {
- // overwrite the delay
- i.Options.Delay = delay
- i.Headers = headers
-
- i.atomicallyReduceCount()
-
- err := i.Options.requeueFn(context.Background(), i)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// atomicallyReduceCount reduces the counter of active or delayed jobs
-func (i *Item) atomicallyReduceCount() {
- // if job was delayed, reduce number of the delayed jobs
- if i.Options.Delay > 0 {
- atomic.AddInt64(i.Options.delayed, ^int64(0))
- return
- }
-
- // otherwise, reduce number of the active jobs
- atomic.AddInt64(i.Options.active, ^int64(0))
- // noop for the in-memory
-}
-
-func fromJob(job *job.Job) *Item {
- return &Item{
- Job: job.Job,
- Ident: job.Ident,
- Payload: job.Payload,
- Headers: job.Headers,
- Options: &Options{
- Priority: job.Options.Priority,
- Pipeline: job.Options.Pipeline,
- Delay: job.Options.Delay,
- },
- }
-}
diff --git a/plugins/memory/memorykv/config.go b/plugins/memory/memorykv/config.go
deleted file mode 100644
index a8a8993f..00000000
--- a/plugins/memory/memorykv/config.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package memorykv
-
-// Config is default config for the in-memory driver
-type Config struct {
-	// Interval (in seconds) between GC checks
- Interval int
-}
-
-// InitDefaults sets the default GC check interval (60 seconds)
-func (c *Config) InitDefaults() {
- if c.Interval == 0 {
- c.Interval = 60 // seconds
- }
-}
diff --git a/plugins/memory/memorykv/kv.go b/plugins/memory/memorykv/kv.go
deleted file mode 100644
index 5383275c..00000000
--- a/plugins/memory/memorykv/kv.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package memorykv
-
-import (
- "strings"
- "sync"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
-)
-
-type Driver struct {
- clearMu sync.RWMutex
- heap sync.Map
-	// stop is used to stop the keys GC
- stop chan struct{}
- log logger.Logger
- cfg *Config
-}
-
-func NewInMemoryDriver(key string, log logger.Logger, cfgPlugin config.Configurer) (*Driver, error) {
- const op = errors.Op("new_in_memory_driver")
-
- d := &Driver{
- stop: make(chan struct{}),
- log: log,
- }
-
- err := cfgPlugin.UnmarshalKey(key, &d.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- if d.cfg == nil {
- return nil, errors.E(op, errors.Errorf("config not found by provided key: %s", key))
- }
-
- d.cfg.InitDefaults()
-
- go d.gc()
-
- return d, nil
-}
-
-func (d *Driver) Has(keys ...string) (map[string]bool, error) {
- const op = errors.Op("in_memory_plugin_has")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
- m := make(map[string]bool)
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
-
- if _, ok := d.heap.Load(keys[i]); ok {
- m[keys[i]] = true
- }
- }
-
- return m, nil
-}
-
-func (d *Driver) Get(key string) ([]byte, error) {
- const op = errors.Op("in_memory_plugin_get")
-	// catch whitespace-only keys like " "
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
-
- if data, exist := d.heap.Load(key); exist {
-		// the type assertion can't panic here:
-		// the heap only ever stores *kvv1.Item values, see the Set function
- return data.(*kvv1.Item).Value, nil
- }
- return nil, nil
-}
-
-func (d *Driver) MGet(keys ...string) (map[string][]byte, error) {
- const op = errors.Op("in_memory_plugin_mget")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string][]byte, len(keys))
-
- for i := range keys {
- if value, ok := d.heap.Load(keys[i]); ok {
- m[keys[i]] = value.(*kvv1.Item).Value
- }
- }
-
- return m, nil
-}
-
-func (d *Driver) Set(items ...*kvv1.Item) error {
- const op = errors.Op("in_memory_plugin_set")
- if items == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- for i := range items {
- if items[i] == nil {
- continue
- }
- // TTL is set
- if items[i].Timeout != "" {
- // check the TTL in the item
- _, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return err
- }
- }
-
- d.heap.Store(items[i].Key, items[i])
- }
- return nil
-}
-
-// MExpire sets the expiration time for the key.
-// If the key already has an expiration time, it will be overwritten.
-func (d *Driver) MExpire(items ...*kvv1.Item) error {
- const op = errors.Op("in_memory_plugin_mexpire")
- for i := range items {
- if items[i] == nil {
- continue
- }
- if items[i].Timeout == "" || strings.TrimSpace(items[i].Key) == "" {
- return errors.E(op, errors.Str("should set timeout and at least one key"))
- }
-
-		// if the key exists, overwrite its value
- if pItem, ok := d.heap.LoadAndDelete(items[i].Key); ok {
- // check that time is correct
- _, err := time.Parse(time.RFC3339, items[i].Timeout)
- if err != nil {
- return errors.E(op, err)
- }
- tmp := pItem.(*kvv1.Item)
-			// assume that the timeout is in the future;
-			// the in-memory driver is intended FOR TESTING PURPOSES,
-			// so the logic isn't ideal
- d.heap.Store(items[i].Key, &kvv1.Item{
- Key: items[i].Key,
- Value: tmp.Value,
- Timeout: items[i].Timeout,
- })
- }
- }
-
- return nil
-}
-
-func (d *Driver) TTL(keys ...string) (map[string]string, error) {
- const op = errors.Op("in_memory_plugin_ttl")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string]string, len(keys))
-
- for i := range keys {
- if item, ok := d.heap.Load(keys[i]); ok {
- m[keys[i]] = item.(*kvv1.Item).Timeout
- }
- }
- return m, nil
-}
-
-func (d *Driver) Delete(keys ...string) error {
- const op = errors.Op("in_memory_plugin_delete")
- if keys == nil {
- return errors.E(op, errors.NoKeys)
- }
-
- // should not be empty keys
- for i := range keys {
- keyTrimmed := strings.TrimSpace(keys[i])
- if keyTrimmed == "" {
- return errors.E(op, errors.EmptyKey)
- }
- }
-
- for i := range keys {
- d.heap.Delete(keys[i])
- }
- return nil
-}
-
-func (d *Driver) Clear() error {
- d.clearMu.Lock()
- d.heap = sync.Map{}
- d.clearMu.Unlock()
-
- return nil
-}
-
-func (d *Driver) Stop() {
- d.stop <- struct{}{}
-}
-
-// ================================== PRIVATE ======================================
-
-func (d *Driver) gc() {
- ticker := time.NewTicker(time.Duration(d.cfg.Interval) * time.Second)
- defer ticker.Stop()
- for {
- select {
- case <-d.stop:
- return
- case now := <-ticker.C:
-			// the mutex is needed to safely clear the map
- d.clearMu.RLock()
-
-			// check all stored items on every tick
- d.heap.Range(func(key, value interface{}) bool {
- v := value.(*kvv1.Item)
- if v.Timeout == "" {
- return true
- }
-
- t, err := time.Parse(time.RFC3339, v.Timeout)
- if err != nil {
- return false
- }
-
- if now.After(t) {
- d.log.Debug("key deleted", "key", key)
- d.heap.Delete(key)
- }
- return true
- })
-
- d.clearMu.RUnlock()
- }
- }
-}
diff --git a/plugins/memory/memorypubsub/pubsub.go b/plugins/memory/memorypubsub/pubsub.go
deleted file mode 100644
index 231da134..00000000
--- a/plugins/memory/memorypubsub/pubsub.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package memorypubsub
-
-import (
- "context"
- "sync"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/pkg/bst"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type PubSubDriver struct {
- sync.RWMutex
- // channel with the messages from the RPC
- pushCh chan *pubsub.Message
- // user-subscribed topics
- storage bst.Storage
- log logger.Logger
-}
-
-func NewPubSubDriver(log logger.Logger, _ string) (*PubSubDriver, error) {
- ps := &PubSubDriver{
- pushCh: make(chan *pubsub.Message, 100),
- storage: bst.NewBST(),
- log: log,
- }
- return ps, nil
-}
-
-func (p *PubSubDriver) Publish(msg *pubsub.Message) error {
- p.pushCh <- msg
- return nil
-}
-
-func (p *PubSubDriver) PublishAsync(msg *pubsub.Message) {
- go func() {
- p.pushCh <- msg
- }()
-}
-
-func (p *PubSubDriver) Subscribe(connectionID string, topics ...string) error {
- p.Lock()
- defer p.Unlock()
- for i := 0; i < len(topics); i++ {
- p.storage.Insert(connectionID, topics[i])
- }
- return nil
-}
-
-func (p *PubSubDriver) Unsubscribe(connectionID string, topics ...string) error {
- p.Lock()
- defer p.Unlock()
- for i := 0; i < len(topics); i++ {
- p.storage.Remove(connectionID, topics[i])
- }
- return nil
-}
-
-func (p *PubSubDriver) Connections(topic string, res map[string]struct{}) {
- p.RLock()
- defer p.RUnlock()
-
- ret := p.storage.Get(topic)
- for rr := range ret {
- res[rr] = struct{}{}
- }
-}
-
-func (p *PubSubDriver) Next(ctx context.Context) (*pubsub.Message, error) {
- const op = errors.Op("pubsub_memory")
- select {
- case msg := <-p.pushCh:
- if msg == nil {
- return nil, nil
- }
-
- p.RLock()
- defer p.RUnlock()
-		// push only messages whose topics have subscribers
-		// TODO: is there a better approach?
-		// if we have active subscribers - send the message to the topic,
-		// otherwise return nil
- if ok := p.storage.Contains(msg.Topic); ok {
- return msg, nil
- }
- case <-ctx.Done():
- return nil, errors.E(op, errors.TimeOut, ctx.Err())
- }
-
- return nil, nil
-}
diff --git a/plugins/memory/plugin.go b/plugins/memory/plugin.go
deleted file mode 100644
index 87e0f84b..00000000
--- a/plugins/memory/plugin.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package memory
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/memory/memoryjobs"
- "github.com/spiral/roadrunner/v2/plugins/memory/memorykv"
- "github.com/spiral/roadrunner/v2/plugins/memory/memorypubsub"
-)
-
-const PluginName string = "memory"
-
-type Plugin struct {
- log logger.Logger
- cfg config.Configurer
-}
-
-func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- p.log = log
- p.cfg = cfg
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- return make(chan error, 1)
-}
-
-func (p *Plugin) Stop() error {
- return nil
-}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-func (p *Plugin) Available() {}
-
-// Drivers implementation
-
-func (p *Plugin) PSConstruct(key string) (pubsub.PubSub, error) {
- return memorypubsub.NewPubSubDriver(p.log, key)
-}
-
-func (p *Plugin) KVConstruct(key string) (kv.Storage, error) {
- const op = errors.Op("memory_plugin_construct")
- st, err := memorykv.NewInMemoryDriver(key, p.log, p.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
- return st, nil
-}
-
-// JobsConstruct creates new ephemeral consumer from the configuration
-func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return memoryjobs.NewJobBroker(configKey, p.log, p.cfg, e, pq)
-}
-
-// FromPipeline creates new ephemeral consumer from the provided pipeline
-func (p *Plugin) FromPipeline(pipeline *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return memoryjobs.FromPipeline(pipeline, p.log, e, pq)
-}
diff --git a/plugins/metrics/config.go b/plugins/metrics/config.go
deleted file mode 100644
index a2835130..00000000
--- a/plugins/metrics/config.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package metrics
-
-import (
- "fmt"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// Config configures metrics service.
-type Config struct {
- // Address to listen
- Address string
-
-	// Collect defines application-specific metrics.
- Collect map[string]Collector
-}
-
-type NamedCollector struct {
- // Name of the collector
- Name string `json:"name"`
-
- // Collector structure
- Collector `json:"collector"`
-}
-
-// CollectorType represents prometheus collector types
-type CollectorType string
-
-const (
- // Histogram type
- Histogram CollectorType = "histogram"
-
- // Gauge type
- Gauge CollectorType = "gauge"
-
- // Counter type
- Counter CollectorType = "counter"
-
- // Summary type
- Summary CollectorType = "summary"
-)
-
-// Collector describes single application specific metric.
-type Collector struct {
- // Namespace of the metric.
- Namespace string `json:"namespace"`
- // Subsystem of the metric.
- Subsystem string `json:"subsystem"`
- // Collector type (histogram, gauge, counter, summary).
- Type CollectorType `json:"type"`
- // Help of collector.
- Help string `json:"help"`
- // Labels for vectorized metrics.
- Labels []string `json:"labels"`
- // Buckets for histogram metric.
- Buckets []float64 `json:"buckets"`
- // Objectives for the summary opts
- Objectives map[float64]float64 `json:"objectives"`
-}
-
-// getCollectors builds application-specific metric collectors.
-func (c *Config) getCollectors() (map[string]prometheus.Collector, error) {
- if c.Collect == nil {
- return nil, nil
- }
-
- collectors := make(map[string]prometheus.Collector)
-
- for name, m := range c.Collect {
- var collector prometheus.Collector
- switch m.Type {
- case Histogram:
- opts := prometheus.HistogramOpts{
- Name: name,
- Namespace: m.Namespace,
- Subsystem: m.Subsystem,
- Help: m.Help,
- Buckets: m.Buckets,
- }
-
- if len(m.Labels) != 0 {
- collector = prometheus.NewHistogramVec(opts, m.Labels)
- } else {
- collector = prometheus.NewHistogram(opts)
- }
- case Gauge:
- opts := prometheus.GaugeOpts{
- Name: name,
- Namespace: m.Namespace,
- Subsystem: m.Subsystem,
- Help: m.Help,
- }
-
- if len(m.Labels) != 0 {
- collector = prometheus.NewGaugeVec(opts, m.Labels)
- } else {
- collector = prometheus.NewGauge(opts)
- }
- case Counter:
- opts := prometheus.CounterOpts{
- Name: name,
- Namespace: m.Namespace,
- Subsystem: m.Subsystem,
- Help: m.Help,
- }
-
- if len(m.Labels) != 0 {
- collector = prometheus.NewCounterVec(opts, m.Labels)
- } else {
- collector = prometheus.NewCounter(opts)
- }
- case Summary:
- opts := prometheus.SummaryOpts{
- Name: name,
- Namespace: m.Namespace,
- Subsystem: m.Subsystem,
- Help: m.Help,
- Objectives: m.Objectives,
- }
-
- if len(m.Labels) != 0 {
- collector = prometheus.NewSummaryVec(opts, m.Labels)
- } else {
- collector = prometheus.NewSummary(opts)
- }
- default:
- return nil, fmt.Errorf("invalid metric type `%s` for `%s`", m.Type, name)
- }
-
- collectors[name] = collector
- }
-
- return collectors, nil
-}
-
-func (c *Config) InitDefaults() {
- if c.Address == "" {
- c.Address = "127.0.0.1:2112"
- }
-}
diff --git a/plugins/metrics/config_test.go b/plugins/metrics/config_test.go
deleted file mode 100644
index 665ec9cd..00000000
--- a/plugins/metrics/config_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package metrics
-
-import (
- "bytes"
- "testing"
-
- j "github.com/json-iterator/go"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/stretchr/testify/assert"
-)
-
-var json = j.ConfigCompatibleWithStandardLibrary
-
-func Test_Config_Hydrate_Error1(t *testing.T) {
- cfg := `{"request": {"From": "Something"}}`
- c := &Config{}
- f := new(bytes.Buffer)
- f.WriteString(cfg)
-
- err := json.Unmarshal(f.Bytes(), &c)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func Test_Config_Hydrate_Error2(t *testing.T) {
- cfg := `{"dir": "/dir/"`
- c := &Config{}
-
- f := new(bytes.Buffer)
- f.WriteString(cfg)
-
- err := json.Unmarshal(f.Bytes(), &c)
- assert.Error(t, err)
-}
-
-func Test_Config_Metrics(t *testing.T) {
- cfg := `{
-"collect":{
- "metric1":{"type": "gauge"},
- "metric2":{ "type": "counter"},
- "metric3":{"type": "summary"},
- "metric4":{"type": "histogram"}
-}
-}`
- c := &Config{}
- f := new(bytes.Buffer)
- f.WriteString(cfg)
-
- err := json.Unmarshal(f.Bytes(), &c)
- if err != nil {
- t.Fatal(err)
- }
-
- m, err := c.getCollectors()
- assert.NoError(t, err)
-
- assert.IsType(t, prometheus.NewGauge(prometheus.GaugeOpts{}), m["metric1"])
- assert.IsType(t, prometheus.NewCounter(prometheus.CounterOpts{}), m["metric2"])
- assert.IsType(t, prometheus.NewSummary(prometheus.SummaryOpts{}), m["metric3"])
- assert.IsType(t, prometheus.NewHistogram(prometheus.HistogramOpts{}), m["metric4"])
-}
-
-func Test_Config_MetricsVector(t *testing.T) {
- cfg := `{
-"collect":{
- "metric1":{"type": "gauge","labels":["label"]},
- "metric2":{ "type": "counter","labels":["label"]},
- "metric3":{"type": "summary","labels":["label"]},
- "metric4":{"type": "histogram","labels":["label"]}
-}
-}`
- c := &Config{}
- f := new(bytes.Buffer)
- f.WriteString(cfg)
-
- err := json.Unmarshal(f.Bytes(), &c)
- if err != nil {
- t.Fatal(err)
- }
-
- m, err := c.getCollectors()
- assert.NoError(t, err)
-
- assert.IsType(t, prometheus.NewGaugeVec(prometheus.GaugeOpts{}, []string{}), m["metric1"])
- assert.IsType(t, prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{}), m["metric2"])
- assert.IsType(t, prometheus.NewSummaryVec(prometheus.SummaryOpts{}, []string{}), m["metric3"])
- assert.IsType(t, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{}), m["metric4"])
-}
diff --git a/plugins/metrics/doc.go b/plugins/metrics/doc.go
deleted file mode 100644
index 1abe097a..00000000
--- a/plugins/metrics/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package metrics
diff --git a/plugins/metrics/interface.go b/plugins/metrics/interface.go
deleted file mode 100644
index 87ba4017..00000000
--- a/plugins/metrics/interface.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-type StatProvider interface {
- MetricsCollector() []prometheus.Collector
-}
diff --git a/plugins/metrics/plugin.go b/plugins/metrics/plugin.go
deleted file mode 100644
index d285e609..00000000
--- a/plugins/metrics/plugin.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package metrics
-
-import (
- "context"
- "crypto/tls"
- "net/http"
- "sync"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/collectors"
- "github.com/prometheus/client_golang/prometheus/promhttp"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "golang.org/x/sys/cpu"
-)
-
-const (
- // PluginName declares plugin name.
- PluginName = "metrics"
-	// maxHeaderSize declares the max header size for the prometheus server
-	maxHeaderSize = 1024 * 1024 * 100 // 100 MiB
-)
-
-// Plugin to manage application metrics using Prometheus.
-type Plugin struct {
- cfg *Config
- log logger.Logger
-	mu         sync.Mutex // protects the http server
- http *http.Server
-	collectors sync.Map // registered collectors: name -> prometheus.Collector
- registry *prometheus.Registry
-
- // prometheus Collectors
- statProviders []StatProvider
-}
-
-// Init service.
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("metrics_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &p.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- p.cfg.InitDefaults()
-
- p.log = log
- p.registry = prometheus.NewRegistry()
-
- // Default
- err = p.registry.Register(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
- if err != nil {
- return errors.E(op, err)
- }
-
- // Default
- err = p.registry.Register(collectors.NewGoCollector())
- if err != nil {
- return errors.E(op, err)
- }
-
- cl, err := p.cfg.getCollectors()
- if err != nil {
- return errors.E(op, err)
- }
-
-	// registration of these collectors happens later, in the Serve method
- for k, v := range cl {
- p.collectors.Store(k, v)
- }
-
- p.statProviders = make([]StatProvider, 0, 2)
-
- return nil
-}
-
-// Register new prometheus collector.
-func (p *Plugin) Register(c prometheus.Collector) error {
- return p.registry.Register(c)
-}
-
-// Serve prometheus metrics service.
-func (p *Plugin) Serve() chan error {
- errCh := make(chan error, 1)
-
- // register Collected stat providers
- for i := 0; i < len(p.statProviders); i++ {
- sp := p.statProviders[i]
- for _, c := range sp.MetricsCollector() {
- err := p.registry.Register(c)
- if err != nil {
- errCh <- err
- return errCh
- }
- }
- }
-
- p.collectors.Range(func(key, value interface{}) bool {
- // key - name
- // value - prometheus.Collector
- c := value.(prometheus.Collector)
- if err := p.registry.Register(c); err != nil {
- errCh <- err
- return false
- }
-
- return true
- })
-
- var topCipherSuites []uint16
- var defaultCipherSuitesTLS13 []uint16
-
- hasGCMAsmAMD64 := cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
- hasGCMAsmARM64 := cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
- // Keep in sync with crypto/aes/cipher_s390x.go.
- hasGCMAsmS390X := cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)
-
- hasGCMAsm := hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X
-
- if hasGCMAsm {
- // If AES-GCM hardware is provided then prioritize AES-GCM
- // cipher suites.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- } else {
- // Without AES-GCM hardware, we put the ChaCha20-Poly1305
- // cipher suites first.
- topCipherSuites = []uint16{
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- }
- defaultCipherSuitesTLS13 = []uint16{
- tls.TLS_CHACHA20_POLY1305_SHA256,
- tls.TLS_AES_128_GCM_SHA256,
- tls.TLS_AES_256_GCM_SHA384,
- }
- }
-
- DefaultCipherSuites := make([]uint16, 0, 22)
- DefaultCipherSuites = append(DefaultCipherSuites, topCipherSuites...)
- DefaultCipherSuites = append(DefaultCipherSuites, defaultCipherSuitesTLS13...)
-
- p.http = &http.Server{
- Addr: p.cfg.Address,
- Handler: promhttp.HandlerFor(p.registry, promhttp.HandlerOpts{}),
- IdleTimeout: time.Hour * 24,
- ReadTimeout: time.Minute * 60,
- MaxHeaderBytes: maxHeaderSize,
- ReadHeaderTimeout: time.Minute * 60,
- WriteTimeout: time.Minute * 60,
- TLSConfig: &tls.Config{
- CurvePreferences: []tls.CurveID{
- tls.CurveP256,
- tls.CurveP384,
- tls.CurveP521,
- tls.X25519,
- },
- CipherSuites: DefaultCipherSuites,
- MinVersion: tls.VersionTLS12,
- PreferServerCipherSuites: true,
- },
- }
-
- go func() {
- err := p.http.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- errCh <- err
- return
- }
- }()
-
- return errCh
-}
-
-// Stop prometheus metrics service.
-func (p *Plugin) Stop() error {
- p.mu.Lock()
- defer p.mu.Unlock()
-
- if p.http != nil {
- // timeout is 10 seconds
- ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
- defer cancel()
- err := p.http.Shutdown(ctx)
- if err != nil {
-			// shutdown errors are logged rather than returned
- p.log.Error("stop error", "error", errors.Errorf("error shutting down the metrics server: error %v", err))
- }
- }
- return nil
-}
-
-// Collects is used to collect all plugins which implement the metrics.StatProvider interface (and Named)
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.AddStatProvider,
- }
-}
-
-// AddStatProvider adds a metrics provider
-func (p *Plugin) AddStatProvider(stat StatProvider) error {
- p.statProviders = append(p.statProviders, stat)
-
- return nil
-}
-
-// Name returns the user-friendly plugin name
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// RPC interface satisfaction
-func (p *Plugin) RPC() interface{} {
- return &rpcServer{
- svc: p,
- log: p.log,
- }
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {}
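
Any plugin can feed its own collectors into this registry simply by implementing the StatProvider interface declared in interface.go; the Collects/AddStatProvider pair above does the wiring. A minimal sketch, assuming a hypothetical plugin named "myplugin" (the plugin name and counter are illustrative only, not part of this diff):

package myplugin

import "github.com/prometheus/client_golang/prometheus"

// Plugin exposes a single counter to the metrics plugin.
type Plugin struct {
	requests prometheus.Counter
}

func (p *Plugin) Init() error {
	p.requests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myplugin_requests_total",
		Help: "Total number of requests handled by myplugin.",
	})
	return nil
}

// MetricsCollector satisfies metrics.StatProvider; the metrics plugin
// registers the returned collectors at the start of Serve.
func (p *Plugin) MetricsCollector() []prometheus.Collector {
	return []prometheus.Collector{p.requests}
}

// Name satisfies endure.Named, which the metrics plugin also requires.
func (p *Plugin) Name() string { return "myplugin" }
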
diff --git a/plugins/metrics/rpc.go b/plugins/metrics/rpc.go
deleted file mode 100644
index 538cdb78..00000000
--- a/plugins/metrics/rpc.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package metrics
-
-import (
- "github.com/prometheus/client_golang/prometheus"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type rpcServer struct {
- svc *Plugin
- log logger.Logger
-}
-
-// Metric represents a single metric produced by the application.
-type Metric struct {
- // Collector name.
- Name string
-
- // Collector value.
- Value float64
-
-	// Labels associated with the metric. Only for vector metrics. Must be provided in the form of label values.
- Labels []string
-}
-
-// Add adds a new value to the designated collector.
-func (rpc *rpcServer) Add(m *Metric, ok *bool) error {
- const op = errors.Op("metrics_plugin_add")
- rpc.log.Info("adding metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
- c, exist := rpc.svc.collectors.Load(m.Name)
- if !exist {
- rpc.log.Error("undefined collector", "collector", m.Name)
-		return errors.E(op, errors.Errorf("undefined collector %s; Declare the desired collector first", m.Name))
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Add(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
- rpc.log.Error("required labels for collector", "collector", m.Name)
- return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
- }
-
- gauge, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
- return errors.E(op, err)
- }
- gauge.Add(m.Value)
- case prometheus.Counter:
- c.Add(m.Value)
-
- case *prometheus.CounterVec:
- if len(m.Labels) == 0 {
- return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
- }
-
- gauge, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
- return errors.E(op, err)
- }
- gauge.Add(m.Value)
-
- default:
- return errors.E(op, errors.Errorf("collector %s does not support method `Add`", m.Name))
- }
-
-	// RPC: set ok to true as the return value; needed by the rpc.Call reply argument
- *ok = true
- rpc.log.Info("metric successfully added", "name", m.Name, "labels", m.Labels, "value", m.Value)
- return nil
-}
-
-// Sub subtracts the value from the specified metric (gauge only).
-func (rpc *rpcServer) Sub(m *Metric, ok *bool) error {
- const op = errors.Op("metrics_plugin_sub")
- rpc.log.Info("subtracting value from metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
- c, exist := rpc.svc.collectors.Load(m.Name)
- if !exist {
- rpc.log.Error("undefined collector", "name", m.Name, "value", m.Value, "labels", m.Labels)
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
- if c == nil {
-		// defensive check; a stored collector should never be nil
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Sub(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
-			rpc.log.Error("required labels for collector, but none were provided", "name", m.Name, "value", m.Value)
- return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
- }
-
- gauge, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
- return errors.E(op, err)
- }
- gauge.Sub(m.Value)
- default:
- return errors.E(op, errors.Errorf("collector `%s` does not support method `Sub`", m.Name))
- }
- rpc.log.Info("subtracting operation finished successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
-
- *ok = true
- return nil
-}
-
-// Observe the value (histogram and summary only).
-func (rpc *rpcServer) Observe(m *Metric, ok *bool) error {
- const op = errors.Op("metrics_plugin_observe")
- rpc.log.Info("observing metric", "name", m.Name, "value", m.Value, "labels", m.Labels)
-
- c, exist := rpc.svc.collectors.Load(m.Name)
- if !exist {
- rpc.log.Error("undefined collector", "name", m.Name, "value", m.Value, "labels", m.Labels)
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
- if c == nil {
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
-
- switch c := c.(type) {
- case *prometheus.SummaryVec:
- if len(m.Labels) == 0 {
- return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
- }
-
- observer, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- return errors.E(op, err)
- }
- observer.Observe(m.Value)
-
- case prometheus.Histogram:
- c.Observe(m.Value)
-
- case *prometheus.HistogramVec:
- if len(m.Labels) == 0 {
- return errors.E(op, errors.Errorf("required labels for collector `%s`", m.Name))
- }
-
- observer, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
- return errors.E(op, err)
- }
- observer.Observe(m.Value)
- default:
- return errors.E(op, errors.Errorf("collector `%s` does not support method `Observe`", m.Name))
- }
-
- rpc.log.Info("observe operation finished successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
-
- *ok = true
- return nil
-}
-
-// Declare registers a new collector in prometheus.
-// Arguments:
-//   NamedCollector -> collector with its name
-//   bool           -> RPC reply value
-// Returns:
-//   error
-func (rpc *rpcServer) Declare(nc *NamedCollector, ok *bool) error {
- const op = errors.Op("metrics_plugin_declare")
- rpc.log.Info("declaring new metric", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
- _, exist := rpc.svc.collectors.Load(nc.Name)
- if exist {
-		rpc.log.Error("metric with the provided name already exists", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
-		return errors.E(op, errors.Errorf("tried to register an existing collector with the name `%s`", nc.Name))
- }
-
- var collector prometheus.Collector
- switch nc.Type {
- case Histogram:
- opts := prometheus.HistogramOpts{
- Name: nc.Name,
- Namespace: nc.Namespace,
- Subsystem: nc.Subsystem,
- Help: nc.Help,
- Buckets: nc.Buckets,
- }
-
- if len(nc.Labels) != 0 {
- collector = prometheus.NewHistogramVec(opts, nc.Labels)
- } else {
- collector = prometheus.NewHistogram(opts)
- }
- case Gauge:
- opts := prometheus.GaugeOpts{
- Name: nc.Name,
- Namespace: nc.Namespace,
- Subsystem: nc.Subsystem,
- Help: nc.Help,
- }
-
- if len(nc.Labels) != 0 {
- collector = prometheus.NewGaugeVec(opts, nc.Labels)
- } else {
- collector = prometheus.NewGauge(opts)
- }
- case Counter:
- opts := prometheus.CounterOpts{
- Name: nc.Name,
- Namespace: nc.Namespace,
- Subsystem: nc.Subsystem,
- Help: nc.Help,
- }
-
- if len(nc.Labels) != 0 {
- collector = prometheus.NewCounterVec(opts, nc.Labels)
- } else {
- collector = prometheus.NewCounter(opts)
- }
- case Summary:
- opts := prometheus.SummaryOpts{
- Name: nc.Name,
- Namespace: nc.Namespace,
- Subsystem: nc.Subsystem,
- Help: nc.Help,
- }
-
- if len(nc.Labels) != 0 {
- collector = prometheus.NewSummaryVec(opts, nc.Labels)
- } else {
- collector = prometheus.NewSummary(opts)
- }
-
- default:
- return errors.E(op, errors.Errorf("unknown collector type %s", nc.Type))
- }
-
- // add collector to sync.Map
- rpc.svc.collectors.Store(nc.Name, collector)
-	// Register returns an error (rather than panicking like MustRegister) for invalid or duplicate collectors
- err := rpc.svc.Register(collector)
- if err != nil {
- *ok = false
- return errors.E(op, err)
- }
-
- rpc.log.Info("metric successfully added", "name", nc.Name, "type", nc.Type, "namespace", nc.Namespace)
-
- *ok = true
- return nil
-}
-
-// Set sets the metric value (gauge only).
-func (rpc *rpcServer) Set(m *Metric, ok *bool) (err error) {
- const op = errors.Op("metrics_plugin_set")
-	rpc.log.Info("setting metric value", "name", m.Name, "value", m.Value, "labels", m.Labels)
-
- c, exist := rpc.svc.collectors.Load(m.Name)
- if !exist {
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
- if c == nil {
- return errors.E(op, errors.Errorf("undefined collector %s", m.Name))
- }
-
- switch c := c.(type) {
- case prometheus.Gauge:
- c.Set(m.Value)
-
- case *prometheus.GaugeVec:
- if len(m.Labels) == 0 {
- rpc.log.Error("required labels for collector", "collector", m.Name)
- return errors.E(op, errors.Errorf("required labels for collector %s", m.Name))
- }
-
- gauge, err := c.GetMetricWithLabelValues(m.Labels...)
- if err != nil {
- rpc.log.Error("failed to get metrics with label values", "collector", m.Name, "labels", m.Labels)
- return errors.E(op, err)
- }
- gauge.Set(m.Value)
-
- default:
- return errors.E(op, errors.Errorf("collector `%s` does not support method Set", m.Name))
- }
-
- rpc.log.Info("set operation finished successfully", "name", m.Name, "labels", m.Labels, "value", m.Value)
-
- *ok = true
- return nil
-}
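
The Declare/Add/Sub/Observe/Set methods above are reachable over the RPC plugin's socket. A rough client-side sketch follows; it assumes the service is registered under the plugin name "metrics", that the rpc plugin listens on its default tcp://127.0.0.1:6001, and it uses the pre-move import path for the metrics types, so treat it as illustrative rather than authoritative:

package main

import (
	"log"
	"net"
	"net/rpc"

	goridgeRPC "github.com/spiral/goridge/v3/pkg/rpc"
	"github.com/spiral/roadrunner/v2/plugins/metrics"
)

func main() {
	// assumes the rpc plugin listens on its default address
	conn, err := net.Dial("tcp", "127.0.0.1:6001")
	if err != nil {
		log.Fatal(err)
	}
	client := rpc.NewClientWithCodec(goridgeRPC.NewClientCodec(conn))
	defer client.Close()

	var ok bool

	// Declare a plain counter; with Labels set, Declare would build a CounterVec instead.
	nc := &metrics.NamedCollector{
		Name: "app_requests_total",
		Collector: metrics.Collector{
			Type: metrics.Counter,
			Help: "Total number of processed requests.",
		},
	}
	if err := client.Call("metrics.Declare", nc, &ok); err != nil {
		log.Fatal(err)
	}

	// Increment it through the Add method shown above.
	m := &metrics.Metric{Name: "app_requests_total", Value: 1}
	if err := client.Call("metrics.Add", m, &ok); err != nil {
		log.Fatal(err)
	}
}
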
diff --git a/plugins/redis/config.go b/plugins/redis/config.go
deleted file mode 100644
index 9acb4b47..00000000
--- a/plugins/redis/config.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package redis
-
-import "time"
-
-type Config struct {
- Addrs []string `mapstructure:"addrs"`
- DB int `mapstructure:"db"`
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- MasterName string `mapstructure:"master_name"`
- SentinelPassword string `mapstructure:"sentinel_password"`
- RouteByLatency bool `mapstructure:"route_by_latency"`
- RouteRandomly bool `mapstructure:"route_randomly"`
- MaxRetries int `mapstructure:"max_retries"`
- DialTimeout time.Duration `mapstructure:"dial_timeout"`
- MinRetryBackoff time.Duration `mapstructure:"min_retry_backoff"`
- MaxRetryBackoff time.Duration `mapstructure:"max_retry_backoff"`
- PoolSize int `mapstructure:"pool_size"`
- MinIdleConns int `mapstructure:"min_idle_conns"`
- MaxConnAge time.Duration `mapstructure:"max_conn_age"`
- ReadTimeout time.Duration `mapstructure:"read_timeout"`
- WriteTimeout time.Duration `mapstructure:"write_timeout"`
- PoolTimeout time.Duration `mapstructure:"pool_timeout"`
- IdleTimeout time.Duration `mapstructure:"idle_timeout"`
- IdleCheckFreq time.Duration `mapstructure:"idle_check_freq"`
- ReadOnly bool `mapstructure:"read_only"`
-}
-
-// InitDefaults fills the config with default values
-func (s *Config) InitDefaults() {
- if s.Addrs == nil {
-		s.Addrs = []string{"127.0.0.1:6379"} // the default address points to a local Redis instance
- }
-}
diff --git a/plugins/redis/kv/config.go b/plugins/redis/kv/config.go
deleted file mode 100644
index 5bd772a9..00000000
--- a/plugins/redis/kv/config.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package kv
-
-import (
- "time"
-)
-
-type Config struct {
- Addrs []string `mapstructure:"addrs"`
- DB int `mapstructure:"db"`
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- MasterName string `mapstructure:"master_name"`
- SentinelPassword string `mapstructure:"sentinel_password"`
- RouteByLatency bool `mapstructure:"route_by_latency"`
- RouteRandomly bool `mapstructure:"route_randomly"`
- MaxRetries int `mapstructure:"max_retries"`
- DialTimeout time.Duration `mapstructure:"dial_timeout"`
- MinRetryBackoff time.Duration `mapstructure:"min_retry_backoff"`
- MaxRetryBackoff time.Duration `mapstructure:"max_retry_backoff"`
- PoolSize int `mapstructure:"pool_size"`
- MinIdleConns int `mapstructure:"min_idle_conns"`
- MaxConnAge time.Duration `mapstructure:"max_conn_age"`
- ReadTimeout time.Duration `mapstructure:"read_timeout"`
- WriteTimeout time.Duration `mapstructure:"write_timeout"`
- PoolTimeout time.Duration `mapstructure:"pool_timeout"`
- IdleTimeout time.Duration `mapstructure:"idle_timeout"`
- IdleCheckFreq time.Duration `mapstructure:"idle_check_freq"`
- ReadOnly bool `mapstructure:"read_only"`
-}
-
-// InitDefaults fills the config with default values
-func (s *Config) InitDefaults() {
- if s.Addrs == nil {
-		s.Addrs = []string{"127.0.0.1:6379"} // the default address points to a local Redis instance
- }
-}
diff --git a/plugins/redis/kv/kv.go b/plugins/redis/kv/kv.go
deleted file mode 100644
index ae55d332..00000000
--- a/plugins/redis/kv/kv.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package kv
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/go-redis/redis/v8"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- kvv1 "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type Driver struct {
- universalClient redis.UniversalClient
- log logger.Logger
- cfg *Config
-}
-
-func NewRedisDriver(log logger.Logger, key string, cfgPlugin config.Configurer) (*Driver, error) {
- const op = errors.Op("new_redis_driver")
-
- d := &Driver{
- log: log,
- }
-
- // will be different for every connected driver
- err := cfgPlugin.UnmarshalKey(key, &d.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- if d.cfg == nil {
- return nil, errors.E(op, errors.Errorf("config not found by provided key: %s", key))
- }
-
- d.cfg.InitDefaults()
-
- d.universalClient = redis.NewUniversalClient(&redis.UniversalOptions{
- Addrs: d.cfg.Addrs,
- DB: d.cfg.DB,
- Username: d.cfg.Username,
- Password: d.cfg.Password,
- SentinelPassword: d.cfg.SentinelPassword,
- MaxRetries: d.cfg.MaxRetries,
-		MinRetryBackoff:    d.cfg.MinRetryBackoff,
- MaxRetryBackoff: d.cfg.MaxRetryBackoff,
- DialTimeout: d.cfg.DialTimeout,
- ReadTimeout: d.cfg.ReadTimeout,
- WriteTimeout: d.cfg.WriteTimeout,
- PoolSize: d.cfg.PoolSize,
- MinIdleConns: d.cfg.MinIdleConns,
- MaxConnAge: d.cfg.MaxConnAge,
- PoolTimeout: d.cfg.PoolTimeout,
- IdleTimeout: d.cfg.IdleTimeout,
- IdleCheckFrequency: d.cfg.IdleCheckFreq,
- ReadOnly: d.cfg.ReadOnly,
- RouteByLatency: d.cfg.RouteByLatency,
- RouteRandomly: d.cfg.RouteRandomly,
- MasterName: d.cfg.MasterName,
- })
-
- return d, nil
-}
-
-// Has checks whether the given keys exist.
-func (d *Driver) Has(keys ...string) (map[string]bool, error) {
- const op = errors.Op("redis_driver_has")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
- m := make(map[string]bool, len(keys))
- for _, key := range keys {
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
-
- exist, err := d.universalClient.Exists(context.Background(), key).Result()
- if err != nil {
- return nil, err
- }
- if exist == 1 {
- m[key] = true
- }
- }
- return m, nil
-}
-
-// Get loads the content of a key into a byte slice.
-func (d *Driver) Get(key string) ([]byte, error) {
- const op = errors.Op("redis_driver_get")
-	// handle cases like " " (whitespace-only keys)
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- return d.universalClient.Get(context.Background(), key).Bytes()
-}
-
-// MGet loads the content of multiple keys (missing keys are skipped).
-// https://redis.io/commands/mget
-// Returns a map of key to value bytes.
-func (d *Driver) MGet(keys ...string) (map[string][]byte, error) {
- const op = errors.Op("redis_driver_mget")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
-	// keys must not be empty
- for _, key := range keys {
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string][]byte, len(keys))
-
- for _, k := range keys {
- cmd := d.universalClient.Get(context.Background(), k)
- if cmd.Err() != nil {
- if cmd.Err() == redis.Nil {
- continue
- }
- return nil, errors.E(op, cmd.Err())
- }
-
- m[k] = utils.AsBytes(cmd.Val())
- }
-
- return m, nil
-}
-
-// Set sets value with the TTL in seconds
-// https://redis.io/commands/set
-// Redis `SET key value [expiration]` command.
-//
-// Use expiration for `SETEX`-like behavior.
-// Zero expiration means the key has no expiration time.
-func (d *Driver) Set(items ...*kvv1.Item) error {
- const op = errors.Op("redis_driver_set")
- if items == nil {
- return errors.E(op, errors.NoKeys)
- }
- now := time.Now()
- for _, item := range items {
- if item == nil {
- return errors.E(op, errors.EmptyKey)
- }
-
- if item.Timeout == "" {
- err := d.universalClient.Set(context.Background(), item.Key, item.Value, 0).Err()
- if err != nil {
- return err
- }
- } else {
- t, err := time.Parse(time.RFC3339, item.Timeout)
- if err != nil {
- return err
- }
- err = d.universalClient.Set(context.Background(), item.Key, item.Value, t.Sub(now)).Err()
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Delete one or multiple keys.
-func (d *Driver) Delete(keys ...string) error {
- const op = errors.Op("redis_driver_delete")
- if keys == nil {
- return errors.E(op, errors.NoKeys)
- }
-
-	// keys must not be empty
- for _, key := range keys {
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return errors.E(op, errors.EmptyKey)
- }
- }
- return d.universalClient.Del(context.Background(), keys...).Err()
-}
-
-// MExpire https://redis.io/commands/expire
-// timeout in RFC3339
-func (d *Driver) MExpire(items ...*kvv1.Item) error {
- const op = errors.Op("redis_driver_mexpire")
- now := time.Now()
- for _, item := range items {
- if item == nil {
- continue
- }
- if item.Timeout == "" || strings.TrimSpace(item.Key) == "" {
- return errors.E(op, errors.Str("should set timeout and at least one key"))
- }
-
- t, err := time.Parse(time.RFC3339, item.Timeout)
- if err != nil {
- return err
- }
-
-		// t is expected to be in the future
-		// for Redis we use t.Sub(now), which yields a duration in seconds, e.g. 4.2s
- d.universalClient.Expire(context.Background(), item.Key, t.Sub(now))
- }
-
- return nil
-}
-
-// TTL https://redis.io/commands/ttl
-// returns the remaining TTL for each of the given keys as a duration string
-func (d *Driver) TTL(keys ...string) (map[string]string, error) {
- const op = errors.Op("redis_driver_ttl")
- if keys == nil {
- return nil, errors.E(op, errors.NoKeys)
- }
-
-	// keys must not be empty
- for _, key := range keys {
- keyTrimmed := strings.TrimSpace(key)
- if keyTrimmed == "" {
- return nil, errors.E(op, errors.EmptyKey)
- }
- }
-
- m := make(map[string]string, len(keys))
-
- for _, key := range keys {
- duration, err := d.universalClient.TTL(context.Background(), key).Result()
- if err != nil {
- return nil, err
- }
-
- m[key] = duration.String()
- }
- return m, nil
-}
-
-func (d *Driver) Clear() error {
- fdb := d.universalClient.FlushDB(context.Background())
- if fdb.Err() != nil {
- return fdb.Err()
- }
-
- return nil
-}
-
-func (d *Driver) Stop() {}
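
Set and MExpire above both expect Item.Timeout as an RFC3339 timestamp and convert it to a relative TTL with t.Sub(now) before handing it to Redis. A tiny sketch of producing and interpreting such a timeout (standard library only):

package main

import (
	"fmt"
	"time"
)

func main() {
	// "expire in 10 minutes", encoded the way the driver expects it
	timeout := time.Now().Add(10 * time.Minute).Format(time.RFC3339)

	// this mirrors what Set/MExpire do internally
	t, err := time.Parse(time.RFC3339, timeout)
	if err != nil {
		panic(err)
	}
	fmt.Println("TTL passed to Redis:", t.Sub(time.Now()).Round(time.Second))
}
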
diff --git a/plugins/redis/plugin.go b/plugins/redis/plugin.go
deleted file mode 100644
index 961182a9..00000000
--- a/plugins/redis/plugin.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package redis
-
-import (
- "sync"
-
- "github.com/go-redis/redis/v8"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/kv"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- redis_kv "github.com/spiral/roadrunner/v2/plugins/redis/kv"
- redis_pubsub "github.com/spiral/roadrunner/v2/plugins/redis/pubsub"
-)
-
-const PluginName = "redis"
-
-type Plugin struct {
- sync.RWMutex
- // config for RR integration
- cfgPlugin config.Configurer
- // logger
- log logger.Logger
- // redis universal client
- universalClient redis.UniversalClient
-
-	// stopCh is used to stop the pub/sub channel listener
- stopCh chan struct{}
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- p.log = log
- p.cfgPlugin = cfg
- p.stopCh = make(chan struct{}, 1)
-
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- return make(chan error)
-}
-
-func (p *Plugin) Stop() error {
- const op = errors.Op("redis_plugin_stop")
- p.stopCh <- struct{}{}
-
- if p.universalClient != nil {
- err := p.universalClient.Close()
- if err != nil {
- return errors.E(op, err)
- }
- }
-
- return nil
-}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {}
-
-// KVConstruct provides KV storage implementation over the redis plugin
-func (p *Plugin) KVConstruct(key string) (kv.Storage, error) {
- const op = errors.Op("redis_plugin_provide")
- st, err := redis_kv.NewRedisDriver(p.log, key, p.cfgPlugin)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return st, nil
-}
-
-func (p *Plugin) PSConstruct(key string) (pubsub.PubSub, error) {
- return redis_pubsub.NewPubSubDriver(p.log, key, p.cfgPlugin, p.stopCh)
-}
diff --git a/plugins/redis/pubsub/channel.go b/plugins/redis/pubsub/channel.go
deleted file mode 100644
index a1655ab2..00000000
--- a/plugins/redis/pubsub/channel.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package pubsub
-
-import (
- "context"
- "sync"
-
- "github.com/go-redis/redis/v8"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type redisChannel struct {
- sync.Mutex
-
- // redis client
- client redis.UniversalClient
- pubsub *redis.PubSub
-
- log logger.Logger
-
- // out channel with all subs
- out chan *pubsub.Message
-
- exit chan struct{}
-}
-
-func newRedisChannel(redisClient redis.UniversalClient, log logger.Logger) *redisChannel {
- out := make(chan *pubsub.Message, 100)
- fi := &redisChannel{
- out: out,
- client: redisClient,
- pubsub: redisClient.Subscribe(context.Background()),
- exit: make(chan struct{}),
- log: log,
- }
-
- // start reading messages
- go fi.read()
-
- return fi
-}
-
-func (r *redisChannel) sub(topics ...string) error {
- const op = errors.Op("redis_sub")
- err := r.pubsub.Subscribe(context.Background(), topics...)
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-// read reads messages from the pubsub subscription
-func (r *redisChannel) read() {
- for {
- select {
-		// here we receive messages that we published earlier via Publish
-		// they must be compatible with the pubsub.Message structure
-		// the payload is carried in the redis message payload field
-
- case msg, ok := <-r.pubsub.Channel():
- // channel closed
- if !ok {
- return
- }
-
- r.out <- &pubsub.Message{
- Topic: msg.Channel,
- Payload: utils.AsBytes(msg.Payload),
- }
-
- case <-r.exit:
- return
- }
- }
-}
-
-func (r *redisChannel) unsub(topic string) error {
- const op = errors.Op("redis_unsub")
- err := r.pubsub.Unsubscribe(context.Background(), topic)
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-func (r *redisChannel) stop() error {
- r.exit <- struct{}{}
- close(r.out)
- close(r.exit)
- return nil
-}
-
-func (r *redisChannel) message() chan *pubsub.Message {
- return r.out
-}
diff --git a/plugins/redis/pubsub/config.go b/plugins/redis/pubsub/config.go
deleted file mode 100644
index bf8d2fc9..00000000
--- a/plugins/redis/pubsub/config.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package pubsub
-
-import "time"
-
-type Config struct {
- Addrs []string `mapstructure:"addrs"`
- DB int `mapstructure:"db"`
- Username string `mapstructure:"username"`
- Password string `mapstructure:"password"`
- MasterName string `mapstructure:"master_name"`
- SentinelPassword string `mapstructure:"sentinel_password"`
- RouteByLatency bool `mapstructure:"route_by_latency"`
- RouteRandomly bool `mapstructure:"route_randomly"`
- MaxRetries int `mapstructure:"max_retries"`
- DialTimeout time.Duration `mapstructure:"dial_timeout"`
- MinRetryBackoff time.Duration `mapstructure:"min_retry_backoff"`
- MaxRetryBackoff time.Duration `mapstructure:"max_retry_backoff"`
- PoolSize int `mapstructure:"pool_size"`
- MinIdleConns int `mapstructure:"min_idle_conns"`
- MaxConnAge time.Duration `mapstructure:"max_conn_age"`
- ReadTimeout time.Duration `mapstructure:"read_timeout"`
- WriteTimeout time.Duration `mapstructure:"write_timeout"`
- PoolTimeout time.Duration `mapstructure:"pool_timeout"`
- IdleTimeout time.Duration `mapstructure:"idle_timeout"`
- IdleCheckFreq time.Duration `mapstructure:"idle_check_freq"`
- ReadOnly bool `mapstructure:"read_only"`
-}
-
-// InitDefaults fills the config with default values
-func (s *Config) InitDefaults() {
- if s.Addrs == nil {
-		s.Addrs = []string{"127.0.0.1:6379"} // the default address points to a local Redis instance
- }
-}
diff --git a/plugins/redis/pubsub/pubsub.go b/plugins/redis/pubsub/pubsub.go
deleted file mode 100644
index 3561ef18..00000000
--- a/plugins/redis/pubsub/pubsub.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package pubsub
-
-import (
- "context"
- "sync"
-
- "github.com/go-redis/redis/v8"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type PubSubDriver struct {
- sync.RWMutex
- cfg *Config
-
- log logger.Logger
- channel *redisChannel
- universalClient redis.UniversalClient
- stopCh chan struct{}
-}
-
-func NewPubSubDriver(log logger.Logger, key string, cfgPlugin config.Configurer, stopCh chan struct{}) (*PubSubDriver, error) {
- const op = errors.Op("new_pub_sub_driver")
- ps := &PubSubDriver{
- log: log,
- stopCh: stopCh,
- }
-
- // will be different for every connected driver
- err := cfgPlugin.UnmarshalKey(key, &ps.cfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- if ps.cfg == nil {
- return nil, errors.E(op, errors.Errorf("config not found by provided key: %s", key))
- }
-
- ps.cfg.InitDefaults()
-
- ps.universalClient = redis.NewUniversalClient(&redis.UniversalOptions{
- Addrs: ps.cfg.Addrs,
- DB: ps.cfg.DB,
- Username: ps.cfg.Username,
- Password: ps.cfg.Password,
- SentinelPassword: ps.cfg.SentinelPassword,
- MaxRetries: ps.cfg.MaxRetries,
-		MinRetryBackoff:    ps.cfg.MinRetryBackoff,
- MaxRetryBackoff: ps.cfg.MaxRetryBackoff,
- DialTimeout: ps.cfg.DialTimeout,
- ReadTimeout: ps.cfg.ReadTimeout,
- WriteTimeout: ps.cfg.WriteTimeout,
- PoolSize: ps.cfg.PoolSize,
- MinIdleConns: ps.cfg.MinIdleConns,
- MaxConnAge: ps.cfg.MaxConnAge,
- PoolTimeout: ps.cfg.PoolTimeout,
- IdleTimeout: ps.cfg.IdleTimeout,
- IdleCheckFrequency: ps.cfg.IdleCheckFreq,
- ReadOnly: ps.cfg.ReadOnly,
- RouteByLatency: ps.cfg.RouteByLatency,
- RouteRandomly: ps.cfg.RouteRandomly,
- MasterName: ps.cfg.MasterName,
- })
-
- statusCmd := ps.universalClient.Ping(context.Background())
- if statusCmd.Err() != nil {
- return nil, statusCmd.Err()
- }
-
- ps.channel = newRedisChannel(ps.universalClient, log)
-
- ps.stop()
-
- return ps, nil
-}
-
-func (p *PubSubDriver) stop() {
- go func() {
- for range p.stopCh {
- _ = p.channel.stop()
- return
- }
- }()
-}
-
-func (p *PubSubDriver) Publish(msg *pubsub.Message) error {
- p.Lock()
- defer p.Unlock()
-
- f := p.universalClient.Publish(context.Background(), msg.Topic, msg.Payload)
- if f.Err() != nil {
- return f.Err()
- }
-
- return nil
-}
-
-func (p *PubSubDriver) PublishAsync(msg *pubsub.Message) {
- go func() {
- p.Lock()
- defer p.Unlock()
-
- f := p.universalClient.Publish(context.Background(), msg.Topic, msg.Payload)
- if f.Err() != nil {
- p.log.Error("redis publish", "error", f.Err())
- }
- }()
-}
-
-func (p *PubSubDriver) Subscribe(connectionID string, topics ...string) error {
- // just add a connection
- for i := 0; i < len(topics); i++ {
- // key - topic
- // value - connectionID
- hset := p.universalClient.SAdd(context.Background(), topics[i], connectionID)
- res, err := hset.Result()
- if err != nil {
- return err
- }
- if res == 0 {
-			p.log.Warn("could not subscribe to the provided topic, you might already be subscribed to it", "connectionID", connectionID, "topic", topics[i])
- continue
- }
- }
-
- // and subscribe after
- return p.channel.sub(topics...)
-}
-
-func (p *PubSubDriver) Unsubscribe(connectionID string, topics ...string) error {
- // Remove topics from the storage
- for i := 0; i < len(topics); i++ {
- srem := p.universalClient.SRem(context.Background(), topics[i], connectionID)
- if srem.Err() != nil {
- return srem.Err()
- }
- }
-
- for i := 0; i < len(topics); i++ {
-		// if there are no such topics, we can safely unsubscribe from Redis
- exists := p.universalClient.Exists(context.Background(), topics[i])
- res, err := exists.Result()
- if err != nil {
- return err
- }
-
- // if we have associated connections - skip
- if res == 1 { // exists means that topic still exists and some other nodes may have connections associated with it
- continue
- }
-
- // else - unsubscribe
- err = p.channel.unsub(topics[i])
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (p *PubSubDriver) Connections(topic string, res map[string]struct{}) {
- hget := p.universalClient.SMembersMap(context.Background(), topic)
- r, err := hget.Result()
- if err != nil {
- panic(err)
- }
-
- // assign connections
- // res expected to be from the sync.Pool
- for k := range r {
- res[k] = struct{}{}
- }
-}
-
-// Next returns the next message
-func (p *PubSubDriver) Next(ctx context.Context) (*pubsub.Message, error) {
- const op = errors.Op("redis_driver_next")
- select {
- case msg := <-p.channel.message():
- return msg, nil
- case <-ctx.Done():
- return nil, errors.E(op, errors.TimeOut, ctx.Err())
- }
-}
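
Next is the single consumption point of this driver. A sketch of how a caller (the websocket plugin, in RoadRunner's case) might drain it, assuming a driver already built with NewPubSubDriver and using the pre-move import path shown in this diff:

package example

import (
	"context"
	"log"

	redispubsub "github.com/spiral/roadrunner/v2/plugins/redis/pubsub"
)

// consume drains messages for the given topics until ctx is cancelled.
// drv is assumed to come from redispubsub.NewPubSubDriver.
func consume(ctx context.Context, drv *redispubsub.PubSubDriver, connID string, topics ...string) error {
	if err := drv.Subscribe(connID, topics...); err != nil {
		return err
	}
	defer drv.Unsubscribe(connID, topics...) //nolint:errcheck

	for {
		msg, err := drv.Next(ctx)
		if err != nil {
			// Next wraps ctx.Err() with errors.TimeOut once the context is done
			return err
		}
		log.Printf("topic %s: %s", msg.Topic, msg.Payload)
	}
}
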
diff --git a/plugins/reload/config.go b/plugins/reload/config.go
deleted file mode 100644
index 6fd3af70..00000000
--- a/plugins/reload/config.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package reload
-
-import (
- "time"
-
- "github.com/spiral/errors"
-)
-
-// Config is a Reload configuration point.
-type Config struct {
- // Interval is a global refresh interval
- Interval time.Duration
-
-	// Patterns is a global set of file patterns to watch. It is applied to every directory in the project
- Patterns []string
-
-	// Services is a set of services which will be reloaded in case of FS changes
- Services map[string]ServiceConfig
-}
-
-type ServiceConfig struct {
-	// Enabled indicates that the service must be watched; not required when any other option is specified
- Enabled bool
-
-	// Recursive enables watching nested files under the root folder
- Recursive bool
-
- // Patterns is per-service specific files to watch
- Patterns []string
-
- // Dirs is per-service specific dirs which will be combined with Patterns
- Dirs []string
-
-	// Ignore is a set of files which will not be watched
- Ignore []string
-}
-
-// InitDefaults sets missing values to their default values.
-func (c *Config) InitDefaults() {
- if c.Interval == 0 {
- c.Interval = time.Second
- }
- if c.Patterns == nil {
- c.Patterns = []string{".php"}
- }
-}
-
-// Valid validates the configuration.
-func (c *Config) Valid() error {
- const op = errors.Op("reload_plugin_valid")
- if c.Interval < time.Second {
- return errors.E(op, errors.Str("too short interval"))
- }
-
- if c.Services == nil {
- return errors.E(op, errors.Str("should add at least 1 service"))
- } else if len(c.Services) == 0 {
- return errors.E(op, errors.Str("service initialized, however, no config added"))
- }
-
- return nil
-}
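
Taken together, InitDefaults and Valid define the smallest acceptable configuration: an interval of at least one second and at least one watched service. A sketch using the pre-move import path (the "http" service key is illustrative; it has to match a resettable plugin name):

package main

import (
	"fmt"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/reload"
)

func main() {
	cfg := &reload.Config{
		Interval: time.Second,
		Services: map[string]reload.ServiceConfig{
			"http": {
				Recursive: true,
				Dirs:      []string{"."},
				Patterns:  []string{".php"},
			},
		},
	}

	cfg.InitDefaults()              // fills Interval/Patterns if they were left empty
	fmt.Println(cfg.Valid() == nil) // true: interval >= 1s and at least one service
}
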
diff --git a/plugins/reload/plugin.go b/plugins/reload/plugin.go
deleted file mode 100644
index a9a5a63c..00000000
--- a/plugins/reload/plugin.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package reload
-
-import (
- "os"
- "strings"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
-)
-
-// PluginName contains default plugin name.
-const PluginName string = "reload"
-const thresholdChanBuffer uint = 1000
-
-type Plugin struct {
- cfg *Config
- log logger.Logger
- watcher *Watcher
- services map[string]interface{}
- res *resetter.Plugin
- stopc chan struct{}
-}
-
-// Init initializes the reload plugin
-func (s *Plugin) Init(cfg config.Configurer, log logger.Logger, res *resetter.Plugin) error {
- const op = errors.Op("reload_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &s.cfg)
- if err != nil {
- // disable plugin in case of error
- return errors.E(op, errors.Disabled, err)
- }
-
- s.cfg.InitDefaults()
-
- s.log = log
- s.res = res
- s.stopc = make(chan struct{}, 1)
- s.services = make(map[string]interface{})
-
- configs := make([]WatcherConfig, 0, len(s.cfg.Services))
-
- for serviceName, serviceConfig := range s.cfg.Services {
- ignored, errIgn := ConvertIgnored(serviceConfig.Ignore)
- if errIgn != nil {
-			return errors.E(op, errIgn)
- }
- configs = append(configs, WatcherConfig{
- ServiceName: serviceName,
- Recursive: serviceConfig.Recursive,
- Directories: serviceConfig.Dirs,
- FilterHooks: func(filename string, patterns []string) error {
- for i := 0; i < len(patterns); i++ {
- if strings.Contains(filename, patterns[i]) {
- return nil
- }
- }
- return errors.E(op, errors.SkipFile)
- },
- Files: make(map[string]os.FileInfo),
- Ignored: ignored,
- FilePatterns: append(serviceConfig.Patterns, s.cfg.Patterns...),
- })
- }
-
- s.watcher, err = NewWatcher(configs, s.log)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (s *Plugin) Serve() chan error {
- const op = errors.Op("reload_plugin_serve")
- errCh := make(chan error, 1)
- if s.cfg.Interval < time.Second {
- errCh <- errors.E(op, errors.Str("reload interval is too fast"))
- return errCh
- }
-
-	// make a map with unique services:
-	// even if we got 100 events from the http service,
-	// the map would contain only one key with its config
- thCh := make(chan struct {
- serviceConfig ServiceConfig
- service string
- }, thresholdChanBuffer)
-
- // use the same interval
- timer := time.NewTimer(s.cfg.Interval)
-
- go func() {
- for e := range s.watcher.Event {
- thCh <- struct {
- serviceConfig ServiceConfig
- service string
- }{serviceConfig: s.cfg.Services[e.service], service: e.service}
- }
- }()
-
- // map with config by services
- updated := make(map[string]ServiceConfig, len(s.cfg.Services))
-
- go func() {
- for {
- select {
- case cfg := <-thCh:
-				// the logic is as follows:
-				// stop the running timer
- timer.Stop()
-				// replace the previous value in the map with the more recent one, without adding a new key
- updated[cfg.service] = cfg.serviceConfig
-				// if we get a lot of events, we shouldn't restart the service on each of them (the user may be doing a batch move or typing very fast)
-				// instead, we reset the timer and wait for s.cfg.Interval
-				// if there are no more events, the service is restarted only once
- timer.Reset(s.cfg.Interval)
- case <-timer.C:
- if len(updated) > 0 {
- for name := range updated {
- err := s.res.Reset(name)
- if err != nil {
- timer.Stop()
- errCh <- errors.E(op, err)
- return
- }
- }
- // zero map
- updated = make(map[string]ServiceConfig, len(s.cfg.Services))
- }
- case <-s.stopc:
- timer.Stop()
- return
- }
- }
- }()
-
- go func() {
- err := s.watcher.StartPolling(s.cfg.Interval)
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
- }()
-
- return errCh
-}
-
-func (s *Plugin) Stop() error {
- s.watcher.Stop()
- s.stopc <- struct{}{}
- return nil
-}
-
-func (s *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (s *Plugin) Available() {
-}
diff --git a/plugins/reload/watcher.go b/plugins/reload/watcher.go
deleted file mode 100644
index c40c2fdf..00000000
--- a/plugins/reload/watcher.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package reload
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// SimpleHook is used to filter by a simple criterion: CONTAINS
-type SimpleHook func(filename string, pattern []string) error
-
-// An Event describes an event that is received when file or directory
-// changes occur. It includes the os.FileInfo of the changed file or
-// directory, the type of event that occurred, and the full path of the file.
-type Event struct {
- Path string
- Info os.FileInfo
-
- service string // type of service, http, grpc, etc...
-}
-
-type WatcherConfig struct {
- // service name
- ServiceName string
-
-	// Recursive watch, or just add a single directory
- Recursive bool
-
- // Directories used per-service
- Directories []string
-
- // simple hook, just CONTAINS
- FilterHooks func(filename string, pattern []string) error
-
-	// Files maps file paths to their os.FileInfo
- Files map[string]os.FileInfo
-
-	// Ignored directories; a map is used for O(1) amortized lookups
- Ignored map[string]struct{}
-
-	// FilePatterns to watch (files not matching any pattern are skipped)
- FilePatterns []string
-}
-
-type Watcher struct {
- // main event channel
- Event chan Event
- close chan struct{}
-
- // =============================
- mu *sync.Mutex
-
-	// indicates whether the walker has started
- started bool
-
- // config for each service
- // need pointer here to assign files
- watcherConfigs map[string]WatcherConfig
-
- // logger
- log logger.Logger
-}
-
-// Options is used to set Watcher Options
-type Options func(*Watcher)
-
-// NewWatcher returns a new instance of the file Watcher
-func NewWatcher(configs []WatcherConfig, log logger.Logger, options ...Options) (*Watcher, error) {
- w := &Watcher{
- Event: make(chan Event),
- mu: &sync.Mutex{},
-
- log: log,
-
- close: make(chan struct{}),
-
- //workingDir: workDir,
- watcherConfigs: make(map[string]WatcherConfig),
- }
-
- // add watcherConfigs by service names
- for _, v := range configs {
- w.watcherConfigs[v.ServiceName] = v
- }
-
- // apply options
- for _, option := range options {
- option(w)
- }
- err := w.initFs()
- if err != nil {
- return nil, err
- }
-
- return w, nil
-}
-
-// initFs builds the initial map of files
-func (w *Watcher) initFs() error {
- const op = errors.Op("watcher_init_fs")
- for srvName, config := range w.watcherConfigs {
- fileList, err := w.retrieveFileList(srvName, config)
- if err != nil {
- return errors.E(op, err)
- }
-		// workaround: in Go you can't assign to a struct field inside a map element
- tmp := w.watcherConfigs[srvName]
- tmp.Files = fileList
- w.watcherConfigs[srvName] = tmp
- }
- return nil
-}
-
-// ConvertIgnored converts a slice of ignored files into a map
-func ConvertIgnored(ignored []string) (map[string]struct{}, error) {
- if len(ignored) == 0 {
- return nil, nil
- }
-
- ign := make(map[string]struct{}, len(ignored))
- for i := 0; i < len(ignored); i++ {
- abs, err := filepath.Abs(ignored[i])
- if err != nil {
- return nil, err
- }
- ign[abs] = struct{}{}
- }
-
- return ign, nil
-}
-
-// https://en.wikipedia.org/wiki/Inotify
-// SetMaxFileEvents sets max file notify events for Watcher
-// In case of file watch errors, this value can be increased system-wide
-// For linux: set --> fs.inotify.max_user_watches = 600000 (under /etc/<choose_name_here>.conf)
-// And apply: sudo sysctl -p --system
-// func SetMaxFileEvents(events int) Options {
-// return func(watcher *Watcher) {
-// watcher.maxFileWatchEvents = events
-// }
-//
-// }
-
-// retrieveFilesSingle collects files for a single, non-recursive directory
-func (w *Watcher) retrieveFilesSingle(serviceName, path string) (map[string]os.FileInfo, error) {
- stat, err := os.Stat(path)
- if err != nil {
- return nil, err
- }
-
- filesList := make(map[string]os.FileInfo, 10)
- filesList[path] = stat
-
- // if it's not a dir, return
- if !stat.IsDir() {
- return filesList, nil
- }
-
- fileInfoList, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
-
-	// recursive calls are slow compared to goto
-	// so we add files using a goto-based loop
-outer:
- for i := 0; i < len(fileInfoList); i++ {
- // if file in ignored --> continue
- if _, ignored := w.watcherConfigs[serviceName].Ignored[path]; ignored {
- continue
- }
-
- // if filename does not contain pattern --> ignore that file
- if w.watcherConfigs[serviceName].FilePatterns != nil && w.watcherConfigs[serviceName].FilterHooks != nil {
- err = w.watcherConfigs[serviceName].FilterHooks(fileInfoList[i].Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.SkipFile, err) {
- continue outer
- }
- }
-
- filesList[fileInfoList[i].Name()] = fileInfoList[i]
- }
-
- return filesList, nil
-}
-
-func (w *Watcher) StartPolling(duration time.Duration) error {
- w.mu.Lock()
- const op = errors.Op("watcher_start_polling")
- if w.started {
- w.mu.Unlock()
- return errors.E(op, errors.Str("already started"))
- }
-
- w.started = true
- w.mu.Unlock()
-
- return w.waitEvent(duration)
-}
-
-// this is a blocking operation
-func (w *Watcher) waitEvent(d time.Duration) error {
- ticker := time.NewTicker(d)
- for {
- select {
- case <-w.close:
- ticker.Stop()
-			// just exit;
-			// any in-flight pollEvents call does not matter
- return nil
- case <-ticker.C:
-			// this is not a very efficient way,
-			// because we have to wait on the lock;
-			// it would be better to watch files in parallel, but this is only used in debug mode
- for serviceName := range w.watcherConfigs {
- fileList, _ := w.retrieveFileList(serviceName, w.watcherConfigs[serviceName])
- w.pollEvents(w.watcherConfigs[serviceName].ServiceName, fileList)
- }
- }
- }
-}
-
-// retrieveFileList gets the file list for a service
-func (w *Watcher) retrieveFileList(serviceName string, config WatcherConfig) (map[string]os.FileInfo, error) {
- fileList := make(map[string]os.FileInfo)
- if config.Recursive {
- // walk through directories recursively
- for i := 0; i < len(config.Directories); i++ {
- // full path is workdir/relative_path
- fullPath, err := filepath.Abs(config.Directories[i])
- if err != nil {
- return nil, err
- }
- list, err := w.retrieveFilesRecursive(serviceName, fullPath)
- if err != nil {
- return nil, err
- }
-
- for k := range list {
- fileList[k] = list[k]
- }
- }
- return fileList, nil
- }
-
- for i := 0; i < len(config.Directories); i++ {
- // full path is workdir/relative_path
- fullPath, err := filepath.Abs(config.Directories[i])
- if err != nil {
- return nil, err
- }
-
- // list is pathToFiles with files
- list, err := w.retrieveFilesSingle(serviceName, fullPath)
- if err != nil {
- return nil, err
- }
-
- for pathToFile, file := range list {
- fileList[pathToFile] = file
- }
- }
-
- return fileList, nil
-}
-
-func (w *Watcher) retrieveFilesRecursive(serviceName, root string) (map[string]os.FileInfo, error) {
- fileList := make(map[string]os.FileInfo)
-
- return fileList, filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
- const op = errors.Op("retrieve files recursive")
- if err != nil {
- return errors.E(op, err)
- }
-
- // If path is ignored and it's a directory, skip the directory. If it's
- // ignored and it's a single file, skip the file.
- _, ignored := w.watcherConfigs[serviceName].Ignored[path]
- if ignored {
- if info.IsDir() {
- // if it's dir, ignore whole
- return filepath.SkipDir
- }
- return nil
- }
-
- // if filename does not contain pattern --> ignore that file
- err = w.watcherConfigs[serviceName].FilterHooks(info.Name(), w.watcherConfigs[serviceName].FilePatterns)
- if errors.Is(errors.SkipFile, err) {
- return nil
- }
-
-		// Add the path and its info to the file list.
- fileList[path] = info
- return nil
- })
-}
-
-func (w *Watcher) pollEvents(serviceName string, files map[string]os.FileInfo) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
-	// Collect create and remove events; they are used to check for rename events.
- creates := make(map[string]os.FileInfo)
- removes := make(map[string]os.FileInfo)
-
- // Check for removed files.
- for pth := range w.watcherConfigs[serviceName].Files {
- if _, found := files[pth]; !found {
- removes[pth] = w.watcherConfigs[serviceName].Files[pth]
- w.log.Debug("file added to the list of removed files", "path", pth, "name", w.watcherConfigs[serviceName].Files[pth].Name(), "size", w.watcherConfigs[serviceName].Files[pth].Size())
- }
- }
-
- // Check for created files, writes and chmods.
- for pth := range files {
- if files[pth].IsDir() {
- continue
- }
- oldInfo, found := w.watcherConfigs[serviceName].Files[pth]
- if !found {
- // A file was created.
- creates[pth] = files[pth]
- w.log.Debug("file was created", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
- continue
- }
-
- if oldInfo.ModTime() != files[pth].ModTime() || oldInfo.Mode() != files[pth].Mode() {
- w.watcherConfigs[serviceName].Files[pth] = files[pth]
- w.log.Debug("file was updated", "path", pth, "name", files[pth].Name(), "size", files[pth].Size())
- w.Event <- Event{
- Path: pth,
- Info: files[pth],
- service: serviceName,
- }
- }
- }
-
- // Send all the remaining create and remove events.
- for pth := range creates {
- // add file to the plugin watch files
- w.watcherConfigs[serviceName].Files[pth] = creates[pth]
- w.log.Debug("file was added to watcher", "path", pth, "name", creates[pth].Name(), "size", creates[pth].Size())
-
- w.Event <- Event{
- Path: pth,
- Info: creates[pth],
- service: serviceName,
- }
- }
-
- for pth := range removes {
- // delete path from the config
- delete(w.watcherConfigs[serviceName].Files, pth)
- w.log.Debug("file was removed from watcher", "path", pth, "name", removes[pth].Name(), "size", removes[pth].Size())
-
- w.Event <- Event{
- Path: pth,
- Info: removes[pth],
- service: serviceName,
- }
- }
-}
-
-func (w *Watcher) Stop() {
- w.close <- struct{}{}
-}
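
NewWatcher plus StartPolling is all the plugin above really drives: build one WatcherConfig per service, then poll and read the Event channel. A condensed sketch, assuming an available logger.Logger implementation (lg) and the pre-move import paths; the "http" service name and the accept-everything filter hook are illustrative only:

package example

import (
	"log"
	"os"
	"time"

	"github.com/spiral/roadrunner/v2/plugins/logger"
	"github.com/spiral/roadrunner/v2/plugins/reload"
)

// watchOnce wires a single WatcherConfig and prints change events.
func watchOnce(lg logger.Logger) error {
	w, err := reload.NewWatcher([]reload.WatcherConfig{{
		ServiceName:  "http",
		Recursive:    true,
		Directories:  []string{"."},
		Files:        make(map[string]os.FileInfo),
		FilePatterns: []string{".php"},
		FilterHooks: func(filename string, patterns []string) error {
			return nil // accept every file in this sketch
		},
	}}, lg)
	if err != nil {
		return err
	}

	go func() {
		for e := range w.Event {
			log.Println("changed:", e.Path)
		}
	}()

	// blocks; Stop() ends the loop
	return w.StartPolling(time.Second)
}
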
diff --git a/plugins/resetter/interface.go b/plugins/resetter/interface.go
deleted file mode 100644
index 0defcaba..00000000
--- a/plugins/resetter/interface.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package resetter
-
-// Resetter interface
-type Resetter interface {
-	// Reset reloads the plugin
- Reset() error
-}
diff --git a/plugins/resetter/plugin.go b/plugins/resetter/plugin.go
deleted file mode 100644
index 191185ae..00000000
--- a/plugins/resetter/plugin.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package resetter
-
-import (
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
-)
-
-const PluginName = "resetter"
-
-type Plugin struct {
- registry map[string]Resetter
-}
-
-func (p *Plugin) Init() error {
- p.registry = make(map[string]Resetter)
- return nil
-}
-
-// Reset named service.
-func (p *Plugin) Reset(name string) error {
- const op = errors.Op("resetter_plugin_reset_by_name")
- svc, ok := p.registry[name]
- if !ok {
- return errors.E(op, errors.Errorf("no such plugin: %s", name))
- }
-
- return svc.Reset()
-}
-
-// RegisterTarget registers a resettable service.
-func (p *Plugin) RegisterTarget(name endure.Named, r Resetter) error {
- p.registry[name.Name()] = r
- return nil
-}
-
-// Collects declares services to be collected.
-func (p *Plugin) Collects() []interface{} {
- return []interface{}{
- p.RegisterTarget,
- }
-}
-
-// Name of the service.
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (p *Plugin) Available() {
-}
-
-// RPC returns associated rpc service.
-func (p *Plugin) RPC() interface{} {
- return &rpc{srv: p}
-}
diff --git a/plugins/resetter/rpc.go b/plugins/resetter/rpc.go
deleted file mode 100644
index 79171b5c..00000000
--- a/plugins/resetter/rpc.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package resetter
-
-import "github.com/spiral/errors"
-
-type rpc struct {
- srv *Plugin
-}
-
-// List all resettable plugins.
-func (rpc *rpc) List(_ bool, list *[]string) error {
- *list = make([]string, 0)
-
- for name := range rpc.srv.registry {
- *list = append(*list, name)
- }
- return nil
-}
-
-// Reset named plugin.
-func (rpc *rpc) Reset(service string, done *bool) error {
- const op = errors.Op("resetter_rpc_reset")
- err := rpc.srv.Reset(service)
- if err != nil {
- *done = false
- return errors.E(op, err)
- }
- *done = true
- return nil
-}
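For context, these methods become callable as resetter.List and resetter.Reset once the plugin is attached to the RPC server (see plugins/rpc below). A minimal client-side sketch, assuming goridge v3's client codec (goridgeRpc.NewClientCodec) and the default rpc.listen address tcp://127.0.0.1:6001; the plugin name "http" is only an example:

package main

import (
	"fmt"
	"net"
	"net/rpc"

	goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:6001")
	if err != nil {
		panic(err)
	}
	client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))

	// List all resettable plugins.
	var list []string
	if err := client.Call("resetter.List", true, &list); err != nil {
		panic(err)
	}
	fmt.Println("resettable:", list)

	// Reset one of them by name.
	var done bool
	if err := client.Call("resetter.Reset", "http", &done); err != nil {
		panic(err)
	}
	fmt.Println("done:", done)
}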
diff --git a/plugins/rpc/config.go b/plugins/rpc/config.go
deleted file mode 100644
index 88ad7f0e..00000000
--- a/plugins/rpc/config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package rpc
-
-import (
- "errors"
- "net"
- "strings"
-
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-// Config defines RPC service config.
-type Config struct {
- // Listen is the RPC listen address (e.g. tcp://127.0.0.1:6001 or unix://rr.sock)
- Listen string
-}
-
- // InitDefaults initializes a blank config with a pre-defined set of default values.
-func (c *Config) InitDefaults() {
- if c.Listen == "" {
- c.Listen = "tcp://127.0.0.1:6001"
- }
-}
-
-// Valid returns nil if config is valid.
-func (c *Config) Valid() error {
- if dsn := strings.Split(c.Listen, "://"); len(dsn) != 2 {
- return errors.New("invalid socket DSN (tcp://:6001, unix://file.sock)")
- }
-
- return nil
-}
-
- // Listener creates a new RPC socket listener.
-func (c *Config) Listener() (net.Listener, error) {
- return utils.CreateListener(c.Listen)
-}
-
- // Dialer dials the RPC socket and returns the connection.
-func (c *Config) Dialer() (net.Conn, error) {
- dsn := strings.Split(c.Listen, "://")
- if len(dsn) != 2 {
- return nil, errors.New("invalid socket DSN (tcp://:6001, unix://file.sock)")
- }
-
- return net.Dial(dsn[0], dsn[1])
-}
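For context, both Valid and Dialer rely on a plain "://" split of the Listen DSN. A standalone, stdlib-only sketch of that check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, listen := range []string{"tcp://127.0.0.1:6001", "unix://rr.sock", "127.0.0.1:6001"} {
		dsn := strings.Split(listen, "://")
		if len(dsn) != 2 {
			fmt.Printf("%s -> invalid socket DSN (tcp://:6001, unix://file.sock)\n", listen)
			continue
		}
		// dsn[0] is the network ("tcp" or "unix"), dsn[1] is the address.
		fmt.Printf("%s -> network=%s address=%s\n", listen, dsn[0], dsn[1])
	}
}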
diff --git a/plugins/rpc/doc/plugin_arch.drawio b/plugins/rpc/doc/plugin_arch.drawio
deleted file mode 100644
index dec5f0b2..00000000
--- a/plugins/rpc/doc/plugin_arch.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2020-10-19T17:14:19.125Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/13.7.9 Chrome/85.0.4183.121 Electron/10.1.3 Safari/537.36" etag="2J39x4EyFr1zaE9BXKM4" version="13.7.9" type="device"><diagram id="q2oMKs6VHyn7y0AfAXBL" name="Page-1">7Vttc9o4EP41zLQfksE2GPIxQHPXu7RlQntt7ptiC1sX2XJlOUB//a1sGdtIJDQFnE6YyUys1YutfR7trlai44yj5R8cJeEH5mPasbv+suNMOrZtORcO/JOSVSEZWv1CEHDiq0aVYEZ+YCXsKmlGfJw2GgrGqCBJU+ixOMaeaMgQ52zRbDZntPnWBAVYE8w8RHXpV+KLUEkt96Kq+BOTIFSvHtqDoiJCZWM1kzREPlvURM67jjPmjIniKVqOMZXKK/VS9LvaUrv+MI5jsUuHL/zu0yx7//HT3Pln8vfN59vvS/usVHMqVuWMsQ8KUEXGRcgCFiP6rpKOOMtiH8thu1Cq2lwzloDQAuF/WIiVQhNlgoEoFBFVtXhJxLfa860c6ryvSpOlGjkvrMpCLPjqW71Q6yWLVbe8VPabs1hcoYhQKRizjBPMYcIf8UJVqq+8gGKhC6mArTpWohQG8lSrfz88xF8/ds/+uiLe7MsXtLiyZ2clVxEPsHik3WDNBFhCmEUYvh36cUyRIA/N70CKy8G6XQU3PCjEfwZ9q030K8RvazVPoV8BftvA+7dE33KOBP9jX/mAaKbedDOFkbpTmgUk1qjRBH4REoFnCcr1sADj3wT55xVv0PMD5gIvayJdU6rWGSi3otyMYw3OlWRRme21VwlrFtsdHEi9jqbe9zERha+ak0DTL0xVNJWIKAliePZAMaA+ZyQVQsA5XaqKiPh+sShxSn6gu3woiU7CSCzyCfVHnf5EjgXrMC103go+3Q18hho6QwM4pfPcOzg9DZwJTnDspyBk8Rqk8ylnDxCB8N8DLcveD1z2BlxWWa4vpu4x8epreOmuK/YvZcQnIaAoTYm34XeO5kMMun/aFRjdj45QDYG+AYBStrMHUW+YSgpWBOgNtxCgHKJwgapXPercGKhvbwxkbQxUKEYbKCfJetrP542r8aa0vt0U9gsE1rpzKfWVeK97ia+Xc41glolhB1viA32Jj+3O5YhIXc9loAHFEczdpRKWO95Ay/2eyZ1UrqqzQq8S14tkmeurrIanQP0vRvmVQYA052WwVAwHE7+rXrHBp/bCI3f4tPu1jMGReyCwLT06KoLPVPDMExnHmvrSBYkoinGpIVWz07oUcm8y8kJC/Wu0YpmcXiqQd1+WRiHj5AcMi0qIoJqXMNhuo8VM9lQLO1/oeFqiY22IPqBlo+E1SoUSeIxSlKTkbj2NCGwhiUdMCBbt0/k8P47uuQarULapE8Vye4diytDg+ke7R2hAKHaPx4wyIMYkZgWBCKUbopJDFM/FVgalsOEhcXCdt5n0KsmNUoUUMeg7p3kgEoI/wHG+axZIbPUHI9DyWIYl4BnsMZStqpw7iwT22WMWw1wQycHFwKMFTsUvU+Tx1fk0cUr34e7GE/tQBqV0SxpNpJGeYf6QK+VNjMX5TeK9PbGlTbb07ZbZYl1sYUsKTCEeltvAIlKr+aNuSqHqxJw2mTMwBC7HZY6eOSiYMydYni3IeHH8aILnxIk9c8Lq9tomxQ7pCUpyqAszUZ4lWc/iw3qXqQjwOc+8n1kaSRydJI6BEBTdYTqF3WixH57woq1h0/ryueDsGLAOD0UFPeNQ2AcYPmT+G7FK8NvCTMjHkzdply1HdCfmIzhDHvMIR3Av9jDVrKTOjjnUCzPaRzpN1Ra+Ciafk9Xo/nK6wmAsfpMMhrZ+DazZmsHoNTNdPcvgD1xDpmuwB4dgpIX9dLxY8aTKdZ78wp7osn2t/lQyw8SZg3kFPTmqcSZGkTIsgNeJLS2yxZTMOCpb9IizMigcByQFmyITGlYxV4A2o0iqyc+PvOGvYYPmTNbl2Xgzq17Wgdie/Ia1cYFkqO8pHftAx2FGVPUMVVJkul8VLK61cXJl67gc6pTSbAvcVgJ245259TW5Vm5M1k6i9xPlO7uG+b1Ww3zdOVdXCk5h/pHsgtM0C64p7WNywqWz3j8tdsgLX0tXHJ+itiNFbVsu176UIN/SL7xMOQOFR2lOl7a9fN3MP4rYHpbzxq7dsGk/1O1QMzT6nYOAqSAZFqaPvY78hYecQIBjzJGQgbNgsk2UeaH8Ji93RdLvefdY3ohDeZyNlx7G8iGjJMqvA5/pV61fE9YGy93fU6ANxer3NcWNwupXSs67/wE=</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/rpc/interface.go b/plugins/rpc/interface.go
deleted file mode 100644
index eb6da9af..00000000
--- a/plugins/rpc/interface.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package rpc
-
- // RPCer declares the ability to create a set of public RPC methods.
-type RPCer interface {
- // RPC provides methods for the given service.
- RPC() interface{}
-}
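For context, a plugin is exposed over RPC by implementing RPCer together with a Name() method; the methods on the returned receiver must follow the net/rpc conventions listed in the Register documentation in plugin.go below. A hypothetical sketch (plugin name, method, and return value are invented for illustration):

package example

// rpcService follows the net/rpc method shape: exported method, two exported
// arguments, the second a pointer, returning error.
type rpcService struct{}

func (r *rpcService) Version(_ bool, out *string) error {
	*out = "v2.5.0"
	return nil
}

type Plugin struct{}

// Name provides the RPC method prefix, e.g. example.Version.
func (p *Plugin) Name() string { return "example" }

// RPC satisfies the RPCer interface declared above.
func (p *Plugin) RPC() interface{} { return &rpcService{} }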
diff --git a/plugins/rpc/plugin.go b/plugins/rpc/plugin.go
deleted file mode 100644
index b8ee6d13..00000000
--- a/plugins/rpc/plugin.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package rpc
-
-import (
- "net"
- "net/rpc"
- "sync/atomic"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// PluginName contains default plugin name.
-const PluginName = "rpc"
-
-// Plugin is RPC service.
-type Plugin struct {
- cfg Config
- log logger.Logger
- rpc *rpc.Server
- // set of plugins which implement the RPCer interface and can be plugged into RR via RPC
- plugins map[string]RPCer
- listener net.Listener
- closed uint32
-}
-
- // Init initializes the RPC service; returns errors.Disabled if the service is not enabled in the config.
-func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("rpc_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &s.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
- // Init defaults
- s.cfg.InitDefaults()
- // Init pluggable plugins map
- s.plugins = make(map[string]RPCer, 5)
- // init logs
- s.log = log
-
- // set up state
- atomic.StoreUint32(&s.closed, 0)
-
- // validate config
- err = s.cfg.Valid()
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-// Serve serves the service.
-func (s *Plugin) Serve() chan error {
- const op = errors.Op("rpc_plugin_serve")
- errCh := make(chan error, 1)
-
- s.rpc = rpc.NewServer()
-
- plugins := make([]string, 0, len(s.plugins))
-
- // Attach all services
- for name := range s.plugins {
- err := s.Register(name, s.plugins[name].RPC())
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- plugins = append(plugins, name)
- }
-
- var err error
- s.listener, err = s.cfg.Listener()
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- s.log.Debug("Started RPC service", "address", s.cfg.Listen, "plugins", plugins)
-
- go func() {
- for {
- conn, err := s.listener.Accept()
- if err != nil {
- if atomic.LoadUint32(&s.closed) == 1 {
- // not a critical issue: Stop was called, so simply return
- return
- }
-
- s.log.Error("listener accept error", "error", err)
- errCh <- errors.E(errors.Op("listener accept"), errors.Serve, err)
- return
- }
-
- go s.rpc.ServeCodec(goridgeRpc.NewCodec(conn))
- }
- }()
-
- return errCh
-}
-
-// Stop stops the service.
-func (s *Plugin) Stop() error {
- const op = errors.Op("rpc_plugin_stop")
- // store closed state
- atomic.StoreUint32(&s.closed, 1)
- err := s.listener.Close()
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-// Name contains service name.
-func (s *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (s *Plugin) Available() {
-}
-
-// Collects all plugins which implement Name + RPCer interfaces
-func (s *Plugin) Collects() []interface{} {
- return []interface{}{
- s.RegisterPlugin,
- }
-}
-
-// RegisterPlugin registers RPC service plugin.
-func (s *Plugin) RegisterPlugin(name endure.Named, p RPCer) {
- s.plugins[name.Name()] = p
-}
-
-// Register publishes in the server the set of methods of the
-// receiver value that satisfy the following conditions:
-// - exported method of exported type
-// - two arguments, both of exported type
-// - the second argument is a pointer
-// - one return value, of type error
-// It returns an error if the receiver is not an exported type or has
-// no suitable methods. It also logs the error using package log.
-func (s *Plugin) Register(name string, svc interface{}) error {
- if s.rpc == nil {
- return errors.E("RPC service is not configured")
- }
-
- return s.rpc.RegisterName(name, svc)
-}
diff --git a/plugins/server/command.go b/plugins/server/command.go
deleted file mode 100644
index b8bc1395..00000000
--- a/plugins/server/command.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package server
-
-import (
- "os"
- "regexp"
-
- "github.com/spiral/errors"
-)
-
- // pattern used to find the script path inside the command
-const pattern string = `^\/*([A-z/.:-]+\.(php|sh|ph))$`
-
-func (server *Plugin) scanCommand(cmd []string) error {
- const op = errors.Op("server_command_scan")
- r, err := regexp.Compile(pattern)
- if err != nil {
- return err
- }
-
- for i := 0; i < len(cmd); i++ {
- if r.MatchString(cmd[i]) {
- // try to stat
- _, err := os.Stat(cmd[i])
- if err != nil {
- return errors.E(op, errors.FileNotFound, err)
- }
-
- // stat successful
- return nil
- }
- }
- return errors.E(errors.Str("scan failed, possible path not found, this is not an error"), op)
-}
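For context, scanCommand only accepts the first token that looks like a script path ending in .php, .sh, or .ph; everything else (the interpreter name, CLI options) is skipped. A standalone, stdlib-only demo of the pattern:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	r := regexp.MustCompile(`^\/*([A-z/.:-]+\.(php|sh|ph))$`)
	// Only the second token matches; "php" and "--option1" have no script suffix.
	for _, tok := range []string{"php", "../../tests/client.php", "--option1"} {
		fmt.Printf("%-25s matches: %v\n", tok, r.MatchString(tok))
	}
}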
diff --git a/plugins/server/command_test.go b/plugins/server/command_test.go
deleted file mode 100644
index 74762ccd..00000000
--- a/plugins/server/command_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package server
-
-import (
- "strings"
- "testing"
-
- "github.com/spiral/errors"
- "github.com/stretchr/testify/assert"
-)
-
-func TestServerCommandChecker(t *testing.T) {
- s := &Plugin{}
- cmd1 := "php ../../tests/client.php"
- assert.NoError(t, s.scanCommand(strings.Split(cmd1, " ")))
-
- cmd2 := "C:/../../abcdef/client.php"
- assert.Error(t, s.scanCommand(strings.Split(cmd2, " ")))
-
- cmd3 := "sh ./script.sh"
- err := s.scanCommand(strings.Split(cmd3, " "))
- assert.Error(t, err)
- if !errors.Is(errors.FileNotFound, err) {
- t.Fatal("should be of filenotfound type")
- }
-
- cmd4 := "php ../../tests/client.php --option1 --option2"
- err = s.scanCommand(strings.Split(cmd4, " "))
- assert.NoError(t, err)
-
- cmd5 := "php ../../tests/cluent.php --option1 --option2"
- err = s.scanCommand(strings.Split(cmd5, " "))
- assert.Error(t, err)
- if !errors.Is(errors.FileNotFound, err) {
- t.Fatal("should be of filenotfound type")
- }
-
- cmd6 := "php 0/../../tests/cluent.php --option1 --option2"
- err = s.scanCommand(strings.Split(cmd6, " "))
- assert.Error(t, err)
- if errors.Is(errors.FileNotFound, err) {
- t.Fatal("should be of filenotfound type")
- }
-}
diff --git a/plugins/server/config.go b/plugins/server/config.go
deleted file mode 100644
index 00ce4140..00000000
--- a/plugins/server/config.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package server
-
-import (
- "time"
-)
-
- // Config is the whole .rr.yaml config.
- // For other sections, a pointer is used to distinguish between `empty` and `not present`.
-type Config struct {
- // Server config section
- Server struct {
- // Command to run as application.
- Command string `mapstructure:"command"`
- // User to run application under.
- User string `mapstructure:"user"`
- // Group to run application under.
- Group string `mapstructure:"group"`
- // Env represents application environment.
- Env Env `mapstructure:"env"`
- // Relay defines connection method and factory to be used to connect to workers:
- // "pipes", "tcp://:6001", "unix://rr.sock"
- // This config section must not change on re-configuration.
- Relay string `mapstructure:"relay"`
- // RelayTimeout defines how long the socket factory will wait for a worker connection. This config section
- // must not change on re-configuration. Defaults to 60s.
- RelayTimeout time.Duration `mapstructure:"relay_timeout"`
- } `mapstructure:"server"`
-
- // we just need to know whether the section exists; we don't need to read its config
- RPC *struct {
- Listen string `mapstructure:"listen"`
- } `mapstructure:"rpc"`
- Logs *struct {
- } `mapstructure:"logs"`
- HTTP *struct {
- } `mapstructure:"http"`
- Redis *struct {
- } `mapstructure:"redis"`
- Boltdb *struct {
- } `mapstructure:"boltdb"`
- Memcached *struct {
- } `mapstructure:"memcached"`
- Memory *struct {
- } `mapstructure:"memory"`
- Metrics *struct {
- } `mapstructure:"metrics"`
- Reload *struct {
- } `mapstructure:"reload"`
-}
-
-// InitDefaults for the server config
-func (cfg *Config) InitDefaults() {
- if cfg.Server.Relay == "" {
- cfg.Server.Relay = "pipes"
- }
-
- if cfg.Server.RelayTimeout == 0 {
- cfg.Server.RelayTimeout = time.Second * 60
- }
-}
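For context, InitDefaults only fills the relay settings. An illustrative check, assuming the pre-move import path github.com/spiral/roadrunner/v2/plugins/server is still resolvable:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/server"
)

func main() {
	cfg := &server.Config{}
	cfg.InitDefaults()
	// Prints: relay=pipes relay_timeout=1m0s
	fmt.Printf("relay=%s relay_timeout=%s\n", cfg.Server.Relay, cfg.Server.RelayTimeout)
}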
diff --git a/plugins/server/interface.go b/plugins/server/interface.go
deleted file mode 100644
index b0f84a7f..00000000
--- a/plugins/server/interface.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package server
-
-import (
- "context"
- "os/exec"
-
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/worker"
-)
-
- // Env is a map of environment variables passed to the worker
-type Env map[string]string
-
-// Server creates workers for the application.
-type Server interface {
- // CmdFactory returns a new command factory based on the .rr.yaml server.command section
- CmdFactory(env Env) (func() *exec.Cmd, error)
- // NewWorker returns a new worker with the user-provided listeners and environment variables attached
- NewWorker(ctx context.Context, env Env, listeners ...events.Listener) (*worker.Process, error)
- // NewWorkerPool returns a new pool of (PHP) workers with the attached event listeners and env variables, based on the provided configuration
- NewWorkerPool(ctx context.Context, opt *pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error)
-}
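For context, other plugins receive this Server interface via dependency injection and use it to spawn workers or pools. A hedged sketch of a consumer, assuming the pre-move import paths; the RR_MODE value is only an example:

package example

import (
	"context"

	"github.com/spiral/roadrunner/v2/pkg/worker"
	"github.com/spiral/roadrunner/v2/plugins/server"
)

// Plugin is a hypothetical consumer; endure would inject the server.Server
// implementation through this plugin's Init method.
type Plugin struct {
	srv server.Server
}

// spawn issues a standalone worker with one extra env variable attached.
func (p *Plugin) spawn(ctx context.Context) (*worker.Process, error) {
	return p.srv.NewWorker(ctx, server.Env{"RR_MODE": "http"})
}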
diff --git a/plugins/server/plugin.go b/plugins/server/plugin.go
deleted file mode 100644
index 5f5f2df9..00000000
--- a/plugins/server/plugin.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package server
-
-import (
- "context"
- "fmt"
- "os"
- "os/exec"
- "strings"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/transport"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-
- // core imports
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
- "github.com/spiral/roadrunner/v2/pkg/transport/socket"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const (
- // PluginName for the server
- PluginName = "server"
- // RrRelay env variable key (internal)
- RrRelay = "RR_RELAY"
- // RrRPC env variable key (internal), set if the RPC plugin is present
- RrRPC = "RR_RPC"
-)
-
- // Plugin manages the worker factory used to create workers
-type Plugin struct {
- cfg Config
- log logger.Logger
- factory transport.Factory
-}
-
-// Init application provider.
-func (server *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("server_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
- err := cfg.Unmarshal(&server.cfg)
- if err != nil {
- return errors.E(op, errors.Init, err)
- }
- server.cfg.InitDefaults()
- server.log = log
-
- return nil
-}
-
-// Name contains service name.
-func (server *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (server *Plugin) Available() {}
-
- // Serve (start) the server plugin; it only initializes the worker factory
-func (server *Plugin) Serve() chan error {
- const op = errors.Op("server_plugin_serve")
- errCh := make(chan error, 1)
- var err error
- server.factory, err = server.initFactory()
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
- return errCh
-}
-
- // Stop closes the factory chosen in the config
-func (server *Plugin) Stop() error {
- if server.factory == nil {
- return nil
- }
-
- return server.factory.Close()
-}
-
-// CmdFactory provides worker command factory associated with given context.
-func (server *Plugin) CmdFactory(env Env) (func() *exec.Cmd, error) {
- const op = errors.Op("server_plugin_cmd_factory")
- var cmdArgs []string
-
- // create command according to the config
- cmdArgs = append(cmdArgs, strings.Split(server.cfg.Server.Command, " ")...)
- if len(cmdArgs) < 2 {
- return nil, errors.E(op, errors.Str("minimum command should be `<executable> <script>`"))
- }
-
- // try to find a path here
- err := server.scanCommand(cmdArgs)
- if err != nil {
- server.log.Info("scan command", "reason", err)
- }
-
- return func() *exec.Cmd {
- cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) //nolint:gosec
- utils.IsolateProcess(cmd)
-
- // if the user is not empty and the OS is linux or macos,
- // execute the php worker as that particular user
- if server.cfg.Server.User != "" {
- err := utils.ExecuteFromUser(cmd, server.cfg.Server.User)
- if err != nil {
- return nil
- }
- }
-
- cmd.Env = server.setEnv(env)
-
- return cmd
- }, nil
-}
-
-// NewWorker issues new standalone worker.
-func (server *Plugin) NewWorker(ctx context.Context, env Env, listeners ...events.Listener) (*worker.Process, error) {
- const op = errors.Op("server_plugin_new_worker")
-
- list := make([]events.Listener, 0, len(listeners))
- list = append(list, server.collectWorkerEvents)
-
- spawnCmd, err := server.CmdFactory(env)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- w, err := server.factory.SpawnWorkerWithTimeout(ctx, spawnCmd(), list...)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return w, nil
-}
-
-// NewWorkerPool issues new worker pool.
-func (server *Plugin) NewWorkerPool(ctx context.Context, opt *pool.Config, env Env, listeners ...events.Listener) (pool.Pool, error) {
- const op = errors.Op("server_plugin_new_worker_pool")
-
- spawnCmd, err := server.CmdFactory(env)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- list := make([]events.Listener, 0, 2)
- list = append(list, server.collectPoolEvents, server.collectWorkerEvents)
- if len(listeners) != 0 {
- list = append(list, listeners...)
- }
-
- p, err := pool.Initialize(ctx, spawnCmd, server.factory, opt, pool.AddListeners(list...))
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return p, nil
-}
-
-// creates relay and worker factory.
-func (server *Plugin) initFactory() (transport.Factory, error) {
- const op = errors.Op("server_plugin_init_factory")
- if server.cfg.Server.Relay == "" || server.cfg.Server.Relay == "pipes" {
- return pipe.NewPipeFactory(), nil
- }
-
- dsn := strings.Split(server.cfg.Server.Relay, "://")
- if len(dsn) != 2 {
- return nil, errors.E(op, errors.Network, errors.Str("invalid DSN (tcp://:6001, unix://file.sock)"))
- }
-
- lsn, err := utils.CreateListener(server.cfg.Server.Relay)
- if err != nil {
- return nil, errors.E(op, errors.Network, err)
- }
-
- switch dsn[0] {
- // sockets group
- case "unix":
- return socket.NewSocketServer(lsn, server.cfg.Server.RelayTimeout), nil
- case "tcp":
- return socket.NewSocketServer(lsn, server.cfg.Server.RelayTimeout), nil
- default:
- return nil, errors.E(op, errors.Network, errors.Str("invalid DSN (tcp://:6001, unix://file.sock)"))
- }
-}
-
-func (server *Plugin) setEnv(e Env) []string {
- env := append(os.Environ(), fmt.Sprintf(RrRelay+"=%s", server.cfg.Server.Relay))
- for k, v := range e {
- env = append(env, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
- }
-
- if server.cfg.RPC != nil && server.cfg.RPC.Listen != "" {
- env = append(env, fmt.Sprintf("%s=%s", RrRPC, server.cfg.RPC.Listen))
- }
-
- // set env variables from the config
- if len(server.cfg.Server.Env) > 0 {
- for k, v := range server.cfg.Server.Env {
- env = append(env, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
- }
- }
-
- return env
-}
-
-func (server *Plugin) collectPoolEvents(event interface{}) {
- if we, ok := event.(events.PoolEvent); ok {
- switch we.Event {
- case events.EventMaxMemory:
- server.log.Warn("worker max memory reached", "pid", we.Payload.(worker.BaseProcess).Pid())
- case events.EventNoFreeWorkers:
- server.log.Warn("no free workers in the pool", "error", we.Payload.(error).Error())
- case events.EventPoolError:
- server.log.Error("pool error", "error", we.Payload.(error).Error())
- case events.EventSupervisorError:
- server.log.Error("pool supervisor error", "error", we.Payload.(error).Error())
- case events.EventTTL:
- server.log.Warn("worker TTL reached", "pid", we.Payload.(worker.BaseProcess).Pid())
- case events.EventWorkerConstruct:
- if _, ok := we.Payload.(error); ok {
- server.log.Error("worker construction error", "error", we.Payload.(error).Error())
- return
- }
- server.log.Debug("worker constructed", "pid", we.Payload.(worker.BaseProcess).Pid())
- case events.EventWorkerDestruct:
- server.log.Debug("worker destructed", "pid", we.Payload.(worker.BaseProcess).Pid())
- case events.EventExecTTL:
- server.log.Warn("worker exec timeout reached", "error", we.Payload.(error).Error())
- case events.EventIdleTTL:
- server.log.Warn("worker idle timeout reached", "pid", we.Payload.(worker.BaseProcess).Pid())
- case events.EventPoolRestart:
- server.log.Warn("requested pool restart")
- }
- }
-}
-
-func (server *Plugin) collectWorkerEvents(event interface{}) {
- if we, ok := event.(events.WorkerEvent); ok {
- switch we.Event {
- case events.EventWorkerError:
- switch e := we.Payload.(type) { //nolint:gocritic
- case error:
- if errors.Is(errors.SoftJob, e) {
- // get source error for the softjob error
- server.log.Error(strings.TrimRight(e.(*errors.Error).Err.Error(), " \n\t"))
- return
- }
-
- // print full error for the other types of errors
- server.log.Error(strings.TrimRight(e.Error(), " \n\t"))
- return
- }
- server.log.Error(strings.TrimRight(we.Payload.(error).Error(), " \n\t"))
- case events.EventWorkerLog:
- server.log.Debug(strings.TrimRight(utils.AsString(we.Payload.([]byte)), " \n\t"))
- // stderr event is INFO level
- case events.EventWorkerStderr:
- server.log.Info(strings.TrimRight(utils.AsString(we.Payload.([]byte)), " \n\t"))
- }
- }
-}
diff --git a/plugins/service/config.go b/plugins/service/config.go
deleted file mode 100644
index 871c8f76..00000000
--- a/plugins/service/config.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package service
-
-import "time"
-
- // Service represents a particular service configuration
-type Service struct {
- Command string `mapstructure:"command"`
- ProcessNum int `mapstructure:"process_num"`
- ExecTimeout time.Duration `mapstructure:"exec_timeout"`
- RemainAfterExit bool `mapstructure:"remain_after_exit"`
- RestartSec uint64 `mapstructure:"restart_sec"`
-}
-
-// Config for the services
-type Config struct {
- Services map[string]Service `mapstructure:"service"`
-}
-
-func (c *Config) InitDefault() {
- if len(c.Services) > 0 {
- for k, v := range c.Services {
- if v.ProcessNum == 0 {
- val := c.Services[k]
- val.ProcessNum = 1
- c.Services[k] = val
- }
- if v.RestartSec == 0 {
- val := c.Services[k]
- val.RestartSec = 30
- c.Services[k] = val
- }
- }
- }
-}
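For context, InitDefault back-fills per-service values that the user left at zero. An illustrative check, assuming the pre-move import path; the command is a made-up example:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/service"
)

func main() {
	cfg := &service.Config{
		Services: map[string]service.Service{
			"loop": {Command: "php loop.php"},
		},
	}
	cfg.InitDefault()
	// Unset fields receive defaults: ProcessNum=1, RestartSec=30.
	fmt.Printf("%+v\n", cfg.Services["loop"])
}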
diff --git a/plugins/service/plugin.go b/plugins/service/plugin.go
deleted file mode 100644
index 3bd0f956..00000000
--- a/plugins/service/plugin.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package service
-
-import (
- "sync"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const PluginName string = "service"
-
-type Plugin struct {
- sync.Mutex
-
- logger logger.Logger
- cfg Config
-
- // all processes attached to the service
- processes []*Process
-}
-
-func (service *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("service_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(errors.Disabled)
- }
- err := cfg.UnmarshalKey(PluginName, &service.cfg.Services)
- if err != nil {
- return errors.E(op, err)
- }
-
- // init default parameters if not set by user
- service.cfg.InitDefault()
- // save the logger
- service.logger = log
-
- return nil
-}
-
-func (service *Plugin) Serve() chan error {
- errCh := make(chan error, 1)
-
- // start processing
- go func() {
- // lock here, because Stop command might be invoked during the Serve
- service.Lock()
- defer service.Unlock()
-
- service.processes = make([]*Process, 0, len(service.cfg.Services))
- // for every service
- for k := range service.cfg.Services {
- // create the needed number of processes
- for i := 0; i < service.cfg.Services[k].ProcessNum; i++ {
- // create the process structure which will run the service
- service.processes = append(service.processes, NewServiceProcess(
- service.cfg.Services[k].RemainAfterExit,
- service.cfg.Services[k].ExecTimeout,
- service.cfg.Services[k].RestartSec,
- service.cfg.Services[k].Command,
- service.logger,
- errCh,
- ))
- }
- }
-
- // start all processes
- for i := 0; i < len(service.processes); i++ {
- service.processes[i].start()
- }
- }()
-
- return errCh
-}
-
-func (service *Plugin) Workers() []process.State {
- service.Lock()
- defer service.Unlock()
- states := make([]process.State, 0, len(service.processes))
- for i := 0; i < len(service.processes); i++ {
- st, err := process.GeneralProcessState(service.processes[i].Pid, service.processes[i].rawCmd)
- if err != nil {
- continue
- }
- states = append(states, st)
- }
- return states
-}
-
-func (service *Plugin) Stop() error {
- service.Lock()
- defer service.Unlock()
-
- if len(service.processes) > 0 {
- for i := 0; i < len(service.processes); i++ {
- service.processes[i].stop()
- }
- }
- return nil
-}
-
-// Name contains service name.
-func (service *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (service *Plugin) Available() {
-}
diff --git a/plugins/service/process.go b/plugins/service/process.go
deleted file mode 100644
index cac5c41e..00000000
--- a/plugins/service/process.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package service
-
-import (
- "os/exec"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
- // Process contains information about the process: restart settings, logger, error channel, etc.
-type Process struct {
- sync.Mutex
- // command to execute
- command *exec.Cmd
- // rawCmd from the plugin
- rawCmd string
- Pid int
-
- // root plugin error chan
- errCh chan error
- // logger
- log logger.Logger
-
- ExecTimeout time.Duration
- RemainAfterExit bool
- RestartSec uint64
-
- // process start time
- startTime time.Time
- stopped uint64
-}
-
-// NewServiceProcess constructs service process structure
-func NewServiceProcess(restartAfterExit bool, execTimeout time.Duration, restartDelay uint64, command string, l logger.Logger, errCh chan error) *Process {
- return &Process{
- rawCmd: command,
- RestartSec: restartDelay,
- ExecTimeout: execTimeout,
- RemainAfterExit: restartAfterExit,
- errCh: errCh,
- log: l,
- }
-}
-
- // Write logs the process output (used as the stderr target)
-func (p *Process) Write(b []byte) (int, error) {
- p.log.Info(utils.AsString(b))
- return len(b), nil
-}
-
-func (p *Process) start() {
- p.Lock()
- defer p.Unlock()
- const op = errors.Op("processor_start")
-
- // create the process command here
- p.createProcess()
-
- // non blocking process start
- err := p.command.Start()
- if err != nil {
- p.errCh <- errors.E(op, err)
- return
- }
-
- // start process waiting routine
- go p.wait()
- // execHandler checks for the execTimeout
- go p.execHandler()
- // save start time
- p.startTime = time.Now()
- p.Pid = p.command.Process.Pid
-}
-
-// create command for the process
-func (p *Process) createProcess() {
- // cmdArgs contains the command arguments when the command is in the form: php <script> or ls <dir> -i -b
- var cmdArgs []string
- cmdArgs = append(cmdArgs, strings.Split(p.rawCmd, " ")...)
- if len(cmdArgs) < 2 {
- p.command = exec.Command(p.rawCmd) //nolint:gosec
- } else {
- p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) //nolint:gosec
- }
- // redirect stderr into the Write function of the process.go
- p.command.Stderr = p
-}
-
- // wait for the process to exit
-func (p *Process) wait() {
- // Wait error doesn't matter here
- err := p.command.Wait()
- if err != nil {
- p.log.Error("process wait error", "error", err)
- }
- // wait for restart delay
- if p.RemainAfterExit {
- // wait for the delay
- time.Sleep(time.Second * time.Duration(p.RestartSec))
- // and start command again
- p.start()
- }
-}
-
- // stop can only be invoked by Endure when the plugin is stopped
-func (p *Process) stop() {
- atomic.StoreUint64(&p.stopped, 1)
-}
-
-func (p *Process) execHandler() {
- tt := time.NewTicker(time.Second)
- for range tt.C {
- // lock here, because p.startTime could be changed during the check
- p.Lock()
- // if the exec timeout is set
- if p.ExecTimeout != 0 {
- // if stopped -> kill the process (SIGINT-> SIGKILL) and exit
- if atomic.CompareAndSwapUint64(&p.stopped, 1, 1) {
- err := p.command.Process.Signal(syscall.SIGINT)
- if err != nil {
- _ = p.command.Process.Signal(syscall.SIGKILL)
- }
- tt.Stop()
- p.Unlock()
- return
- }
-
- // check the running time for the script
- if time.Now().After(p.startTime.Add(p.ExecTimeout)) {
- err := p.command.Process.Signal(syscall.SIGINT)
- if err != nil {
- _ = p.command.Process.Signal(syscall.SIGKILL)
- }
- p.Unlock()
- tt.Stop()
- return
- }
- }
- p.Unlock()
- }
-}
diff --git a/plugins/sqs/config.go b/plugins/sqs/config.go
deleted file mode 100644
index 9b2a1ca8..00000000
--- a/plugins/sqs/config.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sqs
-
-import "github.com/aws/aws-sdk-go-v2/aws"
-
-const (
- attributes string = "attributes"
- tags string = "tags"
- queue string = "queue"
- pref string = "prefetch"
- visibility string = "visibility_timeout"
- waitTime string = "wait_time"
-)
-
-type GlobalCfg struct {
- Key string `mapstructure:"key"`
- Secret string `mapstructure:"secret"`
- Region string `mapstructure:"region"`
- SessionToken string `mapstructure:"session_token"`
- Endpoint string `mapstructure:"endpoint"`
-}
-
-// Config is used to parse pipeline configuration
-type Config struct {
- // The duration (in seconds) that the received messages are hidden from subsequent
- // retrieve requests after being retrieved by a ReceiveMessage request.
- VisibilityTimeout int32 `mapstructure:"visibility_timeout"`
- // The duration (in seconds) for which the call waits for a message to arrive
- // in the queue before returning. If a message is available, the call returns
- // sooner than WaitTimeSeconds. If no messages are available and the wait time
- // expires, the call returns successfully with an empty list of messages.
- WaitTimeSeconds int32 `mapstructure:"wait_time_seconds"`
- // Prefetch is the maximum number of messages to return. Amazon SQS never returns more messages
- // than this value (however, fewer messages might be returned). Valid values: 1 to
- // 10. Default: 1.
- Prefetch int32 `mapstructure:"prefetch"`
- // The name of the new queue. The following limits apply to this name:
- //
- // * A queue
- // name can have up to 80 characters.
- //
- // * Valid values: alphanumeric characters,
- // hyphens (-), and underscores (_).
- //
- // * A FIFO queue name must end with the .fifo
- // suffix.
- //
- // Queue URLs and names are case-sensitive.
- //
- // This member is required.
- Queue *string `mapstructure:"queue"`
-
- // A map of attributes with their corresponding values. The following lists the
- // names, descriptions, and values of the special request parameters that the
- // CreateQueue action uses.
- // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html
- Attributes map[string]string `mapstructure:"attributes"`
-
- // From amazon docs:
- // Add cost allocation tags to the specified Amazon SQS queue. For an overview, see
- // Tagging Your Amazon SQS Queues
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html)
- // in the Amazon SQS Developer Guide. When you use queue tags, keep the following
- // guidelines in mind:
- //
- // * Adding more than 50 tags to a queue isn't recommended.
- //
- // *
- // Tags don't have any semantic meaning. Amazon SQS interprets tags as character
- // strings.
- //
- // * Tags are case-sensitive.
- //
- // * A new tag with a key identical to that
- // of an existing tag overwrites the existing tag.
- //
- // For a full list of tag
- // restrictions, see Quotas related to queues
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues)
- // in the Amazon SQS Developer Guide. To be able to tag a queue on creation, you
- // must have the sqs:CreateQueue and sqs:TagQueue permissions. Cross-account
- // permissions don't apply to this action. For more information, see Grant
- // cross-account permissions to a role and a user name
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
- // in the Amazon SQS Developer Guide.
- Tags map[string]string `mapstructure:"tags"`
-}
-
-func (c *GlobalCfg) InitDefault() {
- if c.Endpoint == "" {
- c.Endpoint = "http://127.0.0.1:9324"
- }
-}
-
-func (c *Config) InitDefault() {
- if c.Queue == nil {
- c.Queue = aws.String("default")
- }
-
- if c.Prefetch == 0 || c.Prefetch > 10 {
- c.Prefetch = 10
- }
-
- if c.WaitTimeSeconds == 0 {
- c.WaitTimeSeconds = 5
- }
-
- if c.Attributes == nil {
- c.Attributes = make(map[string]string)
- }
-
- if c.Tags == nil {
- c.Tags = make(map[string]string)
- }
-}
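For context, InitDefault clamps the prefetch value and fills the queue name and wait time. An illustrative check, assuming the pre-move import path:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/sqs"
)

func main() {
	cfg := &sqs.Config{Prefetch: 25} // out of the valid 1..10 range, so it is reset to 10
	cfg.InitDefault()
	// Prints: default 10 5
	fmt.Println(*cfg.Queue, cfg.Prefetch, cfg.WaitTimeSeconds)
}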
diff --git a/plugins/sqs/consumer.go b/plugins/sqs/consumer.go
deleted file mode 100644
index 92dbd6a8..00000000
--- a/plugins/sqs/consumer.go
+++ /dev/null
@@ -1,421 +0,0 @@
-package sqs
-
-import (
- "context"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/aws/retry"
- "github.com/aws/aws-sdk-go-v2/config"
- "github.com/aws/aws-sdk-go-v2/credentials"
- "github.com/aws/aws-sdk-go-v2/service/sqs"
- "github.com/aws/aws-sdk-go-v2/service/sqs/types"
- "github.com/google/uuid"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- cfgPlugin "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type consumer struct {
- sync.Mutex
- pq priorityqueue.Queue
- log logger.Logger
- eh events.Handler
- pipeline atomic.Value
-
- // connection info
- key string
- secret string
- sessionToken string
- region string
- endpoint string
- queue *string
- messageGroupID string
- waitTime int32
- prefetch int32
- visibilityTimeout int32
-
- // if the user invokes several resume operations
- listeners uint32
-
- // queue optional parameters
- attributes map[string]string
- tags map[string]string
-
- client *sqs.Client
- queueURL *string
-
- pauseCh chan struct{}
-}
-
-func NewSQSConsumer(configKey string, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_sqs_consumer")
-
- // if no such key - error
- if !cfg.Has(configKey) {
- return nil, errors.E(op, errors.Errorf("no configuration by provided key: %s", configKey))
- }
-
- // if no global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section"))
- }
-
- // PARSE CONFIGURATION -------
- var pipeCfg Config
- var globalCfg GlobalCfg
-
- err := cfg.UnmarshalKey(configKey, &pipeCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipeCfg.InitDefault()
-
- err = cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
-
- // initialize job consumer
- jb := &consumer{
- pq: pq,
- log: log,
- eh: e,
- messageGroupID: uuid.NewString(),
- attributes: pipeCfg.Attributes,
- tags: pipeCfg.Tags,
- queue: pipeCfg.Queue,
- prefetch: pipeCfg.Prefetch,
- visibilityTimeout: pipeCfg.VisibilityTimeout,
- waitTime: pipeCfg.WaitTimeSeconds,
- region: globalCfg.Region,
- key: globalCfg.Key,
- sessionToken: globalCfg.SessionToken,
- secret: globalCfg.Secret,
- endpoint: globalCfg.Endpoint,
- pauseCh: make(chan struct{}, 1),
- }
-
- // PARSE CONFIGURATION -------
-
- awsConf, err := config.LoadDefaultConfig(context.Background(),
- config.WithRegion(globalCfg.Region),
- config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken)))
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // config with retries
- jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) {
- o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) {
- opts.MaxAttempts = 60
- })
- })
-
- out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags})
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // assign a queue URL
- jb.queueURL = out.QueueUrl
-
- // To successfully create a new queue, you must provide a
- // queue name that adheres to the limits related to queues
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
- // and is unique within the scope of your queues. After you create a queue, you
- // must wait at least one second after the queue is created to be able to use the <------------
- // queue. To get the queue URL, use the GetQueueUrl action, which requires the queue name.
- time.Sleep(time.Second * 2)
-
- return jb, nil
-}
-
-func FromPipeline(pipe *pipeline.Pipeline, log logger.Logger, cfg cfgPlugin.Configurer, e events.Handler, pq priorityqueue.Queue) (*consumer, error) {
- const op = errors.Op("new_sqs_consumer")
-
- // if no global section
- if !cfg.Has(pluginName) {
- return nil, errors.E(op, errors.Str("no global sqs configuration, global configuration should contain sqs section"))
- }
-
- // PARSE CONFIGURATION -------
- var globalCfg GlobalCfg
-
- err := cfg.UnmarshalKey(pluginName, &globalCfg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- globalCfg.InitDefault()
-
- attr := make(map[string]string)
- err = pipe.Map(attributes, attr)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- tg := make(map[string]string)
- err = pipe.Map(tags, tg)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // initialize job consumer
- jb := &consumer{
- pq: pq,
- log: log,
- eh: e,
- messageGroupID: uuid.NewString(),
- attributes: attr,
- tags: tg,
- queue: aws.String(pipe.String(queue, "default")),
- prefetch: int32(pipe.Int(pref, 10)),
- visibilityTimeout: int32(pipe.Int(visibility, 0)),
- waitTime: int32(pipe.Int(waitTime, 0)),
- region: globalCfg.Region,
- key: globalCfg.Key,
- sessionToken: globalCfg.SessionToken,
- secret: globalCfg.Secret,
- endpoint: globalCfg.Endpoint,
- pauseCh: make(chan struct{}, 1),
- }
-
- // PARSE CONFIGURATION -------
-
- awsConf, err := config.LoadDefaultConfig(context.Background(),
- config.WithRegion(globalCfg.Region),
- config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(jb.key, jb.secret, jb.sessionToken)))
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // config with retries
- jb.client = sqs.NewFromConfig(awsConf, sqs.WithEndpointResolver(sqs.EndpointResolverFromURL(jb.endpoint)), func(o *sqs.Options) {
- o.Retryer = retry.NewStandard(func(opts *retry.StandardOptions) {
- opts.MaxAttempts = 60
- })
- })
-
- out, err := jb.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: jb.queue, Attributes: jb.attributes, Tags: jb.tags})
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- // assign a queue URL
- jb.queueURL = out.QueueUrl
-
- // To successfully create a new queue, you must provide a
- // queue name that adheres to the limits related to queues
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
- // and is unique within the scope of your queues. After you create a queue, you
- // must wait at least one second after the queue is created to be able to use the <------------
- // queue. To get the queue URL, use the GetQueueUrl action, which requires the queue name.
- time.Sleep(time.Second * 2)
-
- return jb, nil
-}
-
-func (c *consumer) Push(ctx context.Context, jb *job.Job) error {
- const op = errors.Op("sqs_push")
- // check if the pipeline is registered
-
- // load atomic value
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != jb.Options.Pipeline {
- return errors.E(op, errors.Errorf("no such pipeline: %s, actual: %s", jb.Options.Pipeline, pipe.Name()))
- }
-
- // The length of time, in seconds, for which to delay a specific message. Valid
- // values: 0 to 900. Maximum: 15 minutes.
- if jb.Options.Delay > 900 {
- return errors.E(op, errors.Errorf("unable to push, maximum possible delay is 900 seconds (15 minutes), provided: %d", jb.Options.Delay))
- }
-
- err := c.handleItem(ctx, fromJob(jb))
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-func (c *consumer) State(ctx context.Context) (*jobState.State, error) {
- const op = errors.Op("sqs_state")
- attr, err := c.client.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{
- QueueUrl: c.queueURL,
- AttributeNames: []types.QueueAttributeName{
- types.QueueAttributeNameApproximateNumberOfMessages,
- types.QueueAttributeNameApproximateNumberOfMessagesDelayed,
- types.QueueAttributeNameApproximateNumberOfMessagesNotVisible,
- },
- })
-
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
-
- out := &jobState.State{
- Pipeline: pipe.Name(),
- Driver: pipe.Driver(),
- Queue: *c.queueURL,
- Ready: ready(atomic.LoadUint32(&c.listeners)),
- }
-
- nom, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessages)])
- if err == nil {
- out.Active = int64(nom)
- }
-
- delayed, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesDelayed)])
- if err == nil {
- out.Delayed = int64(delayed)
- }
-
- nv, err := strconv.Atoi(attr.Attributes[string(types.QueueAttributeNameApproximateNumberOfMessagesNotVisible)])
- if err == nil {
- out.Reserved = int64(nv)
- }
-
- return out, nil
-}
-
-func (c *consumer) Register(_ context.Context, p *pipeline.Pipeline) error {
- c.pipeline.Store(p)
- return nil
-}
-
-func (c *consumer) Run(_ context.Context, p *pipeline.Pipeline) error {
- start := time.Now()
- const op = errors.Op("sqs_run")
-
- c.Lock()
- defer c.Unlock()
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p.Name() {
- return errors.E(op, errors.Errorf("no such pipeline registered: %s", pipe.Name()))
- }
-
- atomic.AddUint32(&c.listeners, 1)
-
- // start listener
- go c.listen(context.Background())
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-
- return nil
-}
-
-func (c *consumer) Stop(context.Context) error {
- start := time.Now()
- if atomic.LoadUint32(&c.listeners) > 0 {
- c.pauseCh <- struct{}{}
- }
-
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeStopped,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
- return nil
-}
-
-func (c *consumer) Pause(_ context.Context, p string) {
- start := time.Now()
- // load atomic value
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 0 {
- c.log.Warn("no active listeners, nothing to pause")
- return
- }
-
- atomic.AddUint32(&c.listeners, ^uint32(0))
-
- // stop consume
- c.pauseCh <- struct{}{}
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipePaused,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) Resume(_ context.Context, p string) {
- start := time.Now()
- // load atomic value
- pipe := c.pipeline.Load().(*pipeline.Pipeline)
- if pipe.Name() != p {
- c.log.Error("no such pipeline", "requested", p, "actual", pipe.Name())
- return
- }
-
- l := atomic.LoadUint32(&c.listeners)
- // no active listeners
- if l == 1 {
- c.log.Warn("sqs listener already in the active state")
- return
- }
-
- // start listener
- go c.listen(context.Background())
-
- // increase num of listeners
- atomic.AddUint32(&c.listeners, 1)
-
- c.eh.Push(events.JobEvent{
- Event: events.EventPipeActive,
- Driver: pipe.Driver(),
- Pipeline: pipe.Name(),
- Start: start,
- Elapsed: time.Since(start),
- })
-}
-
-func (c *consumer) handleItem(ctx context.Context, msg *Item) error {
- d, err := msg.pack(c.queueURL)
- if err != nil {
- return err
- }
- _, err = c.client.SendMessage(ctx, d)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func ready(r uint32) bool {
- return r > 0
-}
diff --git a/plugins/sqs/item.go b/plugins/sqs/item.go
deleted file mode 100644
index 969d8b5b..00000000
--- a/plugins/sqs/item.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package sqs
-
-import (
- "context"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/service/sqs"
- "github.com/aws/aws-sdk-go-v2/service/sqs/types"
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/jobs/job"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const (
- StringType string = "String"
- NumberType string = "Number"
- BinaryType string = "Binary"
- ApproximateReceiveCount string = "ApproximateReceiveCount"
-)
-
-var itemAttributes = []string{
- job.RRID,
- job.RRJob,
- job.RRDelay,
- job.RRPriority,
- job.RRHeaders,
-}
-
-type Item struct {
- // Job contains the name of the job broker (usually a PHP class).
- Job string `json:"job"`
-
- // Ident is a unique identifier of the job; it should be provided from outside
- Ident string `json:"id"`
-
- // Payload is string data (usually JSON) passed to Job broker.
- Payload string `json:"payload"`
-
- // Headers with key-values pairs
- Headers map[string][]string `json:"headers"`
-
- // Options contains set of PipelineOptions specific to job execution. Can be empty.
- Options *Options `json:"options,omitempty"`
-}
-
-// Options carry information about how to handle given job.
-type Options struct {
- // Priority is job priority, default - 10
- // pointer to distinguish 0 as a priority and nil as priority not set
- Priority int64 `json:"priority"`
-
- // Pipeline manually specified pipeline.
- Pipeline string `json:"pipeline,omitempty"`
-
- // Delay defines time duration to delay execution for. Defaults to none.
- Delay int64 `json:"delay,omitempty"`
-
- // Private ================
- approxReceiveCount int64
- queue *string
- receiptHandler *string
- client *sqs.Client
- requeueFn func(context.Context, *Item) error
-}
-
-// DelayDuration returns delay duration in a form of time.Duration.
-func (o *Options) DelayDuration() time.Duration {
- return time.Second * time.Duration(o.Delay)
-}
-
-func (i *Item) ID() string {
- return i.Ident
-}
-
-func (i *Item) Priority() int64 {
- return i.Options.Priority
-}
-
-// Body packs job payload into binary payload.
-func (i *Item) Body() []byte {
- return utils.AsBytes(i.Payload)
-}
-
-// Context packs job context (job, id) into binary payload.
- // Not used in SQS; MessageAttributes are used instead
-func (i *Item) Context() ([]byte, error) {
- ctx, err := json.Marshal(
- struct {
- ID string `json:"id"`
- Job string `json:"job"`
- Headers map[string][]string `json:"headers"`
- Pipeline string `json:"pipeline"`
- }{ID: i.Ident, Job: i.Job, Headers: i.Headers, Pipeline: i.Options.Pipeline},
- )
-
- if err != nil {
- return nil, err
- }
-
- return ctx, nil
-}
-
-func (i *Item) Ack() error {
- _, err := i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
- QueueUrl: i.Options.queue,
- ReceiptHandle: i.Options.receiptHandler,
- })
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (i *Item) Nack() error {
- // requeue message
- err := i.Options.requeueFn(context.Background(), i)
- if err != nil {
- return err
- }
-
- _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
- QueueUrl: i.Options.queue,
- ReceiptHandle: i.Options.receiptHandler,
- })
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (i *Item) Requeue(headers map[string][]string, delay int64) error {
- // overwrite the delay
- i.Options.Delay = delay
- i.Headers = headers
-
- // requeue message
- err := i.Options.requeueFn(context.Background(), i)
- if err != nil {
- return err
- }
-
- // Delete job from the queue only after successful requeue
- _, err = i.Options.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
- QueueUrl: i.Options.queue,
- ReceiptHandle: i.Options.receiptHandler,
- })
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func fromJob(job *job.Job) *Item {
- return &Item{
- Job: job.Job,
- Ident: job.Ident,
- Payload: job.Payload,
- Headers: job.Headers,
- Options: &Options{
- Priority: job.Options.Priority,
- Pipeline: job.Options.Pipeline,
- Delay: job.Options.Delay,
- },
- }
-}
-
-func (i *Item) pack(queue *string) (*sqs.SendMessageInput, error) {
- // pack headers map
- data, err := json.Marshal(i.Headers)
- if err != nil {
- return nil, err
- }
-
- return &sqs.SendMessageInput{
- MessageBody: aws.String(i.Payload),
- QueueUrl: queue,
- DelaySeconds: int32(i.Options.Delay),
- MessageAttributes: map[string]types.MessageAttributeValue{
- job.RRID: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Ident)},
- job.RRJob: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(i.Job)},
- job.RRDelay: {DataType: aws.String(StringType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Delay)))},
- job.RRHeaders: {DataType: aws.String(BinaryType), BinaryValue: data, BinaryListValues: nil, StringListValues: nil, StringValue: nil},
- job.RRPriority: {DataType: aws.String(NumberType), BinaryValue: nil, BinaryListValues: nil, StringListValues: nil, StringValue: aws.String(strconv.Itoa(int(i.Options.Priority)))},
- },
- }, nil
-}
-
-func (c *consumer) unpack(msg *types.Message) (*Item, error) {
- const op = errors.Op("sqs_unpack")
- // reserved
- if _, ok := msg.Attributes[ApproximateReceiveCount]; !ok {
- return nil, errors.E(op, errors.Str("failed to unpack the ApproximateReceiveCount attribute"))
- }
-
- for i := 0; i < len(itemAttributes); i++ {
- if _, ok := msg.MessageAttributes[itemAttributes[i]]; !ok {
- return nil, errors.E(op, errors.Errorf("missing queue attribute: %s", itemAttributes[i]))
- }
- }
-
- var h map[string][]string
- err := json.Unmarshal(msg.MessageAttributes[job.RRHeaders].BinaryValue, &h)
- if err != nil {
- return nil, err
- }
-
- delay, err := strconv.Atoi(*msg.MessageAttributes[job.RRDelay].StringValue)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- priority, err := strconv.Atoi(*msg.MessageAttributes[job.RRPriority].StringValue)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- recCount, err := strconv.Atoi(msg.Attributes[ApproximateReceiveCount])
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- item := &Item{
- Job: *msg.MessageAttributes[job.RRJob].StringValue,
- Ident: *msg.MessageAttributes[job.RRID].StringValue,
- Payload: *msg.Body,
- Headers: h,
- Options: &Options{
- Delay: int64(delay),
- Priority: int64(priority),
-
- // private
- approxReceiveCount: int64(recCount),
- client: c.client,
- queue: c.queueURL,
- receiptHandler: msg.ReceiptHandle,
- requeueFn: c.handleItem,
- },
- }
-
- return item, nil
-}
diff --git a/plugins/sqs/listener.go b/plugins/sqs/listener.go
deleted file mode 100644
index 215dd6a5..00000000
--- a/plugins/sqs/listener.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sqs
-
-import (
- "context"
- "time"
-
- "github.com/aws/aws-sdk-go-v2/aws/transport/http"
- "github.com/aws/aws-sdk-go-v2/service/sqs"
- "github.com/aws/aws-sdk-go-v2/service/sqs/types"
- "github.com/aws/smithy-go"
-)
-
-const (
- // All - get all message attribute names
- All string = "All"
-
- // NonExistentQueue AWS error code
- NonExistentQueue string = "AWS.SimpleQueueService.NonExistentQueue"
-)
-
-func (c *consumer) listen(ctx context.Context) { //nolint:gocognit
- for {
- select {
- case <-c.pauseCh:
- c.log.Warn("sqs listener stopped")
- return
- default:
- message, err := c.client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
- QueueUrl: c.queueURL,
- MaxNumberOfMessages: c.prefetch,
- AttributeNames: []types.QueueAttributeName{types.QueueAttributeName(ApproximateReceiveCount)},
- MessageAttributeNames: []string{All},
- // The new value for the message's visibility timeout (in seconds). Values range: 0
- // to 43200. Maximum: 12 hours.
- VisibilityTimeout: c.visibilityTimeout,
- WaitTimeSeconds: c.waitTime,
- })
-
- if err != nil {
- if oErr, ok := (err).(*smithy.OperationError); ok {
- if rErr, ok := oErr.Err.(*http.ResponseError); ok {
- if apiErr, ok := rErr.Err.(*smithy.GenericAPIError); ok {
- // in case of NonExistentQueue - recreate the queue
- if apiErr.Code == NonExistentQueue {
- c.log.Error("receive message", "error code", apiErr.ErrorCode(), "message", apiErr.ErrorMessage(), "error fault", apiErr.ErrorFault())
- _, err = c.client.CreateQueue(context.Background(), &sqs.CreateQueueInput{QueueName: c.queue, Attributes: c.attributes, Tags: c.tags})
- if err != nil {
- c.log.Error("create queue", "error", err)
- }
- // To successfully create a new queue, you must provide a
- // queue name that adheres to the limits related to the queues
- // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
- // and is unique within the scope of your queues. After you create a queue, you
- // must wait at least one second after the queue is created to be able to use the <------------
- // queue. To get the queue URL, use the GetQueueUrl action, which requires the queue name.
- time.Sleep(time.Second * 2)
- continue
- }
- }
- }
- }
-
- c.log.Error("receive message", "error", err)
- continue
- }
-
- for i := 0; i < len(message.Messages); i++ {
- m := message.Messages[i]
- item, err := c.unpack(&m)
- if err != nil {
- _, errD := c.client.DeleteMessage(context.Background(), &sqs.DeleteMessageInput{
- QueueUrl: c.queueURL,
- ReceiptHandle: m.ReceiptHandle,
- })
- if errD != nil {
- c.log.Error("message unpack, failed to delete the message from the queue", "error", err)
- }
-
- c.log.Error("message unpack", "error", err)
- continue
- }
-
- c.pq.Insert(item)
- }
- }
- }
-}
diff --git a/plugins/sqs/plugin.go b/plugins/sqs/plugin.go
deleted file mode 100644
index 54f61ff5..00000000
--- a/plugins/sqs/plugin.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package sqs
-
-import (
- "github.com/spiral/roadrunner/v2/common/jobs"
- "github.com/spiral/roadrunner/v2/pkg/events"
- priorityqueue "github.com/spiral/roadrunner/v2/pkg/priority_queue"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/jobs/pipeline"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- pluginName string = "sqs"
-)
-
-type Plugin struct {
- log logger.Logger
- cfg config.Configurer
-}
-
-func (p *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- p.log = log
- p.cfg = cfg
- return nil
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) Name() string {
- return pluginName
-}
-
-func (p *Plugin) JobsConstruct(configKey string, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return NewSQSConsumer(configKey, p.log, p.cfg, e, pq)
-}
-
-func (p *Plugin) FromPipeline(pipe *pipeline.Pipeline, e events.Handler, pq priorityqueue.Queue) (jobs.Consumer, error) {
- return FromPipeline(pipe, p.log, p.cfg, e, pq)
-}
diff --git a/plugins/static/config.go b/plugins/static/config.go
deleted file mode 100644
index c3f9c17d..00000000
--- a/plugins/static/config.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package static
-
-import (
- "os"
-
- "github.com/spiral/errors"
-)
-
-// Config describes file location and controls access to them.
-type Config struct {
- Static *struct {
- // Dir contains name of directory to control access to.
- // Default - "."
- Dir string
-
- // CalculateEtag can be true/false and enables etag calculation for static files
- CalculateEtag bool `mapstructure:"calculate_etag"`
-
- // Weak etag `W/`
- Weak bool
-
- // forbid specifies list of file extensions which are forbidden for access.
- // example: .php, .exe, .bat, .htaccess and etc.
- Forbid []string
-
- // Allow specifies list of file extensions which are allowed for access.
- // example: .php, .exe, .bat, .htaccess and etc.
- Allow []string
-
- // Request headers to add to every static.
- Request map[string]string
-
- // Response headers to add to every static.
- Response map[string]string
- }
-}
-
-// Valid returns nil if config is valid.
-func (c *Config) Valid() error {
- const op = errors.Op("static_plugin_valid")
- st, err := os.Stat(c.Static.Dir)
- if err != nil {
- if os.IsNotExist(err) {
-			return errors.E(op, errors.Errorf("root directory '%s' does not exist", c.Static.Dir))
- }
-
- return err
- }
-
- if !st.IsDir() {
- return errors.E(op, errors.Errorf("invalid root directory '%s'", c.Static.Dir))
- }
-
- return nil
-}
diff --git a/plugins/static/etag.go b/plugins/static/etag.go
deleted file mode 100644
index 5ee0d2f3..00000000
--- a/plugins/static/etag.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package static
-
-import (
- "hash/crc32"
- "io"
- "net/http"
-
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-const etag string = "Etag"
-
-// weak Etag prefix
-var weakPrefix = []byte(`W/`)
-
-// CRC32 table
-var crc32q = crc32.MakeTable(0x48D90782)
-
-// SetEtag sets etag for the file
-func SetEtag(weak bool, f http.File, name string, w http.ResponseWriter) {
- // preallocate
- calculatedEtag := make([]byte, 0, 64)
-
- // write weak
- if weak {
- calculatedEtag = append(calculatedEtag, weakPrefix...)
- calculatedEtag = append(calculatedEtag, '"')
- calculatedEtag = appendUint(calculatedEtag, crc32.Checksum(utils.AsBytes(name), crc32q))
- calculatedEtag = append(calculatedEtag, '"')
-
- w.Header().Set(etag, utils.AsString(calculatedEtag))
- return
- }
-
- // read the file content
- body, err := io.ReadAll(f)
- if err != nil {
- return
- }
-
- // skip for 0 body
- if len(body) == 0 {
- return
- }
-
- calculatedEtag = append(calculatedEtag, '"')
- calculatedEtag = appendUint(calculatedEtag, uint32(len(body)))
- calculatedEtag = append(calculatedEtag, '-')
- calculatedEtag = appendUint(calculatedEtag, crc32.Checksum(body, crc32q))
- calculatedEtag = append(calculatedEtag, '"')
-
- w.Header().Set(etag, utils.AsString(calculatedEtag))
-}
-
-// appendUint appends n to dst and returns the extended dst.
-func appendUint(dst []byte, n uint32) []byte {
- var b [20]byte
- buf := b[:]
- i := len(buf)
- var q uint32
- for n >= 10 {
- i--
- q = n / 10
- buf[i] = '0' + byte(n-q*10)
- n = q
- }
- i--
- buf[i] = '0' + byte(n)
-
- dst = append(dst, buf[i:]...)
- return dst
-}
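
As a hedged illustration of the weak value SetEtag writes above: the header is W/"<decimal CRC32 of the file name, using the table above>". A minimal, self-contained sketch (the file name is an arbitrary example, not taken from the plugin):

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// same polynomial table as the etag code above
	crc32q := crc32.MakeTable(0x48D90782)

	name := "index.html" // illustrative file name
	// weak variant: checksum of the file name only, rendered in decimal
	fmt.Printf("W/\"%d\"\n", crc32.Checksum([]byte(name), crc32q))
}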
diff --git a/plugins/static/plugin.go b/plugins/static/plugin.go
deleted file mode 100644
index f6d9a0f2..00000000
--- a/plugins/static/plugin.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package static
-
-import (
- "net/http"
- "path"
- "strings"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// PluginName contains default service name.
-const PluginName = "static"
-
-const RootPluginName = "http"
-
-// Plugin serves static files as HTTP middleware.
-type Plugin struct {
-	// server configuration (location, forbidden files, etc.)
-	cfg *Config
-
-	log logger.Logger
-
-	// root is the initiated http.Dir
- root http.Dir
-
- // file extensions which are allowed to be served
- allowedExtensions map[string]struct{}
-
- // file extensions which are forbidden to be served
- forbiddenExtensions map[string]struct{}
-}
-
-// Init configures the service and returns an error in case of misconfiguration.
-// Services must not be used without proper configuration pushed first.
-func (s *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- const op = errors.Op("static_plugin_init")
- if !cfg.Has(RootPluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(RootPluginName, &s.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- if s.cfg.Static == nil {
- return errors.E(op, errors.Disabled)
- }
-
- s.log = log
- s.root = http.Dir(s.cfg.Static.Dir)
-
- err = s.cfg.Valid()
- if err != nil {
- return errors.E(op, err)
- }
-
- // create 2 hashmaps with the allowed and forbidden file extensions
- s.allowedExtensions = make(map[string]struct{}, len(s.cfg.Static.Allow))
- s.forbiddenExtensions = make(map[string]struct{}, len(s.cfg.Static.Forbid))
-
- // init forbidden
- for i := 0; i < len(s.cfg.Static.Forbid); i++ {
- // skip empty lines
- if s.cfg.Static.Forbid[i] == "" {
- continue
- }
- s.forbiddenExtensions[s.cfg.Static.Forbid[i]] = struct{}{}
- }
-
- // init allowed
- for i := 0; i < len(s.cfg.Static.Allow); i++ {
- // skip empty lines
- if s.cfg.Static.Allow[i] == "" {
- continue
- }
- s.allowedExtensions[s.cfg.Static.Allow[i]] = struct{}{}
- }
-
- // check if any forbidden items presented in the allowed
- // if presented, delete such items from allowed
- for k := range s.forbiddenExtensions {
- delete(s.allowedExtensions, k)
- }
-
-	// at this point we have distinct allowed and forbidden hashmaps
- return nil
-}
-
-func (s *Plugin) Name() string {
- return PluginName
-}
-
-// Middleware serves the requested static file if it exists; otherwise the request is passed to the next handler.
-func (s *Plugin) Middleware(next http.Handler) http.Handler {
- // Define the http.HandlerFunc
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // do not allow paths like ../../resource
- // only specified folder and resources in it
- // https://lgtm.com/rules/1510366186013/
- if strings.Contains(r.URL.Path, "..") {
- w.WriteHeader(http.StatusForbidden)
- return
- }
-
- if s.cfg.Static.Request != nil {
- for k, v := range s.cfg.Static.Request {
- r.Header.Add(k, v)
- }
- }
-
- if s.cfg.Static.Response != nil {
- for k, v := range s.cfg.Static.Response {
- w.Header().Set(k, v)
- }
- }
-
- // first - create a proper file path
- fPath := path.Clean(r.URL.Path)
- ext := strings.ToLower(path.Ext(fPath))
-
-		// check whether the file extension is in the forbidden list
- if _, ok := s.forbiddenExtensions[ext]; ok {
- s.log.Debug("file extension is forbidden", "ext", ext)
- next.ServeHTTP(w, r)
- return
- }
-
- // if we have some allowed extensions, we should check them
- // if not - all extensions allowed except forbidden
- if len(s.allowedExtensions) > 0 {
- // not found in allowed
- if _, ok := s.allowedExtensions[ext]; !ok {
- next.ServeHTTP(w, r)
- return
- }
-
- // file extension allowed
- }
-
- // ok, file is not in the forbidden list
- // Stat it and get file info
- f, err := s.root.Open(fPath)
- if err != nil {
-			// no such file, show the error in logs only in debug mode
- s.log.Debug("no such file or directory", "error", err)
- // pass request to the worker
- next.ServeHTTP(w, r)
- return
- }
-
-		// an error is unlikely at this point,
-		// because the file was successfully opened above and therefore exists
- finfo, err := f.Stat()
- if err != nil {
-			// failed to stat the file, show the error in logs only in debug mode
- s.log.Debug("no such file or directory", "error", err)
- // pass request to the worker
- next.ServeHTTP(w, r)
- return
- }
-
- defer func() {
- err = f.Close()
- if err != nil {
- s.log.Error("file close error", "error", err)
- }
- }()
-
- // if provided path to the dir, do not serve the dir, but pass the request to the worker
- if finfo.IsDir() {
- s.log.Debug("possible path to dir provided")
- // pass request to the worker
- next.ServeHTTP(w, r)
- return
- }
-
- // set etag
- if s.cfg.Static.CalculateEtag {
- SetEtag(s.cfg.Static.Weak, f, finfo.Name(), w)
- }
-
- // we passed all checks - serve the file
- http.ServeContent(w, r, finfo.Name(), finfo.ModTime(), f)
- })
-}
diff --git a/plugins/status/config.go b/plugins/status/config.go
deleted file mode 100644
index f751898b..00000000
--- a/plugins/status/config.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package status
-
-import "net/http"
-
-// Config is the configuration reference for the Status plugin
-type Config struct {
- // Address of the http server
- Address string
- // Status code returned in case of fail, 503 by default
- UnavailableStatusCode int `mapstructure:"unavailable_status_code"`
-}
-
-// InitDefaults sets default configuration options
-func (c *Config) InitDefaults() {
- if c.UnavailableStatusCode == 0 {
- c.UnavailableStatusCode = http.StatusServiceUnavailable
- }
-}
diff --git a/plugins/status/interface.go b/plugins/status/interface.go
deleted file mode 100644
index 9d5a13af..00000000
--- a/plugins/status/interface.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package status
-
-// Status consists of status code from the service
-type Status struct {
- Code int
-}
-
-// Checker interface used to get latest status from plugin
-type Checker interface {
- Status() Status
-}
-
-// Readiness interface used to get the readiness status from the plugin:
-// it means that the worker pool inside the plugin has 1+ workers which are ready to work
-// at that particular moment
-type Readiness interface {
- Ready() Status
-}
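
For context, a minimal sketch (not part of this change) of how a plugin can satisfy the Checker and Readiness interfaces above; the package name and the ready field are illustrative, and the import path is the pre-move one shown in this diff:

package myplugin

import (
	"net/http"

	"github.com/spiral/roadrunner/v2/plugins/status"
)

type Plugin struct {
	ready bool
}

// Status reports the overall health of the plugin.
func (p *Plugin) Status() status.Status {
	return status.Status{Code: http.StatusOK}
}

// Ready reports whether the plugin's worker pool can accept work.
func (p *Plugin) Ready() status.Status {
	if !p.ready {
		return status.Status{Code: http.StatusServiceUnavailable}
	}
	return status.Status{Code: http.StatusOK}
}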
diff --git a/plugins/status/plugin.go b/plugins/status/plugin.go
deleted file mode 100644
index b76ad0a3..00000000
--- a/plugins/status/plugin.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package status
-
-import (
- "fmt"
- "net/http"
- "time"
-
- "github.com/gofiber/fiber/v2"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const (
- // PluginName declares public plugin name.
- PluginName = "status"
-)
-
-type Plugin struct {
-	// plugins which need to be checked just for Status
-	statusRegistry map[string]Checker
-	// plugins which need to report Readiness status
-	readyRegistry map[string]Readiness
- server *fiber.App
- log logger.Logger
- cfg *Config
-}
-
-func (c *Plugin) Init(log logger.Logger, cfg config.Configurer) error {
- const op = errors.Op("checker_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
- err := cfg.UnmarshalKey(PluginName, &c.cfg)
- if err != nil {
- return errors.E(op, errors.Disabled, err)
- }
-
- // init defaults for the status plugin
- c.cfg.InitDefaults()
-
- c.readyRegistry = make(map[string]Readiness)
- c.statusRegistry = make(map[string]Checker)
-
- c.log = log
-
- return nil
-}
-
-func (c *Plugin) Serve() chan error {
- errCh := make(chan error, 1)
- c.server = fiber.New(fiber.Config{
- ReadTimeout: time.Second * 5,
- WriteTimeout: time.Second * 5,
- IdleTimeout: time.Second * 5,
- DisableStartupMessage: true,
- })
-
- c.server.Use("/health", c.healthHandler)
- c.server.Use("/ready", c.readinessHandler)
-
- go func() {
- err := c.server.Listen(c.cfg.Address)
- if err != nil {
- errCh <- err
- }
- }()
-
- return errCh
-}
-
-func (c *Plugin) Stop() error {
- const op = errors.Op("checker_plugin_stop")
- err := c.server.Shutdown()
- if err != nil {
- return errors.E(op, err)
- }
- return nil
-}
-
-// status returns the current Status of the named service,
-// collected via its Checker interface implementation
-func (c *Plugin) status(name string) (Status, error) {
- const op = errors.Op("checker_plugin_status")
- svc, ok := c.statusRegistry[name]
- if !ok {
- return Status{}, errors.E(op, errors.Errorf("no such plugin: %s", name))
- }
-
- return svc.Status(), nil
-}
-
-// ready used to provide a readiness check for the plugin
-func (c *Plugin) ready(name string) (Status, error) {
- const op = errors.Op("checker_plugin_ready")
- svc, ok := c.readyRegistry[name]
- if !ok {
- return Status{}, errors.E(op, errors.Errorf("no such plugin: %s", name))
- }
-
- return svc.Ready(), nil
-}
-
-// CollectCheckerImpls collects services which can provide Status.
-func (c *Plugin) CollectCheckerImpls(name endure.Named, r Checker) error {
- c.statusRegistry[name.Name()] = r
- return nil
-}
-
-// CollectReadinessImpls collects services which can provide Readiness check.
-func (c *Plugin) CollectReadinessImpls(name endure.Named, r Readiness) error {
- c.readyRegistry[name.Name()] = r
- return nil
-}
-
-// Collects declares services to be collected.
-func (c *Plugin) Collects() []interface{} {
- return []interface{}{
- c.CollectReadinessImpls,
- c.CollectCheckerImpls,
- }
-}
-
-// Name of the service.
-func (c *Plugin) Name() string {
- return PluginName
-}
-
-// Available interface implementation
-func (c *Plugin) Available() {}
-
-// RPC returns associated rpc service.
-func (c *Plugin) RPC() interface{} {
- return &rpc{srv: c, log: c.log}
-}
-
-type Plugins struct {
- Plugins []string `query:"plugin"`
-}
-
-const template string = "Service: %s: Status: %d\n"
-
-func (c *Plugin) healthHandler(ctx *fiber.Ctx) error {
- const op = errors.Op("checker_plugin_health_handler")
- plugins := &Plugins{}
- err := ctx.QueryParser(plugins)
- if err != nil {
- return errors.E(op, err)
- }
-
- if len(plugins.Plugins) == 0 {
- ctx.Status(http.StatusOK)
-		_, _ = ctx.WriteString("No plugins provided in the query. The query should be in the form: health?plugin=plugin1&plugin=plugin2 \n")
- return nil
- }
-
- // iterate over all provided plugins
- for i := 0; i < len(plugins.Plugins); i++ {
- // check if the plugin exists
- if plugin, ok := c.statusRegistry[plugins.Plugins[i]]; ok {
- st := plugin.Status()
- if st.Code >= 500 {
-				// if there is a 5xx status code, return immediately
- ctx.Status(c.cfg.UnavailableStatusCode)
- return nil
- } else if st.Code >= 100 && st.Code <= 400 {
- _, _ = ctx.WriteString(fmt.Sprintf(template, plugins.Plugins[i], st.Code))
- }
- } else {
- _, _ = ctx.WriteString(fmt.Sprintf("Service: %s not found", plugins.Plugins[i]))
- }
- }
-
- ctx.Status(http.StatusOK)
- return nil
-}
-
-// readinessHandler returns 200 OK if all requested plugins are ready to serve;
-// if one of the plugins returns a status from the 5xx range, the status for the whole query will be 503
-func (c *Plugin) readinessHandler(ctx *fiber.Ctx) error {
- const op = errors.Op("checker_plugin_readiness_handler")
- plugins := &Plugins{}
- err := ctx.QueryParser(plugins)
- if err != nil {
- return errors.E(op, err)
- }
-
- if len(plugins.Plugins) == 0 {
- ctx.Status(http.StatusOK)
-		_, _ = ctx.WriteString("No plugins provided in the query. The query should be in the form: ready?plugin=plugin1&plugin=plugin2 \n")
- return nil
- }
-
- // iterate over all provided plugins
- for i := 0; i < len(plugins.Plugins); i++ {
- // check if the plugin exists
- if plugin, ok := c.readyRegistry[plugins.Plugins[i]]; ok {
- st := plugin.Ready()
- if st.Code >= 500 {
-				// if there is a 5xx status code, return immediately
- ctx.Status(c.cfg.UnavailableStatusCode)
- return nil
- } else if st.Code >= 100 && st.Code <= 400 {
- _, _ = ctx.WriteString(fmt.Sprintf(template, plugins.Plugins[i], st.Code))
- }
- } else {
- _, _ = ctx.WriteString(fmt.Sprintf("Service: %s not found", plugins.Plugins[i]))
- }
- }
-
- ctx.Status(http.StatusOK)
- return nil
-}
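
A hedged usage sketch for the handlers above: querying the health endpoint over HTTP. The address is whatever is configured under the status section; the host/port below is an assumption for illustration only.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// query two plugins at once, as described by the health handler above
	resp, err := http.Get("http://127.0.0.1:2114/health?plugin=http&plugin=rpc")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body))
}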
diff --git a/plugins/status/rpc.go b/plugins/status/rpc.go
deleted file mode 100644
index 755a06fa..00000000
--- a/plugins/status/rpc.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package status
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-type rpc struct {
- srv *Plugin
- log logger.Logger
-}
-
-// Status returns the current status of the provided plugin
-func (rpc *rpc) Status(service string, status *Status) error {
- const op = errors.Op("checker_rpc_status")
- rpc.log.Debug("started Status method", "service", service)
- st, err := rpc.srv.status(service)
- if err != nil {
- return errors.E(op, err)
- }
-
- *status = st
-
- rpc.log.Debug("status code", "code", st.Code)
- rpc.log.Debug("successfully finished the Status method")
- return nil
-}
-
-// Ready returns the readiness status of the provided plugin
-func (rpc *rpc) Ready(service string, status *Status) error {
- const op = errors.Op("checker_rpc_ready")
- rpc.log.Debug("started Ready method", "service", service)
- st, err := rpc.srv.ready(service)
- if err != nil {
- return errors.E(op, err)
- }
-
- *status = st
-
- rpc.log.Debug("status code", "code", st.Code)
- rpc.log.Debug("successfully finished the Ready method")
- return nil
-}
diff --git a/plugins/websockets/commands/enums.go b/plugins/websockets/commands/enums.go
deleted file mode 100644
index 18c63be3..00000000
--- a/plugins/websockets/commands/enums.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package commands
-
-type Command string
-
-const (
- Leave string = "leave"
- Join string = "join"
- Headers string = "headers"
-)
diff --git a/plugins/websockets/config.go b/plugins/websockets/config.go
deleted file mode 100644
index 933a12e0..00000000
--- a/plugins/websockets/config.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package websockets
-
-import (
- "strings"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/pool"
-)
-
-/*
-websockets:
- broker: default
- allowed_origin: "*"
- path: "/ws"
-*/
-
-// Config represents configuration for the ws plugin
-type Config struct {
- // http path for the websocket
- Path string `mapstructure:"path"`
- AllowedOrigin string `mapstructure:"allowed_origin"`
- Broker string `mapstructure:"broker"`
-
- // wildcard origin
- allowedWOrigins []wildcard
- allowedOrigins []string
- allowedAll bool
-
- // Pool with the workers for the websockets
- Pool *pool.Config `mapstructure:"pool"`
-}
-
-// InitDefault initializes default values for the ws config
-func (c *Config) InitDefault() error {
- if c.Path == "" {
- c.Path = "/ws"
- }
-
- // broker is mandatory
- if c.Broker == "" {
- return errors.Str("broker key should be specified")
- }
-
-	if c.Pool == nil {
-		c.Pool = &pool.Config{}
-	}
-
-	if c.Pool.NumWorkers == 0 {
-		// 2 workers by default
-		c.Pool.NumWorkers = 2
-	}
-
-	if c.Pool.AllocateTimeout == 0 {
-		c.Pool.AllocateTimeout = time.Minute
-	}
-
-	if c.Pool.DestroyTimeout == 0 {
-		c.Pool.DestroyTimeout = time.Minute
-	}
-
-	if c.Pool.Supervisor != nil {
-		c.Pool.Supervisor.InitDefaults()
-	}
-
- if c.AllowedOrigin == "" {
- c.AllowedOrigin = "*"
- }
-
- // Normalize
- origin := strings.ToLower(c.AllowedOrigin)
- if origin == "*" {
-		// If "*" is provided, allow all origins
- c.allowedAll = true
- return nil
- } else if i := strings.IndexByte(origin, '*'); i >= 0 {
- // Split the origin in two: start and end string without the *
- w := wildcard{origin[0:i], origin[i+1:]}
- c.allowedWOrigins = append(c.allowedWOrigins, w)
- } else {
- c.allowedOrigins = append(c.allowedOrigins, origin)
- }
-
- return nil
-}
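
A self-contained sketch of the prefix/suffix interpretation of a wildcard allowed_origin performed above (the same check the wildcard type later in this diff implements); the patterns and origins are illustrative:

package main

import (
	"fmt"
	"strings"
)

// matchWildcard mirrors the split done in InitDefault and the
// prefix/suffix check used by the plugin's wildcard type.
func matchWildcard(pattern, origin string) bool {
	i := strings.IndexByte(pattern, '*')
	if i < 0 {
		return pattern == origin
	}
	prefix, suffix := pattern[:i], pattern[i+1:]
	return len(origin) >= len(prefix)+len(suffix) &&
		strings.HasPrefix(origin, prefix) &&
		strings.HasSuffix(origin, suffix)
}

func main() {
	fmt.Println(matchWildcard("https://*my.site.com", "https://some.my.site.com")) // true
	fmt.Println(matchWildcard("https://*my.site.com", "http://google.com"))        // false
}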
diff --git a/plugins/websockets/connection/connection.go b/plugins/websockets/connection/connection.go
deleted file mode 100644
index 04c29d83..00000000
--- a/plugins/websockets/connection/connection.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package connection
-
-import (
- "sync"
-
- "github.com/fasthttp/websocket"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// Connection is a wrapper around a websocket connection that is safe to use from different goroutines
-type Connection struct {
- sync.RWMutex
- log logger.Logger
- conn *websocket.Conn
-}
-
-func NewConnection(wsConn *websocket.Conn, log logger.Logger) *Connection {
- return &Connection{
- conn: wsConn,
- log: log,
- }
-}
-
-func (c *Connection) Write(data []byte) error {
- c.Lock()
- defer c.Unlock()
-
- const op = errors.Op("websocket_write")
- // handle a case when a goroutine tried to write into the closed connection
- defer func() {
- if r := recover(); r != nil {
- c.log.Warn("panic handled, tried to write into the closed connection")
- }
- }()
-
- err := c.conn.WriteMessage(websocket.TextMessage, data)
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
-
-func (c *Connection) Read() (int, []byte, error) {
- const op = errors.Op("websocket_read")
-
- mt, data, err := c.conn.ReadMessage()
- if err != nil {
- return -1, nil, errors.E(op, err)
- }
-
- return mt, data, nil
-}
-
-func (c *Connection) Close() error {
- c.Lock()
- defer c.Unlock()
- const op = errors.Op("websocket_close")
-
- err := c.conn.Close()
- if err != nil {
- return errors.E(op, err)
- }
-
- return nil
-}
diff --git a/plugins/websockets/doc/broadcast.drawio b/plugins/websockets/doc/broadcast.drawio
deleted file mode 100644
index 230870f2..00000000
--- a/plugins/websockets/doc/broadcast.drawio
+++ /dev/null
@@ -1 +0,0 @@
-<mxfile host="Electron" modified="2021-05-27T20:56:56.848Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/14.5.1 Chrome/89.0.4389.128 Electron/12.0.9 Safari/537.36" etag="Pt0MY_-SPz7R7foQA1VL" version="14.5.1" type="device"><diagram id="fD2kwGC0DAS2S_q_IsmE" name="Page-1">7V1Zc9rIFv411HUeULV28WgwTjxjx06wJ8l9mRKoAU2ExEjCS3797W4t9IbAoMZLrqcqg1oLrT77OV8fOuZg8fgx9ZfzqySAUccAwWPHPOsYhmECC/0PjzwVI7ppesXILA2Dcmw9MAp/wXIQlKOrMIAZc2GeJFEeLtnBSRLHcJIzY36aJg/sZdMkYr916c+gMDCa+JE4+i0M8nkxagIA1ic+wXA2z/kzC7+6uhzI5n6QPFBD5rBjDtIkyYtPi8cBjPDyVQtT3He+4Ww9sxTG+S43PFlXl7c/+n48uTWjn1/SEMSn3V45t/ypemMYoAUoD5M0nyezJPaj4Xq0vx69TJIlukxHg//APH8q6eev8gQNzfNFVJ6Fj2H+nfr8A30GmmGXh2eYYUB18FQdxHn69J0+oG/Dx+v7yFF1o7gy5WJlySqdwIblqFjMT2cwb7qupCleLOobyoX/CJMFRBNCF6Qw8vPwnuUmv2TKWX1dfetNEqI5G6CUIN2y7eKeSoB0y2AfUsy1vG9NfvSBmsh6iDCFnEGuvFUc/53d9od/zbI/xr9On7y7rm7swSFpsooDGJTkOIBfKB75QZ3axi8Ui/xgOETOLxlaw/wUaws0ECcxrMbOQ7xYFE9x9FbLZNaBLMWwwrPp7hQPvvejVflVlpQTLv0xUvoM9fwonMXo8wQtDUzRwD1M8xAp1dPyxCIMgoJRYBb+8sfkeXiVl5iNyavY/Y59Vq87fgB8ZN641PjlzWs1S1OkgaPFdS0f3wUacD2PkbqKZPtKc/WYHntHMp1mUInUms7LSq1LSy3YUWpZLe9u0/IvL7XSpTeclxTbyjpQYvsV+gEa+Tz8fnvyATsmMMuw14O8oTRZ4Fea44PlapytxnhOMb48g3GQ4W/LictVX/aQpD9his8skR/2PjSCuYFk5eOBpjuGyyoE4zCFUOka1+H0jHssDWHrAqPIdcZ7oybS77Zlm+y6m62Qk+UR3VNAyybVRJHy27A/uh78ObwdofFPd2iVwclo+PWvIfpwfvd58EEg9cM8zOFo6RMd+ICiOZbkU6RTB0mUpORqM/ChN50QhZsmPyF1xpl4cDzFdyRxTo0D8tfknAs8sJGEumFxXrFhlwL5sA7SdKvkmDkVnzlAlb/0igzuzm7ys7zkFm2psaMt1e2XtKWiBzzy7+FaNYVJXJvLB58Yyili98pSjtPEDyZ+lq9tbvY+NGyzdALNNAxWPtsyl6brauyTLbsaOILJNAWG8H4HgiKLaRkeR9EDI6Ly0Y6hWazVdLRqmdUT1Ph/buOlNPvB+bKDNLttCZIsT4S+OUk2mpMbiHt6wFChm3X2qci3UJHsaOI52v3Fbu8VocMDWjGBtM9zdqc2/k/q7JI/0dkt/tpydh1W9TpeyQy0rwskvq6lzNcFwooeUW/uWUOgEkq1ut1Vb04iP8vCCac69XZVpyWqzs3R3qGVBvRm/hN1QambNos3qLiuruSVeuR89ztc0HyHaTffgT4U8243V/rCXsDuTsAutnxvR+GtcrKYhelxXGT1elqP+uMChg0FtOdKSM/hmbfXzO7CPJ99g+5tESiDF0HuDjUCVUkxZZDt9+Fdmc3eFQqUXNdhS0ftZBaRyXW0qgBXh2A69xx1LpboL38dnl2M8PcY4OJz92p4df31x6F+1hQ6E2lSMXB7Y5I8VJhU7AFOVoyeJKdoKPKzTOvvs+ki/1N/7D1G/t/9weWPv8r0F7Ps5h626hDrxPha7o6+ls7Yne2VvP0tTeWJ7mZqBPPWVPKxqpCw4gf7MEk+QhlA1Ltfb9CjQP9udKBoQj2woSsTzZ7jmr4kBGo13+/slO+XyWYb+f7B2Y9o8V83eFwtT/8MgssvZ3nW3afAvo8g7pmsf369ZichlC7FrngWY0cZbD0HJJ21JwjLoMQQMvXuj9d4ynGwLMX4hCmaI09iGs7EatrRvBuaXk18urMX1OTcOI7L6sS28vo9U+PTy0dLAleahGIE9+Xouae3upn0DSbO0S2dXXWrFXLqntbj4BK6oTmGAoo26SOKoAI9/WxZoIWn4SOO4mmiiqQRyBwuCGyY2LxSdevGevwsXMzQzKNwjP71f61SiF9yBmOY+mj2532MS4aplt3PWjKQllAYs3UxS+jVF9Em0lXmvu6TWGktlbIr6kx7Vlpwfytp7mgl24cWH0REQ3Qq5WQ9urlr5LnD7R3QDNNWUvTk4gqkGlWUSpqYkCLmzV3/8mL0SSDpM+MDc+IBIPMv+6Bny0L3KflrKz5wdI2DHCDbtqsCNJUpQNES7QMTeuUqcfcK8z5q0xbVZhNn7x7gG5xog31k+9llFBfwbFr6RRtzuNvuYHO4ivINTaShlcmnmwMViQ9swwikgeqZfq5ekfCJBssVtYg00dCGEpGuck9Y5S46vFvOUj8QwGgkcH1ADOhEaF79cYo+zfAnfE+JXitx3HCcJZOfmDOYR0hvxNJMrlssCrhbhBURFhY/COMZfu3iRWJ0STGwRryxMHPZ5ASeQeTK5a5EBKf5YY4EVpZZ6bO3xDWmy1Xoq2OKaUxHwjS2MssjMA2BRAyiEJZVrmeIaBtLZLlsWcSWgBg894i2WRSrii+XzOI4/67wBs3+AhmgELHaKToLlo/oX7IyoBjv5tgw43MWdQ6zcbfkXHyu9IOZ0wGcJKlPZI9cg8vKaRQiK1p/9VpMSsGpBq7qnRxIW64m+QpDXYpr0JqM+fvQ2JIfm6f8yF6vX4glPunhk/zMb5NlOCH3nBWTJTpi86xUzKGP7UkqTAKcpDAIkVIaEKW1SNKnD6KGuvGfooRsoynuTv0H9O/4KYdSdXbcNxvUavnkH6TuileJINH2AyJPyFKk2Qf5vDhdwGtWwti8+k3QVdOIOHxT4uQJmRB8fO4vwgjL+ycY3UP81Pb1isd7R6YHtCp5SKtfmc32epprK9IvkmKM6PrHAec3SzP/Haaatq2YJodJNVfu9nfPJVmNJnW71T2nSGY3GMxDM5Suw/t5vCku3lxAhUiSnTr/KMA9SvEO7X1KworAePvt53/WRs+2oXhyFK3I1ZsrWkfIzMm3egoKZhgHxAVAUkRunyRRVBa05tjEgQUFiS03rsDHZZLhwyJ+ID76PM+xnVpGK2yVXizZ1ypyp9GMdIGGTQQjx23VtnTd1mzu0dXxEbY4CEzy6fYWBeng5vLu48VngbisYtgSGAgk56vONvQCSxbMe8bYJMDpjZQWxHazDwAqW1uHXzvG7
Mo2A7rCql8slhFcQJIcAjer8YhstQ7xuk3x8u4eAbchX9IY+CAiiGBGS0IEV0IEQxURJNlXtPBRmM07TIV/XAYGb0zNbRMKE2gAWJZnuLbnGIDbm6yO9y051vNV+yPPw9ao21Il52MJnFpN54n9UtoOJ/qeswWVLNywBfeMXOxe0w1qUMyWuP13UOdJL87eyVZea8MO67VjZFoWB/ppCSUCND5teiynyBbt8waswjukJ+C38raEUDcNjQuqu0drZqKLsdAn5GF0l8i7StIFCtqwoe+PbgmFk7RImj6E+bz2AnKcoszqsAgFmckk9HMYdOj6iEzuW3WZp94EyjHwY8/GSYp2XGabQzZ45ku7zJYcdPfKG2i4r9pxkNTOX2PLKhGJ+5GUJf1JvsJ4OfCQdZtKlOj//gLLWTzOSLK66MiRIkEkK1el7pG+Kr3+xTtR9u42ZW/x+7LbQTDZumZ5ki11VVMOXVPR+0huy8VCXmUNfgPymlzyua3N/JZmHY2Ckl1Ot3OMLOhm/pSkI/1s3l34S95k/4RP2GB3RQtdGnHyxOoSRokotuIO8Hu6dLuMceY6O6BYDrPurskGWK4n4hHqDsttQ4HlNBY97nUupm5I+MZzYLrt8sVI2TYl2cKrS4KJxrXYEzZeETnJc38yJ04u3eORtHeUZ//fHFEsfl+nqYtEMWT9M9QRRbRZBVEmBYyKqKsKn5VptaS8eVK4HCks3daALoqId0xqSBDulY8ZhPc8gCNb+nE1RjBVlZwgN/snMTZsVplFv617r57UeJRpCKOABmfQX0ENs5OReLwYyJJW9T4/Z76Okm5K5jekvo8OvzuIrTyhLUJPDGZNU8JSbYDL5NXGV1STV9Vk/8jBbHXh9p6RL9o23ZRkopB0Rljw/rjGBVes5y+Hp6QVawmolWDIRnf90eDrRR9ddX73mTp6D4GNubUajzxotkPvgZvvyycbKK5htzzyKkBhoCqGORv6aL03egJNdw122VtquNw1WC7Rdc1V0ROlUSPRAJzvw8Hd7fVXUZ7LJuwUKv6kEn7ck73A06sMSfWxr0NDFpIC4AxPzxWHpHzHZk9io6Vun7KEsyHW99RSwEdBhiFvI3he9spQmhSwPb7gptc/VvRiaX9DTAy8byo4EipIdioemQpiluB9U8GVUMF4cSrI9qS8Zyp4Eiq8eCHSfOFC5H5NoRXt2G0ldKtI+spDN0vcHF/UIetWP0yJge0DtMpyclhgDMZPHQZXIOWot+fmb61H2U6V8D7YsXc1dsOgfrxATXQMKyjpb0BBg2vx1E7BuKv3+Mi7q6R7pfy15T/T8KqxJVwPxVeWjdtdpR/6Cy77YVI9j+PiyofYhDE1gJBFfu4dHnuDIlSqmF8sjBSHkWHNE1VBp0zTMk3uw2AHhNvb1HXeNl3nuNxvj7Rju1yDZSTjaIZLjOJ0eSHiHRLTtjwFtOxSP2RR2a2jAWMqJfvbxIM9STwo20Z03HhQTPPyv4EJTj5eiz1A31x53uWQEj3ZPrqjolcsMSNyCWcwDkg3hE19F0ihXNJlgW/HQYrZG7ts0FV4XeuQXXs8YIm1sqT7EfCXy+fW8hXN2sCzptA+r2JSJp7UunFJhVngIUg1fAI36gynIT7PYh7C9X7lRTbrUEDfV/GeFssyslejO4+EcZdrPnLE+dt4rt9qwFGHArVgHOYa1LKJ8av9rJ/R15W/MYzCpYD1MSnPs/QDyHCFg1H3eg7LcstVNicVuDUevFP9rvF/KKX+ZQVXNXRmXDrME/QYGKicrYtnWzVqqh1z0qqGmi6BHq3bUqubjkemQ0cWqyVpPcYJ7EPJyKs4/Jcs293dxRmxCRn6hvXV+Gcy8XUFZIn8aGZNEOpxVMRykq/RwB9UvmoPv+rwEU6QaU1L3u8YjZ3MJK3TVMxMB3hq59SPi27EkjBzIhDPlIC+AkKx8xQpmfuaGufMC/VHt91qG5TCd8HGlFXlWIVcX18Wd90U0jfjOtMVSVd59Frv3eJfR+l7FOYV5qs0Ltmd4+FCjyicAbGlTXtlhDXJBWA9kTGFcyR28FvDXhyeYeVzeeO+NY98le23s3sy15rPe+/gW2MxTzAV1sEoCgXnV0mAg/7h/wA=</diagram></mxfile> \ No newline at end of file
diff --git a/plugins/websockets/doc/doc.go b/plugins/websockets/doc/doc.go
deleted file mode 100644
index fc214be8..00000000
--- a/plugins/websockets/doc/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package doc
-
-/*
-RPC message structure:
-
-type Msg struct {
-	// Topics the message has been pushed into.
- Topics_ []string `json:"topic"`
-
- // Command (join, leave, headers)
- Command_ string `json:"command"`
-
- // Broker (redis, memory)
- Broker_ string `json:"broker"`
-
- // Payload to be broadcasted
- Payload_ []byte `json:"payload"`
-}
-
-1. Topics - string array (slice) with topics to join or leave
-2. Command - string, command to apply on the provided topics
-3. Broker - string, pub-sub broker to use. For single-node systems the `memory` or `redis` broker may be used; for multi-node setups
-the `redis` broker should be used.
-4. Payload - raw byte array to send to the subscribers (binary messages).
-
-
-*/
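
A minimal sketch of the RPC message shape described above, marshaled to JSON; the struct here only mirrors the commented definition, and the topic, command, broker, and payload values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

type Msg struct {
	Topics  []string `json:"topic"`
	Command string   `json:"command"`
	Broker  string   `json:"broker"`
	Payload []byte   `json:"payload"`
}

func main() {
	m := Msg{
		Topics:  []string{"news", "updates"},
		Command: "join",
		Broker:  "memory",
		Payload: []byte(`{"hello":"world"}`),
	}

	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}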
diff --git a/plugins/websockets/executor/executor.go b/plugins/websockets/executor/executor.go
deleted file mode 100644
index c1f79a78..00000000
--- a/plugins/websockets/executor/executor.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package executor
-
-import (
- "fmt"
- "net/http"
- "sync"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/websockets/commands"
- "github.com/spiral/roadrunner/v2/plugins/websockets/connection"
- "github.com/spiral/roadrunner/v2/plugins/websockets/validator"
- websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta"
-)
-
-type Response struct {
- Topic string `json:"topic"`
- Payload []string `json:"payload"`
-}
-
-type Executor struct {
- sync.Mutex
- // raw ws connection
- conn *connection.Connection
- log logger.Logger
-
- // associated connection ID
- connID string
-
- // subscriber drivers
- sub pubsub.Subscriber
- actualTopics map[string]struct{}
-
- req *http.Request
- accessValidator validator.AccessValidatorFn
-}
-
-// NewExecutor creates protected connection and starts command loop
-func NewExecutor(conn *connection.Connection, log logger.Logger,
- connID string, sub pubsub.Subscriber, av validator.AccessValidatorFn, r *http.Request) *Executor {
- return &Executor{
- conn: conn,
- connID: connID,
- log: log,
- sub: sub,
- accessValidator: av,
- actualTopics: make(map[string]struct{}, 10),
- req: r,
- }
-}
-
-func (e *Executor) StartCommandLoop() error { //nolint:gocognit
- const op = errors.Op("executor_command_loop")
- for {
- mt, data, err := e.conn.Read()
- if err != nil {
- if mt == -1 {
- e.log.Info("socket was closed", "reason", err, "message type", mt)
- return nil
- }
-
- return errors.E(op, err)
- }
-
- msg := &websocketsv1.Message{}
-
- err = json.Unmarshal(data, msg)
- if err != nil {
- e.log.Error("unmarshal message", "error", err)
- continue
- }
-
- // nil message, continue
- if msg == nil {
- e.log.Warn("nil message, skipping")
- continue
- }
-
- switch msg.Command {
-		// handle join
- case commands.Join:
- e.log.Debug("received join command", "msg", msg)
-
- val, err := e.accessValidator(e.req, msg.Topics...)
- if err != nil {
- if val != nil {
- e.log.Debug("validation error", "status", val.Status, "headers", val.Header, "body", val.Body)
- }
-
- resp := &Response{
- Topic: "#join",
- Payload: msg.Topics,
- }
-
- packet, errJ := json.Marshal(resp)
- if errJ != nil {
- e.log.Error("marshal the body", "error", errJ)
- return errors.E(op, fmt.Errorf("%v,%v", err, errJ))
- }
-
- errW := e.conn.Write(packet)
- if errW != nil {
- e.log.Error("write payload to the connection", "payload", packet, "error", errW)
- return errors.E(op, fmt.Errorf("%v,%v", err, errW))
- }
-
- continue
- }
-
- resp := &Response{
- Topic: "@join",
- Payload: msg.Topics,
- }
-
- packet, err := json.Marshal(resp)
- if err != nil {
- e.log.Error("marshal the body", "error", err)
- return errors.E(op, err)
- }
-
- err = e.conn.Write(packet)
- if err != nil {
- e.log.Error("write payload to the connection", "payload", packet, "error", err)
- return errors.E(op, err)
- }
-
- // subscribe to the topic
- err = e.Set(msg.Topics)
- if err != nil {
- return errors.E(op, err)
- }
-
- // handle leave
- case commands.Leave:
- e.log.Debug("received leave command", "msg", msg)
-
- // prepare response
- resp := &Response{
- Topic: "@leave",
- Payload: msg.Topics,
- }
-
- packet, err := json.Marshal(resp)
- if err != nil {
- e.log.Error("marshal the body", "error", err)
- return errors.E(op, err)
- }
-
- err = e.conn.Write(packet)
- if err != nil {
- e.log.Error("write payload to the connection", "payload", packet, "error", err)
- return errors.E(op, err)
- }
-
- err = e.Leave(msg.Topics)
- if err != nil {
- return errors.E(op, err)
- }
-
- case commands.Headers:
-
- default:
- e.log.Warn("unknown command", "command", msg.Command)
- }
- }
-}
-
-func (e *Executor) Set(topics []string) error {
- // associate connection with topics
- err := e.sub.Subscribe(e.connID, topics...)
- if err != nil {
- e.log.Error("subscribe to the provided topics", "topics", topics, "error", err.Error())
- // in case of error, unsubscribe connection from the dead topics
- _ = e.sub.Unsubscribe(e.connID, topics...)
- return err
- }
-
- // save topics for the connection
- for i := 0; i < len(topics); i++ {
- e.actualTopics[topics[i]] = struct{}{}
- }
-
- return nil
-}
-
-func (e *Executor) Leave(topics []string) error {
- // remove associated connections from the storage
- err := e.sub.Unsubscribe(e.connID, topics...)
- if err != nil {
-		e.log.Error("unsubscribe from the provided topics", "topics", topics, "error", err.Error())
- return err
- }
-
- // remove topics for the connection
- for i := 0; i < len(topics); i++ {
- delete(e.actualTopics, topics[i])
- }
-
- return nil
-}
-
-func (e *Executor) CleanUp() {
- // unsubscribe particular connection from the topics
- for topic := range e.actualTopics {
- _ = e.sub.Unsubscribe(e.connID, topic)
- }
-
- // clean up the actualTopics data
- for k := range e.actualTopics {
- delete(e.actualTopics, k)
- }
-}
diff --git a/plugins/websockets/origin.go b/plugins/websockets/origin.go
deleted file mode 100644
index c6d9c9b8..00000000
--- a/plugins/websockets/origin.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package websockets
-
-import (
- "strings"
-)
-
-func isOriginAllowed(origin string, cfg *Config) bool {
- if cfg.allowedAll {
- return true
- }
-
-	origin = strings.ToLower(origin)
-
-	// simple case: exact match
- for _, o := range cfg.allowedOrigins {
- if o == origin {
- return true
- }
- }
- // check wildcards
- for _, w := range cfg.allowedWOrigins {
- if w.match(origin) {
- return true
- }
- }
-
- return false
-}
diff --git a/plugins/websockets/origin_test.go b/plugins/websockets/origin_test.go
deleted file mode 100644
index bbc49bbb..00000000
--- a/plugins/websockets/origin_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package websockets
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestConfig_Origin(t *testing.T) {
- cfg := &Config{
- AllowedOrigin: "*",
- Broker: "any",
- }
-
- err := cfg.InitDefault()
- assert.NoError(t, err)
-
- assert.True(t, isOriginAllowed("http://some.some.some.sssome", cfg))
- assert.True(t, isOriginAllowed("http://", cfg))
- assert.True(t, isOriginAllowed("http://google.com", cfg))
- assert.True(t, isOriginAllowed("ws://*", cfg))
- assert.True(t, isOriginAllowed("*", cfg))
- assert.True(t, isOriginAllowed("you are bad programmer", cfg)) // True :(
- assert.True(t, isOriginAllowed("****", cfg))
- assert.True(t, isOriginAllowed("asde!@#!!@#!%", cfg))
- assert.True(t, isOriginAllowed("http://*.domain.com", cfg))
-}
-
-func TestConfig_OriginWildCard(t *testing.T) {
- cfg := &Config{
- AllowedOrigin: "https://*my.site.com",
- Broker: "any",
- }
-
- err := cfg.InitDefault()
- assert.NoError(t, err)
-
- assert.True(t, isOriginAllowed("https://my.site.com", cfg))
- assert.False(t, isOriginAllowed("http://", cfg))
- assert.False(t, isOriginAllowed("http://google.com", cfg))
- assert.False(t, isOriginAllowed("ws://*", cfg))
- assert.False(t, isOriginAllowed("*", cfg))
-	assert.False(t, isOriginAllowed("you are bad programmer", cfg))
- assert.False(t, isOriginAllowed("****", cfg))
- assert.False(t, isOriginAllowed("asde!@#!!@#!%", cfg))
- assert.False(t, isOriginAllowed("http://*.domain.com", cfg))
-
- assert.False(t, isOriginAllowed("https://*site.com", cfg))
- assert.True(t, isOriginAllowed("https://some.my.site.com", cfg))
-}
-
-func TestConfig_OriginWildCard2(t *testing.T) {
- cfg := &Config{
- AllowedOrigin: "https://my.*.com",
- Broker: "any",
- }
-
- err := cfg.InitDefault()
- assert.NoError(t, err)
-
- assert.True(t, isOriginAllowed("https://my.site.com", cfg))
- assert.False(t, isOriginAllowed("http://", cfg))
- assert.False(t, isOriginAllowed("http://google.com", cfg))
- assert.False(t, isOriginAllowed("ws://*", cfg))
- assert.False(t, isOriginAllowed("*", cfg))
-	assert.False(t, isOriginAllowed("you are bad programmer", cfg))
- assert.False(t, isOriginAllowed("****", cfg))
- assert.False(t, isOriginAllowed("asde!@#!!@#!%", cfg))
- assert.False(t, isOriginAllowed("http://*.domain.com", cfg))
-
- assert.False(t, isOriginAllowed("https://*site.com", cfg))
- assert.True(t, isOriginAllowed("https://my.bad.com", cfg))
-}
diff --git a/plugins/websockets/plugin.go b/plugins/websockets/plugin.go
deleted file mode 100644
index 395b056f..00000000
--- a/plugins/websockets/plugin.go
+++ /dev/null
@@ -1,370 +0,0 @@
-package websockets
-
-import (
- "context"
- "net/http"
- "sync"
- "time"
-
- "github.com/fasthttp/websocket"
- "github.com/google/uuid"
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- phpPool "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/http/attributes"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/websockets/connection"
- "github.com/spiral/roadrunner/v2/plugins/websockets/executor"
- "github.com/spiral/roadrunner/v2/plugins/websockets/pool"
- "github.com/spiral/roadrunner/v2/plugins/websockets/validator"
-)
-
-const (
- PluginName string = "websockets"
-
- RrMode string = "RR_MODE"
- RrBroadcastPath string = "RR_BROADCAST_PATH"
-)
-
-type Plugin struct {
- sync.RWMutex
-
- // subscriber+reader interfaces
- subReader pubsub.SubReader
- // broadcaster
- broadcaster broadcast.Broadcaster
-
- cfg *Config
- log logger.Logger
-
- // global connections map
- connections sync.Map
-
- // GO workers pool
- workersPool *pool.WorkersPool
-
- wsUpgrade *websocket.Upgrader
- serveExit chan struct{}
-
- // workers pool
- phpPool phpPool.Pool
- // server which produces commands to the pool
- server server.Server
-
- // stop receiving messages
- cancel context.CancelFunc
- ctx context.Context
-
- // function used to validate access to the requested resource
- accessValidator validator.AccessValidatorFn
-}
-
-func (p *Plugin) Init(cfg config.Configurer, log logger.Logger, server server.Server, b broadcast.Broadcaster) error {
- const op = errors.Op("websockets_plugin_init")
- if !cfg.Has(PluginName) {
- return errors.E(op, errors.Disabled)
- }
-
- err := cfg.UnmarshalKey(PluginName, &p.cfg)
- if err != nil {
- return errors.E(op, err)
- }
-
- err = p.cfg.InitDefault()
- if err != nil {
- return errors.E(op, err)
- }
-
- p.wsUpgrade = &websocket.Upgrader{
- HandshakeTimeout: time.Second * 60,
- ReadBufferSize: 1024,
- WriteBufferSize: 1024,
- CheckOrigin: func(r *http.Request) bool {
- return isOriginAllowed(r.Header.Get("Origin"), p.cfg)
- },
- }
- p.serveExit = make(chan struct{})
- p.server = server
- p.log = log
- p.broadcaster = b
-
- ctx, cancel := context.WithCancel(context.Background())
- p.ctx = ctx
- p.cancel = cancel
- return nil
-}
-
-func (p *Plugin) Serve() chan error {
- const op = errors.Op("websockets_plugin_serve")
- errCh := make(chan error, 1)
- // init broadcaster
- var err error
- p.subReader, err = p.broadcaster.GetDriver(p.cfg.Broker)
- if err != nil {
- errCh <- errors.E(op, err)
- return errCh
- }
-
- go func() {
- var err error
- p.Lock()
- defer p.Unlock()
-
- p.phpPool, err = p.server.NewWorkerPool(context.Background(), &phpPool.Config{
- Debug: p.cfg.Pool.Debug,
- NumWorkers: p.cfg.Pool.NumWorkers,
- MaxJobs: p.cfg.Pool.MaxJobs,
- AllocateTimeout: p.cfg.Pool.AllocateTimeout,
- DestroyTimeout: p.cfg.Pool.DestroyTimeout,
- Supervisor: p.cfg.Pool.Supervisor,
- }, map[string]string{RrMode: "http", RrBroadcastPath: p.cfg.Path})
- if err != nil {
- errCh <- errors.E(op, err)
- return
- }
-
- p.accessValidator = p.defaultAccessValidator(p.phpPool)
- }()
-
- p.workersPool = pool.NewWorkersPool(p.subReader, &p.connections, p.log)
-
- // we need here only Reader part of the interface
- go func(ps pubsub.Reader) {
- for {
- data, err := ps.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
-
- errCh <- errors.E(op, err)
- return
- }
-
- p.workersPool.Queue(data)
- }
- }(p.subReader)
-
- return errCh
-}
-
-func (p *Plugin) Stop() error {
- // close workers pool
- p.workersPool.Stop()
- // cancel context
- p.cancel()
- p.Lock()
- if p.phpPool == nil {
- p.Unlock()
- return nil
- }
- p.phpPool.Destroy(context.Background())
- p.Unlock()
-
- return nil
-}
-
-func (p *Plugin) Available() {}
-
-func (p *Plugin) Name() string {
- return PluginName
-}
-
-func (p *Plugin) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path != p.cfg.Path {
- next.ServeHTTP(w, r)
- return
- }
-
- // we need to lock here, because accessValidator might not be set in the Serve func at the moment
- p.RLock()
-		// before we hijack the connection, we can still write to the response headers
- val, err := p.accessValidator(r)
- p.RUnlock()
- if err != nil {
-			p.log.Error("access validation", "error", err)
- w.WriteHeader(400)
- return
- }
-
- if val.Status != http.StatusOK {
- for k, v := range val.Header {
- for i := 0; i < len(v); i++ {
- w.Header().Add(k, v[i])
- }
- }
- w.WriteHeader(val.Status)
- _, _ = w.Write(val.Body)
- return
- }
-
- // upgrade connection to websocket connection
- _conn, err := p.wsUpgrade.Upgrade(w, r, nil)
- if err != nil {
- // connection hijacked, do not use response.writer or request
- p.log.Error("upgrade connection", "error", err)
- return
- }
-
- // construct safe connection protected by mutexes
- safeConn := connection.NewConnection(_conn, p.log)
- // generate UUID from the connection
- connectionID := uuid.NewString()
- // store connection
- p.connections.Store(connectionID, safeConn)
-
- // Executor wraps a connection to have a safe abstraction
- e := executor.NewExecutor(safeConn, p.log, connectionID, p.subReader, p.accessValidator, r)
- p.log.Info("websocket client connected", "uuid", connectionID)
-
- err = e.StartCommandLoop()
- if err != nil {
- p.log.Error("command loop error, disconnecting", "error", err.Error())
- return
- }
-
- // when exiting - delete the connection
- p.connections.Delete(connectionID)
-
- // remove connection from all topics from all pub-sub drivers
- e.CleanUp()
-
- err = r.Body.Close()
- if err != nil {
- p.log.Error("body close", "error", err)
- }
-
- // close the connection on exit
- err = safeConn.Close()
- if err != nil {
- p.log.Error("connection close", "error", err)
- }
-
- safeConn = nil
- p.log.Info("disconnected", "connectionID", connectionID)
- })
-}
-
-// Workers returns slice with the process states for the workers
-func (p *Plugin) Workers() []*process.State {
- p.RLock()
- defer p.RUnlock()
-
- workers := p.workers()
-
- ps := make([]*process.State, 0, len(workers))
- for i := 0; i < len(workers); i++ {
- state, err := process.WorkerProcessState(workers[i])
- if err != nil {
- return nil
- }
- ps = append(ps, state)
- }
-
- return ps
-}
-
-// internal
-func (p *Plugin) workers() []worker.BaseProcess {
- return p.phpPool.Workers()
-}
-
-// Reset destroys the old pool and replaces it with new one, waiting for old pool to die
-func (p *Plugin) Reset() error {
- p.Lock()
- defer p.Unlock()
- const op = errors.Op("ws_plugin_reset")
- p.log.Info("WS plugin got restart request. Restarting...")
- p.phpPool.Destroy(context.Background())
- p.phpPool = nil
-
- var err error
- p.phpPool, err = p.server.NewWorkerPool(context.Background(), &phpPool.Config{
- Debug: p.cfg.Pool.Debug,
- NumWorkers: p.cfg.Pool.NumWorkers,
- MaxJobs: p.cfg.Pool.MaxJobs,
- AllocateTimeout: p.cfg.Pool.AllocateTimeout,
- DestroyTimeout: p.cfg.Pool.DestroyTimeout,
- Supervisor: p.cfg.Pool.Supervisor,
- }, map[string]string{RrMode: "http", RrBroadcastPath: p.cfg.Path})
- if err != nil {
- return errors.E(op, err)
- }
-
- // attach validators
- p.accessValidator = p.defaultAccessValidator(p.phpPool)
-
- p.log.Info("WS plugin successfully restarted")
- return nil
-}
-
-func (p *Plugin) defaultAccessValidator(pool phpPool.Pool) validator.AccessValidatorFn {
- return func(r *http.Request, topics ...string) (*validator.AccessValidator, error) {
- const op = errors.Op("access_validator")
-
- p.log.Debug("validation", "topics", topics)
- r = attributes.Init(r)
-
-		// if the topics list is empty, we use the server access validator
- if len(topics) == 0 {
- ctx, err := validator.ServerAccessValidator(r)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- val, err := exec(ctx, pool)
- if err != nil {
-				return nil, errors.E(op, err)
- }
-
- return val, nil
- }
-
- ctx, err := validator.TopicsAccessValidator(r, topics...)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- val, err := exec(ctx, pool)
- if err != nil {
-			return nil, errors.E(op, err)
- }
-
- if val.Status != http.StatusOK {
- return val, errors.E(op, errors.Errorf("access forbidden, code: %d", val.Status))
- }
-
- return val, nil
- }
-}
-
-func exec(ctx []byte, pool phpPool.Pool) (*validator.AccessValidator, error) {
- const op = errors.Op("exec")
- pd := &payload.Payload{
- Context: ctx,
- }
-
- resp, err := pool.Exec(pd)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- val := &validator.AccessValidator{
- Body: resp.Body,
- }
-
- err = json.Unmarshal(resp.Context, val)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return val, nil
-}
diff --git a/plugins/websockets/pool/workers_pool.go b/plugins/websockets/pool/workers_pool.go
deleted file mode 100644
index 758620f6..00000000
--- a/plugins/websockets/pool/workers_pool.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package pool
-
-import (
- "sync"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/websockets/connection"
- "github.com/spiral/roadrunner/v2/utils"
-)
-
-type WorkersPool struct {
- subscriber pubsub.Subscriber
- connections *sync.Map
- resPool sync.Pool
- log logger.Logger
-
- queue chan *pubsub.Message
- exit chan struct{}
-}
-
-// NewWorkersPool constructs worker pool for the websocket connections
-func NewWorkersPool(subscriber pubsub.Subscriber, connections *sync.Map, log logger.Logger) *WorkersPool {
- wp := &WorkersPool{
- connections: connections,
- queue: make(chan *pubsub.Message, 100),
- subscriber: subscriber,
- log: log,
- exit: make(chan struct{}),
- }
-
- wp.resPool.New = func() interface{} {
- return make(map[string]struct{}, 10)
- }
-
-	// start 50 workers
- for i := 0; i < 50; i++ {
- wp.do()
- }
-
- return wp
-}
-
-func (wp *WorkersPool) Queue(msg *pubsub.Message) {
- wp.queue <- msg
-}
-
-func (wp *WorkersPool) Stop() {
- for i := 0; i < 50; i++ {
- wp.exit <- struct{}{}
- }
-
- close(wp.exit)
-}
-
-func (wp *WorkersPool) put(res map[string]struct{}) {
-	// the compiler optimizes this clearing loop into a single runtime map-clear call
-	// https://go-review.googlesource.com/c/go/+/110055/
- for k := range res {
- delete(res, k)
- }
-}
-
-func (wp *WorkersPool) get() map[string]struct{} {
- return wp.resPool.Get().(map[string]struct{})
-}
-
-// Response from the server
-type Response struct {
- Topic string `json:"topic"`
- Payload string `json:"payload"`
-}
-
-func (wp *WorkersPool) do() { //nolint:gocognit
- go func() {
- for {
- select {
- case msg, ok := <-wp.queue:
- if !ok {
- return
- }
-				if msg == nil || msg.Topic == "" {
- continue
- }
-
- // get free map
- res := wp.get()
-
- // get connections for the particular topic
- wp.subscriber.Connections(msg.Topic, res)
-
- if len(res) == 0 {
- wp.log.Info("no connections associated with provided topic", "topic", msg.Topic)
- wp.put(res)
- continue
- }
-
-				// res is a map of connection IDs
- for connID := range res {
- c, ok := wp.connections.Load(connID)
- if !ok {
-						wp.log.Warn("the websocket disconnected before the message was written to it", "topic", msg.Topic)
- wp.put(res)
- continue
- }
-
- d, err := json.Marshal(&Response{
- Topic: msg.Topic,
- Payload: utils.AsString(msg.Payload),
- })
-
- if err != nil {
- wp.log.Error("error marshaling response", "error", err)
- wp.put(res)
- break
- }
-
-					// write the data to the connection
- err = c.(*connection.Connection).Write(d)
- if err != nil {
- wp.log.Error("error sending payload over the connection", "error", err, "topic", msg.Topic)
- wp.put(res)
- continue
- }
- }
- case <-wp.exit:
-				wp.log.Info("got exit signal, exiting from the workers pool")
- return
- }
- }
- }()
-}
diff --git a/plugins/websockets/validator/access_validator.go b/plugins/websockets/validator/access_validator.go
deleted file mode 100644
index 2685da7f..00000000
--- a/plugins/websockets/validator/access_validator.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package validator
-
-import (
- "net/http"
- "strings"
-
- json "github.com/json-iterator/go"
- "github.com/spiral/errors"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
- "github.com/spiral/roadrunner/v2/plugins/http/attributes"
-)
-
-type AccessValidatorFn = func(r *http.Request, channels ...string) (*AccessValidator, error)
-
-const (
- joinServer string = "ws:joinServer"
- joinTopics string = "ws:joinTopics"
-)
-
-type AccessValidator struct {
- Header http.Header `json:"headers"`
- Status int `json:"status"`
- Body []byte
-}
-
-func ServerAccessValidator(r *http.Request) ([]byte, error) {
- const op = errors.Op("server_access_validator")
-
-	err := attributes.Set(r, joinServer, true)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- defer delete(attributes.All(r), joinServer)
-
- req := &handler.Request{
- RemoteAddr: handler.FetchIP(r.RemoteAddr),
- Protocol: r.Proto,
- Method: r.Method,
- URI: handler.URI(r),
- Header: r.Header,
- Cookies: make(map[string]string),
- RawQuery: r.URL.RawQuery,
- Attributes: attributes.All(r),
- }
-
- data, err := json.Marshal(req)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return data, nil
-}
-
-func TopicsAccessValidator(r *http.Request, topics ...string) ([]byte, error) {
- const op = errors.Op("topic_access_validator")
-	err := attributes.Set(r, joinTopics, strings.Join(topics, ","))
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- defer delete(attributes.All(r), joinTopics)
-
- req := &handler.Request{
- RemoteAddr: handler.FetchIP(r.RemoteAddr),
- Protocol: r.Proto,
- Method: r.Method,
- URI: handler.URI(r),
- Header: r.Header,
- Cookies: make(map[string]string),
- RawQuery: r.URL.RawQuery,
- Attributes: attributes.All(r),
- }
-
- data, err := json.Marshal(req)
- if err != nil {
- return nil, errors.E(op, err)
- }
-
- return data, nil
-}
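
A hedged sketch of the JSON document the PHP worker is expected to return in the response context, which the websockets plugin unmarshals into the AccessValidator struct above; the header name and values are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// mirrors validator.AccessValidator above, duplicated so the sketch is self-contained
type AccessValidator struct {
	Header http.Header `json:"headers"`
	Status int         `json:"status"`
	Body   []byte
}

func main() {
	raw := []byte(`{"headers":{"X-Reason":["ok"]},"status":200}`)

	val := &AccessValidator{}
	if err := json.Unmarshal(raw, val); err != nil {
		panic(err)
	}
	fmt.Println(val.Status, val.Header.Get("X-Reason"))
}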
diff --git a/plugins/websockets/wildcard.go b/plugins/websockets/wildcard.go
deleted file mode 100644
index 2f1c6601..00000000
--- a/plugins/websockets/wildcard.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package websockets
-
-import "strings"
-
-type wildcard struct {
- prefix string
- suffix string
-}
-
-func (w wildcard) match(s string) bool {
- return len(s) >= len(w.prefix)+len(w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix)
-}
diff --git a/pkg/pool/config.go b/pool/config.go
index 3a058956..3a058956 100644
--- a/pkg/pool/config.go
+++ b/pool/config.go
diff --git a/pkg/pool/interface.go b/pool/interface.go
index 4049122c..d089092f 100644
--- a/pkg/pool/interface.go
+++ b/pool/interface.go
@@ -3,8 +3,8 @@ package pool
import (
"context"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/worker"
)
// Pool managed set of inner worker processes.
diff --git a/pkg/pool/static_pool.go b/pool/static_pool.go
index 7e190846..25097395 100755
--- a/pkg/pool/static_pool.go
+++ b/pool/static_pool.go
@@ -6,12 +6,12 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/transport"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- workerWatcher "github.com/spiral/roadrunner/v2/pkg/worker_watcher"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/transport"
"github.com/spiral/roadrunner/v2/utils"
+ "github.com/spiral/roadrunner/v2/worker"
+ workerWatcher "github.com/spiral/roadrunner/v2/worker_watcher"
)
// StopRequest can be sent by worker to indicate that restart is required.
@@ -229,7 +229,7 @@ func (sp *StaticPool) takeWorker(ctxGetFree context.Context, op errors.Op) (work
if err != nil {
// if the error is of kind NoFreeWorkers, it means, that we can't get worker from the stack during the allocate timeout
if errors.Is(errors.NoFreeWorkers, err) {
- sp.events.Push(events.PoolEvent{Event: events.EventNoFreeWorkers, Payload: errors.E(op, err)})
+ sp.events.Push(events.PoolEvent{Event: events.EventNoFreeWorkers, Error: errors.E(op, err)})
return nil, errors.E(op, err)
}
// else if err not nil - return error
@@ -249,7 +249,7 @@ func defaultErrEncoder(sp *StaticPool) ErrorEncoder {
// just push event if on any stage was timeout error
switch {
case errors.Is(errors.ExecTTL, err):
- sp.events.Push(events.PoolEvent{Event: events.EventExecTTL, Payload: errors.E(op, err)})
+ sp.events.Push(events.PoolEvent{Event: events.EventExecTTL, Error: errors.E(op, err)})
w.State().Set(worker.StateInvalid)
return nil, err
diff --git a/pkg/pool/static_pool_test.go b/pool/static_pool_test.go
index cb6578a8..9861f0d8 100755
--- a/pkg/pool/static_pool_test.go
+++ b/pool/static_pool_test.go
@@ -12,11 +12,11 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/transport/pipe"
"github.com/spiral/roadrunner/v2/utils"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
)
@@ -30,7 +30,7 @@ func Test_NewPool(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -44,7 +44,7 @@ func Test_NewPool(t *testing.T) {
func Test_StaticPool_Invalid(t *testing.T) {
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../tests/invalid.php") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/invalid.php") },
pipe.NewPipeFactory(),
cfg,
)
@@ -56,7 +56,7 @@ func Test_StaticPool_Invalid(t *testing.T) {
func Test_ConfigNoErrorInitDefaults(t *testing.T) {
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
AllocateTimeout: time.Second,
@@ -72,7 +72,7 @@ func Test_StaticPool_Echo(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -96,7 +96,7 @@ func Test_StaticPool_Echo_NilContext(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -120,7 +120,7 @@ func Test_StaticPool_Echo_Context(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "head", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "head", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -144,7 +144,7 @@ func Test_StaticPool_JobError(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "error", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "error", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -183,7 +183,7 @@ func Test_StaticPool_Broken_Replace(t *testing.T) {
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "broken", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "broken", "pipes") },
pipe.NewPipeFactory(),
cfg,
AddListeners(listener),
@@ -221,7 +221,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) {
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
cfg2,
AddListeners(listener),
@@ -261,7 +261,7 @@ func Test_StaticPool_Broken_FromOutside(t *testing.T) {
func Test_StaticPool_AllocateTimeout(t *testing.T) {
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
@@ -280,7 +280,7 @@ func Test_StaticPool_Replace_Worker(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "pid", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
@@ -319,7 +319,7 @@ func Test_StaticPool_Debug_Worker(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "pid", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "pid", "pipes") },
pipe.NewPipeFactory(),
&Config{
Debug: true,
@@ -361,7 +361,7 @@ func Test_StaticPool_Stop_Worker(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "stop", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "stop", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
@@ -402,7 +402,7 @@ func Test_Static_Pool_Destroy_And_Close(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
@@ -424,7 +424,7 @@ func Test_Static_Pool_Destroy_And_Close_While_Wait(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "delay", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "delay", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
@@ -454,7 +454,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 5,
@@ -479,7 +479,7 @@ func Test_Static_Pool_Handle_Dead(t *testing.T) {
func Test_Static_Pool_Slow_Destroy(t *testing.T) {
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../tests/slow-destroy.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 5,
@@ -509,7 +509,7 @@ func Test_StaticPool_NoFreeWorkers(t *testing.T) {
p, err := Initialize(
ctx,
// sleep for 3 seconds
- func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/sleep.php", "pipes") },
pipe.NewPipeFactory(),
&Config{
Debug: false,
@@ -541,7 +541,7 @@ func Test_StaticPool_NoFreeWorkers(t *testing.T) {
func Test_Static_Pool_WrongCommand1(t *testing.T) {
p, err := Initialize(
context.Background(),
- func() *exec.Cmd { return exec.Command("phg", "../../tests/slow-destroy.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("phg", "../tests/slow-destroy.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 5,
@@ -593,7 +593,7 @@ func Benchmark_Pool_Echo(b *testing.B) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
cfg,
)
@@ -625,7 +625,7 @@ func Benchmark_Pool_Echo_Batched(b *testing.B) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: uint64(runtime.NumCPU()),
@@ -667,7 +667,7 @@ func Benchmark_Pool_Echo_Replaced(b *testing.B) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/client.php", "echo", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/client.php", "echo", "pipes") },
pipe.NewPipeFactory(),
&Config{
NumWorkers: 1,
diff --git a/pkg/pool/supervisor_pool.go b/pool/supervisor_pool.go
index e6b2bd7c..99af168c 100755
--- a/pkg/pool/supervisor_pool.go
+++ b/pool/supervisor_pool.go
@@ -6,10 +6,10 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/state/process"
+ "github.com/spiral/roadrunner/v2/worker"
)
const MB = 1024 * 1024
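The hunks above and below only rewrite import paths: with the pkg/ tree flattened, the packages are addressed from the module root. A minimal, hedged sketch of a downstream import block after this commit follows; the package selection is illustrative, and blank imports keep the file compilable without touching any API.

// Sketch only: the paths below simply drop the former "pkg/" segment.
package main

import (
	_ "github.com/spiral/roadrunner/v2/events"
	_ "github.com/spiral/roadrunner/v2/payload"
	_ "github.com/spiral/roadrunner/v2/transport/pipe"
	_ "github.com/spiral/roadrunner/v2/worker"
)

func main() {}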
diff --git a/pkg/pool/supervisor_test.go b/pool/supervisor_test.go
index 14df513e..032e220b 100644
--- a/pkg/pool/supervisor_test.go
+++ b/pool/supervisor_test.go
@@ -7,10 +7,10 @@ import (
"testing"
"time"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/transport/pipe"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -32,7 +32,7 @@ func TestSupervisedPool_Exec(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/memleak.php", "pipes") },
pipe.NewPipeFactory(),
cfgSupervised,
)
@@ -66,7 +66,7 @@ func TestSupervisedPool_ExecWithDebugMode(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/supervised.php") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/supervised.php") },
pipe.NewPipeFactory(),
cfgSupervised,
)
@@ -104,7 +104,7 @@ func TestSupervisedPool_ExecTTL_TimedOut(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/sleep.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/sleep.php", "pipes") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
@@ -139,7 +139,7 @@ func TestSupervisedPool_ExecTTL_WorkerRestarted(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/sleep-ttl.php") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/sleep-ttl.php") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
@@ -196,7 +196,7 @@ func TestSupervisedPool_Idle(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/idle.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/idle.php", "pipes") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
@@ -243,7 +243,7 @@ func TestSupervisedPool_IdleTTL_StateAfterTimeout(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/exec_ttl.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/exec_ttl.php", "pipes") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
@@ -286,7 +286,7 @@ func TestSupervisedPool_ExecTTL_OK(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/exec_ttl.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/exec_ttl.php", "pipes") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
@@ -341,7 +341,7 @@ func TestSupervisedPool_MaxMemoryReached(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/memleak.php", "pipes") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/memleak.php", "pipes") },
pipe.NewPipeFactory(),
cfgExecTTL,
AddListeners(listener),
@@ -377,7 +377,7 @@ func TestSupervisedPool_AllocateFailedOK(t *testing.T) {
ctx := context.Background()
p, err := Initialize(
ctx,
- func() *exec.Cmd { return exec.Command("php", "../../tests/allocate-failed.php") },
+ func() *exec.Cmd { return exec.Command("php", "../tests/allocate-failed.php") },
pipe.NewPipeFactory(),
cfgExecTTL,
)
diff --git a/pkg/priority_queue/binary_heap.go b/priority_queue/binary_heap.go
index fc043927..fc043927 100644
--- a/pkg/priority_queue/binary_heap.go
+++ b/priority_queue/binary_heap.go
diff --git a/pkg/priority_queue/binary_heap_test.go b/priority_queue/binary_heap_test.go
index ab0f9266..ab0f9266 100644
--- a/pkg/priority_queue/binary_heap_test.go
+++ b/priority_queue/binary_heap_test.go
diff --git a/pkg/priority_queue/interface.go b/priority_queue/interface.go
index 9efa4652..9efa4652 100644
--- a/pkg/priority_queue/interface.go
+++ b/priority_queue/interface.go
diff --git a/proto/jobs/v1beta/jobs.pb.go b/proto/jobs/v1beta/jobs.pb.go
deleted file mode 100644
index 4c237ffa..00000000
--- a/proto/jobs/v1beta/jobs.pb.go
+++ /dev/null
@@ -1,840 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.17.3
-// source: jobs.proto
-
-package jobsv1beta
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// single job request
-type PushRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Job *Job `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"`
-}
-
-func (x *PushRequest) Reset() {
- *x = PushRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PushRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PushRequest) ProtoMessage() {}
-
-func (x *PushRequest) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PushRequest.ProtoReflect.Descriptor instead.
-func (*PushRequest) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *PushRequest) GetJob() *Job {
- if x != nil {
- return x.Job
- }
- return nil
-}
-
-// batch jobs request
-type PushBatchRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
-}
-
-func (x *PushBatchRequest) Reset() {
- *x = PushBatchRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PushBatchRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PushBatchRequest) ProtoMessage() {}
-
-func (x *PushBatchRequest) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PushBatchRequest.ProtoReflect.Descriptor instead.
-func (*PushBatchRequest) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *PushBatchRequest) GetJobs() []*Job {
- if x != nil {
- return x.Jobs
- }
- return nil
-}
-
-// request to pause/resume/list/Destroy
-type Pipelines struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Pipelines []string `protobuf:"bytes,1,rep,name=pipelines,proto3" json:"pipelines,omitempty"`
-}
-
-func (x *Pipelines) Reset() {
- *x = Pipelines{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Pipelines) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Pipelines) ProtoMessage() {}
-
-func (x *Pipelines) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Pipelines.ProtoReflect.Descriptor instead.
-func (*Pipelines) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Pipelines) GetPipelines() []string {
- if x != nil {
- return x.Pipelines
- }
- return nil
-}
-
-// some endpoints receives nothing
-// all endpoints returns nothing, except error
-type Empty struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *Empty) Reset() {
- *x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Empty) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Empty) ProtoMessage() {}
-
-func (x *Empty) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
-func (*Empty) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{3}
-}
-
-type DeclareRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Pipeline map[string]string `protobuf:"bytes,1,rep,name=pipeline,proto3" json:"pipeline,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *DeclareRequest) Reset() {
- *x = DeclareRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeclareRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeclareRequest) ProtoMessage() {}
-
-func (x *DeclareRequest) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeclareRequest.ProtoReflect.Descriptor instead.
-func (*DeclareRequest) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *DeclareRequest) GetPipeline() map[string]string {
- if x != nil {
- return x.Pipeline
- }
- return nil
-}
-
-type Job struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Job string `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"`
- Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
- Payload string `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
- Headers map[string]*HeaderValue `protobuf:"bytes,4,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Options *Options `protobuf:"bytes,5,opt,name=options,proto3" json:"options,omitempty"`
-}
-
-func (x *Job) Reset() {
- *x = Job{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Job) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Job) ProtoMessage() {}
-
-func (x *Job) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Job.ProtoReflect.Descriptor instead.
-func (*Job) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *Job) GetJob() string {
- if x != nil {
- return x.Job
- }
- return ""
-}
-
-func (x *Job) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *Job) GetPayload() string {
- if x != nil {
- return x.Payload
- }
- return ""
-}
-
-func (x *Job) GetHeaders() map[string]*HeaderValue {
- if x != nil {
- return x.Headers
- }
- return nil
-}
-
-func (x *Job) GetOptions() *Options {
- if x != nil {
- return x.Options
- }
- return nil
-}
-
-type Options struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Priority int64 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"`
- Pipeline string `protobuf:"bytes,2,opt,name=pipeline,proto3" json:"pipeline,omitempty"`
- Delay int64 `protobuf:"varint,3,opt,name=delay,proto3" json:"delay,omitempty"`
-}
-
-func (x *Options) Reset() {
- *x = Options{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Options) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Options) ProtoMessage() {}
-
-func (x *Options) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Options.ProtoReflect.Descriptor instead.
-func (*Options) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Options) GetPriority() int64 {
- if x != nil {
- return x.Priority
- }
- return 0
-}
-
-func (x *Options) GetPipeline() string {
- if x != nil {
- return x.Pipeline
- }
- return ""
-}
-
-func (x *Options) GetDelay() int64 {
- if x != nil {
- return x.Delay
- }
- return 0
-}
-
-type HeaderValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"`
-}
-
-func (x *HeaderValue) Reset() {
- *x = HeaderValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HeaderValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HeaderValue) ProtoMessage() {}
-
-func (x *HeaderValue) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead.
-func (*HeaderValue) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *HeaderValue) GetValue() []string {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-type Stats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Stats []*Stat `protobuf:"bytes,1,rep,name=Stats,proto3" json:"Stats,omitempty"`
-}
-
-func (x *Stats) Reset() {
- *x = Stats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Stats) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Stats) ProtoMessage() {}
-
-func (x *Stats) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Stats.ProtoReflect.Descriptor instead.
-func (*Stats) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *Stats) GetStats() []*Stat {
- if x != nil {
- return x.Stats
- }
- return nil
-}
-
-// Stat used as a response for the Stats RPC call
-type Stat struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Pipeline string `protobuf:"bytes,1,opt,name=pipeline,proto3" json:"pipeline,omitempty"`
- Driver string `protobuf:"bytes,2,opt,name=driver,proto3" json:"driver,omitempty"`
- Queue string `protobuf:"bytes,3,opt,name=queue,proto3" json:"queue,omitempty"`
- Active int64 `protobuf:"varint,4,opt,name=active,proto3" json:"active,omitempty"`
- Delayed int64 `protobuf:"varint,5,opt,name=delayed,proto3" json:"delayed,omitempty"`
- Reserved int64 `protobuf:"varint,6,opt,name=reserved,proto3" json:"reserved,omitempty"`
- Ready bool `protobuf:"varint,7,opt,name=ready,proto3" json:"ready,omitempty"`
-}
-
-func (x *Stat) Reset() {
- *x = Stat{}
- if protoimpl.UnsafeEnabled {
- mi := &file_jobs_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Stat) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Stat) ProtoMessage() {}
-
-func (x *Stat) ProtoReflect() protoreflect.Message {
- mi := &file_jobs_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Stat.ProtoReflect.Descriptor instead.
-func (*Stat) Descriptor() ([]byte, []int) {
- return file_jobs_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *Stat) GetPipeline() string {
- if x != nil {
- return x.Pipeline
- }
- return ""
-}
-
-func (x *Stat) GetDriver() string {
- if x != nil {
- return x.Driver
- }
- return ""
-}
-
-func (x *Stat) GetQueue() string {
- if x != nil {
- return x.Queue
- }
- return ""
-}
-
-func (x *Stat) GetActive() int64 {
- if x != nil {
- return x.Active
- }
- return 0
-}
-
-func (x *Stat) GetDelayed() int64 {
- if x != nil {
- return x.Delayed
- }
- return 0
-}
-
-func (x *Stat) GetReserved() int64 {
- if x != nil {
- return x.Reserved
- }
- return 0
-}
-
-func (x *Stat) GetReady() bool {
- if x != nil {
- return x.Ready
- }
- return false
-}
-
-var File_jobs_proto protoreflect.FileDescriptor
-
-var file_jobs_proto_rawDesc = []byte{
- 0x0a, 0x0a, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x6a, 0x6f,
- 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x22, 0x31, 0x0a, 0x0b, 0x50, 0x75, 0x73,
- 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x03, 0x6a, 0x6f, 0x62, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62,
- 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x22, 0x38, 0x0a, 0x10,
- 0x50, 0x75, 0x73, 0x68, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x24, 0x0a, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
- 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62,
- 0x52, 0x04, 0x6a, 0x6f, 0x62, 0x73, 0x22, 0x29, 0x0a, 0x09, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69,
- 0x6e, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65,
- 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x94, 0x01, 0x0a, 0x0e, 0x44,
- 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
- 0x08, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x29, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x65,
- 0x63, 0x6c, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x69, 0x70,
- 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x69, 0x70, 0x65,
- 0x6c, 0x69, 0x6e, 0x65, 0x1a, 0x3b, 0x0a, 0x0d, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x80, 0x02, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x10, 0x0a, 0x03, 0x6a, 0x6f, 0x62,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6a, 0x6f, 0x62, 0x12, 0x0e, 0x0a, 0x02, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70,
- 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x37, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31,
- 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x54,
- 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x2e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x18, 0x2e, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x48, 0x65,
- 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70,
- 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70,
- 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x22, 0x23, 0x0a,
- 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x22, 0x30, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x53,
- 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6a, 0x6f, 0x62,
- 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x52, 0x05, 0x53,
- 0x74, 0x61, 0x74, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x61, 0x74, 0x12, 0x1a, 0x0a,
- 0x08, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, 0x69,
- 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x72, 0x69, 0x76, 0x65,
- 0x72, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76,
- 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12,
- 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x42, 0x0f, 0x5a, 0x0d, 0x2e,
- 0x2f, 0x3b, 0x6a, 0x6f, 0x62, 0x73, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_jobs_proto_rawDescOnce sync.Once
- file_jobs_proto_rawDescData = file_jobs_proto_rawDesc
-)
-
-func file_jobs_proto_rawDescGZIP() []byte {
- file_jobs_proto_rawDescOnce.Do(func() {
- file_jobs_proto_rawDescData = protoimpl.X.CompressGZIP(file_jobs_proto_rawDescData)
- })
- return file_jobs_proto_rawDescData
-}
-
-var file_jobs_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
-var file_jobs_proto_goTypes = []interface{}{
- (*PushRequest)(nil), // 0: jobs.v1beta.PushRequest
- (*PushBatchRequest)(nil), // 1: jobs.v1beta.PushBatchRequest
- (*Pipelines)(nil), // 2: jobs.v1beta.Pipelines
- (*Empty)(nil), // 3: jobs.v1beta.Empty
- (*DeclareRequest)(nil), // 4: jobs.v1beta.DeclareRequest
- (*Job)(nil), // 5: jobs.v1beta.Job
- (*Options)(nil), // 6: jobs.v1beta.Options
- (*HeaderValue)(nil), // 7: jobs.v1beta.HeaderValue
- (*Stats)(nil), // 8: jobs.v1beta.Stats
- (*Stat)(nil), // 9: jobs.v1beta.Stat
- nil, // 10: jobs.v1beta.DeclareRequest.PipelineEntry
- nil, // 11: jobs.v1beta.Job.HeadersEntry
-}
-var file_jobs_proto_depIdxs = []int32{
- 5, // 0: jobs.v1beta.PushRequest.job:type_name -> jobs.v1beta.Job
- 5, // 1: jobs.v1beta.PushBatchRequest.jobs:type_name -> jobs.v1beta.Job
- 10, // 2: jobs.v1beta.DeclareRequest.pipeline:type_name -> jobs.v1beta.DeclareRequest.PipelineEntry
- 11, // 3: jobs.v1beta.Job.headers:type_name -> jobs.v1beta.Job.HeadersEntry
- 6, // 4: jobs.v1beta.Job.options:type_name -> jobs.v1beta.Options
- 9, // 5: jobs.v1beta.Stats.Stats:type_name -> jobs.v1beta.Stat
- 7, // 6: jobs.v1beta.Job.HeadersEntry.value:type_name -> jobs.v1beta.HeaderValue
- 7, // [7:7] is the sub-list for method output_type
- 7, // [7:7] is the sub-list for method input_type
- 7, // [7:7] is the sub-list for extension type_name
- 7, // [7:7] is the sub-list for extension extendee
- 0, // [0:7] is the sub-list for field type_name
-}
-
-func init() { file_jobs_proto_init() }
-func file_jobs_proto_init() {
- if File_jobs_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_jobs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PushRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PushBatchRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Pipelines); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeclareRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Job); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Options); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HeaderValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Stats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_jobs_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Stat); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_jobs_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 12,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_jobs_proto_goTypes,
- DependencyIndexes: file_jobs_proto_depIdxs,
- MessageInfos: file_jobs_proto_msgTypes,
- }.Build()
- File_jobs_proto = out.File
- file_jobs_proto_rawDesc = nil
- file_jobs_proto_goTypes = nil
- file_jobs_proto_depIdxs = nil
-}
diff --git a/proto/jobs/v1beta/jobs.proto b/proto/jobs/v1beta/jobs.proto
deleted file mode 100644
index c030c0df..00000000
--- a/proto/jobs/v1beta/jobs.proto
+++ /dev/null
@@ -1,60 +0,0 @@
-syntax = "proto3";
-
-package jobs.v1beta;
-option go_package = "./;jobsv1beta";
-
-// single job request
-message PushRequest {
- Job job = 1;
-}
-
-// batch jobs request
-message PushBatchRequest {
- repeated Job jobs = 1;
-}
-
-// request to pause/resume/list/Destroy
-message Pipelines {
- repeated string pipelines = 1;
-}
-
-// some endpoints receives nothing
-// all endpoints returns nothing, except error
-message Empty {}
-
-message DeclareRequest {
- map<string, string> pipeline = 1;
-}
-
-message Job {
- string job = 1;
- string id = 2;
- string payload = 3;
- map<string, HeaderValue> headers = 4;
- Options options = 5;
-}
-
-message Options {
- int64 priority = 1;
- string pipeline = 2;
- int64 delay = 3;
-}
-
-message HeaderValue {
- repeated string value = 1;
-}
-
-message Stats {
- repeated Stat Stats = 1;
-}
-
-// Stat used as a response for the Stats RPC call
-message Stat {
- string pipeline = 1;
- string driver = 2;
- string queue = 3;
- int64 active = 4;
- int64 delayed = 5;
- int64 reserved = 6;
- bool ready = 7;
-}
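The file removed above defined the wire format of the Jobs RPC API and leaves this repository together with the jobs plugins. As a hedged illustration of the message layout only, the sketch below fills in a Push request with the generated Go types; the import path is the one this repository used before the commit, and every concrete value is invented for the example.

// Illustrative sketch of the jobsv1beta messages deleted above.
// Field names come from jobs.proto; all values are made up.
package main

import (
	"fmt"

	jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta" // pre-move path of the generated package
)

func main() {
	req := &jobsv1beta.PushRequest{
		Job: &jobsv1beta.Job{
			Job:     "app.jobs.echo", // handler name, illustrative
			Id:      "1",
			Payload: `{"hello":"world"}`,
			Headers: map[string]*jobsv1beta.HeaderValue{
				"x-attempt": {Value: []string{"1"}},
			},
			Options: &jobsv1beta.Options{
				Priority: 10,
				Pipeline: "test-local", // pipeline name, illustrative
				Delay:    0,
			},
		},
	}
	fmt.Println(req.GetJob().GetId())
}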
diff --git a/proto/kv/v1beta/kv.pb.go b/proto/kv/v1beta/kv.pb.go
deleted file mode 100644
index 19621735..00000000
--- a/proto/kv/v1beta/kv.pb.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.17.3
-// source: kv.proto
-
-package kvv1beta
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // could be an enum in the future
- Storage string `protobuf:"bytes,1,opt,name=storage,proto3" json:"storage,omitempty"`
- Items []*Item `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
-}
-
-func (x *Request) Reset() {
- *x = Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_kv_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Request) ProtoMessage() {}
-
-func (x *Request) ProtoReflect() protoreflect.Message {
- mi := &file_kv_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Request.ProtoReflect.Descriptor instead.
-func (*Request) Descriptor() ([]byte, []int) {
- return file_kv_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Request) GetStorage() string {
- if x != nil {
- return x.Storage
- }
- return ""
-}
-
-func (x *Request) GetItems() []*Item {
- if x != nil {
- return x.Items
- }
- return nil
-}
-
-type Item struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- // RFC 3339
- Timeout string `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
-}
-
-func (x *Item) Reset() {
- *x = Item{}
- if protoimpl.UnsafeEnabled {
- mi := &file_kv_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Item) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Item) ProtoMessage() {}
-
-func (x *Item) ProtoReflect() protoreflect.Message {
- mi := &file_kv_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Item.ProtoReflect.Descriptor instead.
-func (*Item) Descriptor() ([]byte, []int) {
- return file_kv_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Item) GetKey() string {
- if x != nil {
- return x.Key
- }
- return ""
-}
-
-func (x *Item) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-func (x *Item) GetTimeout() string {
- if x != nil {
- return x.Timeout
- }
- return ""
-}
-
-// KV response for the KV RPC methods
-type Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Items []*Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
-}
-
-func (x *Response) Reset() {
- *x = Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_kv_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Response) ProtoMessage() {}
-
-func (x *Response) ProtoReflect() protoreflect.Message {
- mi := &file_kv_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Response.ProtoReflect.Descriptor instead.
-func (*Response) Descriptor() ([]byte, []int) {
- return file_kv_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Response) GetItems() []*Item {
- if x != nil {
- return x.Items
- }
- return nil
-}
-
-var File_kv_proto protoreflect.FileDescriptor
-
-var file_kv_proto_rawDesc = []byte{
- 0x0a, 0x08, 0x6b, 0x76, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x6b, 0x76, 0x2e, 0x76,
- 0x31, 0x62, 0x65, 0x74, 0x61, 0x22, 0x4a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x18, 0x0a, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x69, 0x74,
- 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6b, 0x76, 0x2e, 0x76,
- 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d,
- 0x73, 0x22, 0x48, 0x0a, 0x04, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x31, 0x0a, 0x08, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6b, 0x76, 0x2e, 0x76, 0x31, 0x62, 0x65,
- 0x74, 0x61, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x42, 0x0d,
- 0x5a, 0x0b, 0x2e, 0x2f, 0x3b, 0x6b, 0x76, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_kv_proto_rawDescOnce sync.Once
- file_kv_proto_rawDescData = file_kv_proto_rawDesc
-)
-
-func file_kv_proto_rawDescGZIP() []byte {
- file_kv_proto_rawDescOnce.Do(func() {
- file_kv_proto_rawDescData = protoimpl.X.CompressGZIP(file_kv_proto_rawDescData)
- })
- return file_kv_proto_rawDescData
-}
-
-var file_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_kv_proto_goTypes = []interface{}{
- (*Request)(nil), // 0: kv.v1beta.Request
- (*Item)(nil), // 1: kv.v1beta.Item
- (*Response)(nil), // 2: kv.v1beta.Response
-}
-var file_kv_proto_depIdxs = []int32{
- 1, // 0: kv.v1beta.Request.items:type_name -> kv.v1beta.Item
- 1, // 1: kv.v1beta.Response.items:type_name -> kv.v1beta.Item
- 2, // [2:2] is the sub-list for method output_type
- 2, // [2:2] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_kv_proto_init() }
-func file_kv_proto_init() {
- if File_kv_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_kv_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_kv_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Item); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_kv_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_kv_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_kv_proto_goTypes,
- DependencyIndexes: file_kv_proto_depIdxs,
- MessageInfos: file_kv_proto_msgTypes,
- }.Build()
- File_kv_proto = out.File
- file_kv_proto_rawDesc = nil
- file_kv_proto_goTypes = nil
- file_kv_proto_depIdxs = nil
-}
diff --git a/proto/kv/v1beta/kv.proto b/proto/kv/v1beta/kv.proto
deleted file mode 100644
index 1e3b8177..00000000
--- a/proto/kv/v1beta/kv.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-syntax = "proto3";
-
-package kv.v1beta;
-option go_package = "./;kvv1beta";
-
-message Request {
- // could be an enum in the future
- string storage = 1;
- repeated Item items = 2;
-}
-
-message Item {
- string key = 1;
- bytes value = 2;
- // RFC 3339
- string timeout = 3;
-}
-
-// KV response for the KV RPC methods
-message Response {
- repeated Item items = 1;
-}
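The removed kv.proto played the same role for the KV RPC API: a Request targets one configured storage section and ships Items whose Timeout is an RFC 3339 timestamp. A hedged sketch, with the field names taken from the file above and every concrete value invented:

// Illustrative sketch of the kvv1beta messages deleted above.
package main

import (
	"fmt"
	"time"

	kvv1beta "github.com/spiral/roadrunner/v2/proto/kv/v1beta" // pre-move path of the generated package
)

func main() {
	req := &kvv1beta.Request{
		Storage: "memcached", // name of a configured KV storage, illustrative
		Items: []*kvv1beta.Item{
			{
				Key:     "session:123",
				Value:   []byte("serialized payload"),
				Timeout: time.Now().Add(time.Hour).Format(time.RFC3339), // RFC 3339, as the proto comment requires
			},
		},
	}
	fmt.Println(len(req.GetItems()))
}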
diff --git a/proto/websockets/v1beta/websockets.pb.go b/proto/websockets/v1beta/websockets.pb.go
deleted file mode 100644
index 188dcf08..00000000
--- a/proto/websockets/v1beta/websockets.pb.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.17.3
-// source: websockets.proto
-
-package websocketsv1beta
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Message struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Command string `protobuf:"bytes,1,opt,name=command,proto3" json:"command,omitempty"`
- Topics []string `protobuf:"bytes,2,rep,name=topics,proto3" json:"topics,omitempty"`
- Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
-}
-
-func (x *Message) Reset() {
- *x = Message{}
- if protoimpl.UnsafeEnabled {
- mi := &file_websockets_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Message) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Message) ProtoMessage() {}
-
-func (x *Message) ProtoReflect() protoreflect.Message {
- mi := &file_websockets_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Message.ProtoReflect.Descriptor instead.
-func (*Message) Descriptor() ([]byte, []int) {
- return file_websockets_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Message) GetCommand() string {
- if x != nil {
- return x.Command
- }
- return ""
-}
-
-func (x *Message) GetTopics() []string {
- if x != nil {
- return x.Topics
- }
- return nil
-}
-
-func (x *Message) GetPayload() []byte {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-// RPC request with messages
-type Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Messages []*Message `protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"`
-}
-
-func (x *Request) Reset() {
- *x = Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_websockets_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Request) ProtoMessage() {}
-
-func (x *Request) ProtoReflect() protoreflect.Message {
- mi := &file_websockets_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Request.ProtoReflect.Descriptor instead.
-func (*Request) Descriptor() ([]byte, []int) {
- return file_websockets_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Request) GetMessages() []*Message {
- if x != nil {
- return x.Messages
- }
- return nil
-}
-
-// RPC response (false in case of error)
-type Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"`
-}
-
-func (x *Response) Reset() {
- *x = Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_websockets_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Response) ProtoMessage() {}
-
-func (x *Response) ProtoReflect() protoreflect.Message {
- mi := &file_websockets_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Response.ProtoReflect.Descriptor instead.
-func (*Response) Descriptor() ([]byte, []int) {
- return file_websockets_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Response) GetOk() bool {
- if x != nil {
- return x.Ok
- }
- return false
-}
-
-var File_websockets_proto protoreflect.FileDescriptor
-
-var file_websockets_proto_rawDesc = []byte{
- 0x0a, 0x10, 0x77, 0x65, 0x62, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x11, 0x77, 0x65, 0x62, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x76,
- 0x31, 0x62, 0x65, 0x74, 0x61, 0x22, 0x55, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f,
- 0x70, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69,
- 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x41, 0x0a, 0x07,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x77, 0x65, 0x62, 0x73,
- 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x62, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22,
- 0x1a, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f,
- 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x42, 0x15, 0x5a, 0x13, 0x2e,
- 0x2f, 0x3b, 0x77, 0x65, 0x62, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x76, 0x31, 0x62, 0x65,
- 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_websockets_proto_rawDescOnce sync.Once
- file_websockets_proto_rawDescData = file_websockets_proto_rawDesc
-)
-
-func file_websockets_proto_rawDescGZIP() []byte {
- file_websockets_proto_rawDescOnce.Do(func() {
- file_websockets_proto_rawDescData = protoimpl.X.CompressGZIP(file_websockets_proto_rawDescData)
- })
- return file_websockets_proto_rawDescData
-}
-
-var file_websockets_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_websockets_proto_goTypes = []interface{}{
- (*Message)(nil), // 0: websockets.v1beta.Message
- (*Request)(nil), // 1: websockets.v1beta.Request
- (*Response)(nil), // 2: websockets.v1beta.Response
-}
-var file_websockets_proto_depIdxs = []int32{
- 0, // 0: websockets.v1beta.Request.messages:type_name -> websockets.v1beta.Message
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_websockets_proto_init() }
-func file_websockets_proto_init() {
- if File_websockets_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_websockets_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Message); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_websockets_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_websockets_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_websockets_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_websockets_proto_goTypes,
- DependencyIndexes: file_websockets_proto_depIdxs,
- MessageInfos: file_websockets_proto_msgTypes,
- }.Build()
- File_websockets_proto = out.File
- file_websockets_proto_rawDesc = nil
- file_websockets_proto_goTypes = nil
- file_websockets_proto_depIdxs = nil
-}
diff --git a/proto/websockets/v1beta/websockets.proto b/proto/websockets/v1beta/websockets.proto
deleted file mode 100644
index 5be6f70f..00000000
--- a/proto/websockets/v1beta/websockets.proto
+++ /dev/null
@@ -1,20 +0,0 @@
-syntax = "proto3";
-
-package websockets.v1beta;
-option go_package = "./;websocketsv1beta";
-
-message Message {
- string command = 1;
- repeated string topics = 2;
- bytes payload = 3;
-}
-
-// RPC request with messages
-message Request {
- repeated Message messages = 1;
-}
-
-// RPC response (false in case of error)
-message Response {
- bool ok = 1;
-}
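Finally, the removed websockets.proto describes the broadcast RPC payloads: a Request batches Messages (command, topics, raw payload) and the Response is a single ok flag. A last hedged sketch along the same lines, with invented values:

// Illustrative sketch of the websocketsv1beta messages deleted above.
package main

import (
	"fmt"

	websocketsv1beta "github.com/spiral/roadrunner/v2/proto/websockets/v1beta" // pre-move path of the generated package
)

func main() {
	req := &websocketsv1beta.Request{
		Messages: []*websocketsv1beta.Message{
			{
				Command: "publish", // command name, illustrative
				Topics:  []string{"topic-1"},
				Payload: []byte(`{"event":"ping"}`),
			},
		},
	}
	resp := &websocketsv1beta.Response{Ok: true}
	fmt.Println(len(req.GetMessages()), resp.GetOk())
}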
diff --git a/pkg/state/job/state.go b/state/job/state.go
index 56050084..56050084 100644
--- a/pkg/state/job/state.go
+++ b/state/job/state.go
diff --git a/pkg/state/process/state.go b/state/process/state.go
index bfc3a287..f88f8b03 100644
--- a/pkg/state/process/state.go
+++ b/state/process/state.go
@@ -3,7 +3,7 @@ package process
import (
"github.com/shirou/gopsutil/process"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/worker"
)
// State provides information about a specific worker.
diff --git a/tests/env/Dockerfile-beanstalkd.yaml b/tests/env/Dockerfile-beanstalkd.yaml
deleted file mode 100644
index 7b36f8d3..00000000
--- a/tests/env/Dockerfile-beanstalkd.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM ubuntu:latest
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-RUN apt-get update && apt-get install -y curl build-essential pkg-config
-
-RUN curl -sL https://github.com/kr/beanstalkd/archive/v1.12.tar.gz | tar xvz -C /tmp
-
-WORKDIR /tmp/beanstalkd-1.12
-RUN make
-RUN cp beanstalkd /usr/bin
-
-EXPOSE 11300
-ENTRYPOINT ["/usr/bin/beanstalkd"]
diff --git a/tests/env/Dockerfile-elastic-mq.yaml b/tests/env/Dockerfile-elastic-mq.yaml
deleted file mode 100644
index 75d8a8ff..00000000
--- a/tests/env/Dockerfile-elastic-mq.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM openjdk:16
-
-ADD https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-1.2.1.jar /
-COPY custom.conf /
-ENTRYPOINT ["java", "-Dconfig.file=custom.conf", "-jar", "/elasticmq-server-1.2.1.jar"]
-
-EXPOSE 9324
-
-CMD ["-help"]
diff --git a/tests/env/custom.conf b/tests/env/custom.conf
deleted file mode 100644
index 9be7730e..00000000
--- a/tests/env/custom.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-include classpath("application.conf")
-
-node-address {
- protocol = http
- host = "*"
- port = 9324
- context-path = ""
-}
diff --git a/tests/env/docker-compose.yaml b/tests/env/docker-compose.yaml
deleted file mode 100644
index 8ef2a99b..00000000
--- a/tests/env/docker-compose.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-version: "3"
-
-services:
- memcached:
- image: memcached:latest
- ports:
- - "127.0.0.1:11211:11211"
- redis:
- image: redis:6
- ports:
- - "127.0.0.1:6379:6379"
- redis2:
- image: redis:6
- ports:
- - "127.0.0.1:6378:6379"
-
- toxicproxy:
- image: ghcr.io/shopify/toxiproxy:latest
- network_mode: host
-
- beanstalk:
- build:
- context: .
- dockerfile: Dockerfile-beanstalkd.yaml
- ports:
- - "127.0.0.1:11300:11300"
-
- sqs:
- build:
- context: .
- dockerfile: Dockerfile-elastic-mq.yaml
- ports:
- - "127.0.0.1:9324:9324"
-
- rabbitmq:
- image: rabbitmq:3-management
- ports:
- - "127.0.0.1:15672:15672"
- - "127.0.0.1:5672:5672"
-
- prometheus:
- image: prom/prometheus
- ports:
- - "9090:9090" \ No newline at end of file
diff --git a/tests/mocks/mock_log.go b/tests/mocks/mock_log.go
deleted file mode 100644
index 696accc0..00000000
--- a/tests/mocks/mock_log.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package mocks
-
-import (
- reflect "reflect"
-
- gomock "github.com/golang/mock/gomock"
- logger "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-// MockLogger is a mock of Logger interface.
-type MockLogger struct {
- ctrl *gomock.Controller
- recorder *MockLoggerMockRecorder
-}
-
-// MockLoggerMockRecorder is the mock recorder for MockLogger.
-type MockLoggerMockRecorder struct {
- mock *MockLogger
-}
-
-// NewMockLogger creates a new mock instance.
-func NewMockLogger(ctrl *gomock.Controller) *MockLogger {
- mock := &MockLogger{ctrl: ctrl}
- mock.recorder = &MockLoggerMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockLogger) EXPECT() *MockLoggerMockRecorder {
- return m.recorder
-}
-
-func (m *MockLogger) Init() error {
- mock := &MockLogger{ctrl: m.ctrl}
- mock.recorder = &MockLoggerMockRecorder{mock}
- return nil
-}
-
-// Debug mocks base method.
-func (m *MockLogger) Debug(msg string, keyvals ...interface{}) {
- m.ctrl.T.Helper()
- varargs := []interface{}{msg}
- for _, a := range keyvals {
- varargs = append(varargs, a)
- }
- m.ctrl.Call(m, "Debug", varargs...)
-}
-
-// Debug indicates an expected call of Debug.
-func (mr *MockLoggerMockRecorder) Debug(msg interface{}, keyvals ...interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- varargs := append([]interface{}{msg}, keyvals...)
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...)
-}
-
-// Error mocks base method.
-func (m *MockLogger) Error(msg string, keyvals ...interface{}) {
- m.ctrl.T.Helper()
- varargs := []interface{}{msg}
- for _, a := range keyvals {
- varargs = append(varargs, a)
- }
- m.ctrl.Call(m, "Error", varargs...)
-}
-
-// Error indicates an expected call of Error.
-func (mr *MockLoggerMockRecorder) Error(msg interface{}, keyvals ...interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- varargs := append([]interface{}{msg}, keyvals...)
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...)
-}
-
-// Info mocks base method.
-func (m *MockLogger) Info(msg string, keyvals ...interface{}) {
- m.ctrl.T.Helper()
- varargs := []interface{}{msg}
- for _, a := range keyvals {
- varargs = append(varargs, a)
- }
- m.ctrl.Call(m, "Info", varargs...)
-}
-
-// Info indicates an expected call of Info.
-func (mr *MockLoggerMockRecorder) Info(msg interface{}, keyvals ...interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- varargs := append([]interface{}{msg}, keyvals...)
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...)
-}
-
-// Warn mocks base method.
-func (m *MockLogger) Warn(msg string, keyvals ...interface{}) {
- m.ctrl.T.Helper()
- varargs := []interface{}{msg}
- for _, a := range keyvals {
- varargs = append(varargs, a)
- }
- m.ctrl.Call(m, "Warn", varargs...)
-}
-
-// Warn indicates an expected call of Warn.
-func (mr *MockLoggerMockRecorder) Warn(msg interface{}, keyvals ...interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- varargs := append([]interface{}{msg}, keyvals...)
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...)
-}
-
-// MockWithLogger is a mock of WithLogger interface.
-type MockWithLogger struct {
- ctrl *gomock.Controller
- recorder *MockWithLoggerMockRecorder
-}
-
-// MockWithLoggerMockRecorder is the mock recorder for MockWithLogger.
-type MockWithLoggerMockRecorder struct {
- mock *MockWithLogger
-}
-
-// NewMockWithLogger creates a new mock instance.
-func NewMockWithLogger(ctrl *gomock.Controller) *MockWithLogger {
- mock := &MockWithLogger{ctrl: ctrl}
- mock.recorder = &MockWithLoggerMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockWithLogger) EXPECT() *MockWithLoggerMockRecorder {
- return m.recorder
-}
-
-// With mocks base method.
-func (m *MockWithLogger) With(keyvals ...interface{}) logger.Logger {
- m.ctrl.T.Helper()
- varargs := []interface{}{}
- for _, a := range keyvals {
- varargs = append(varargs, a)
- }
- ret := m.ctrl.Call(m, "With", varargs...)
- ret0, _ := ret[0].(logger.Logger)
- return ret0
-}
-
-// With indicates an expected call of With.
-func (mr *MockWithLoggerMockRecorder) With(keyvals ...interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "With", reflect.TypeOf((*MockWithLogger)(nil).With), keyvals...)
-}
diff --git a/tests/plugins/broadcast/broadcast_plugin_test.go b/tests/plugins/broadcast/broadcast_plugin_test.go
deleted file mode 100644
index 5d8c9ce9..00000000
--- a/tests/plugins/broadcast/broadcast_plugin_test.go
+++ /dev/null
@@ -1,513 +0,0 @@
-package broadcast
-
-import (
- "context"
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- goRedis "github.com/go-redis/redis/v8"
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/memory"
- "github.com/spiral/roadrunner/v2/plugins/redis"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/websockets"
- websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/spiral/roadrunner/v2/tests/plugins/broadcast/plugins"
- "github.com/stretchr/testify/assert"
-)
-
-func TestBroadcastInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broadcast-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &broadcast.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestBroadcastConfigError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broadcast-config-error.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &broadcast.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
-
- &plugins.Plugin1{},
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- assert.Error(t, err)
-}
-
-func TestBroadcastNoConfig(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broadcast-no-config.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", []string{}).MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- &broadcast.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- // should be just disabled
- _, err = cont.Serve()
- assert.NoError(t, err)
-}
-
-func TestBroadcastSameSubscriber(t *testing.T) {
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6379"))
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6378"))
-
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.GracefulShutdownTimeout(time.Second))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broadcast-same-section.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6002", "plugins", []string{"broadcast"}).AnyTimes()
- mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info(`plugin1: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(2)
- mockLogger.EXPECT().Info(`plugin1: {foo3 hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin2: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin3: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin4: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin5: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin6: {foo hello}`).Times(3)
-
- err = cont.RegisterAll(
- cfg,
- &broadcast.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
-
- // test - redis
- // test2 - redis (port 6378)
- // test3 - memory
- // test4 - memory
- &plugins.Plugin1{}, // foo, foo2, foo3 test
- &plugins.Plugin2{}, // foo, test
- &plugins.Plugin3{}, // foo, test2
- &plugins.Plugin4{}, // foo, test3
- &plugins.Plugin5{}, // foo, test4
- &plugins.Plugin6{}, // foo, test3
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
-
- t.Run("PublishHelloFooFoo2Foo3", BroadcastPublishFooFoo2Foo3("6002"))
- time.Sleep(time.Second)
- t.Run("PublishHelloFoo2", BroadcastPublishFoo2("6002"))
- time.Sleep(time.Second)
- t.Run("PublishHelloFoo3", BroadcastPublishFoo3("6002"))
- time.Sleep(time.Second)
- t.Run("PublishAsyncHelloFooFoo2Foo3", BroadcastPublishAsyncFooFoo2Foo3("6002"))
-
- time.Sleep(time.Second * 5)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6379"))
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6378"))
-
- time.Sleep(time.Second * 5)
-}
-
-func TestBroadcastSameSubscriberGlobal(t *testing.T) {
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6379"))
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6378"))
-
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.GracefulShutdownTimeout(time.Second))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broadcast-global.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6003", "plugins", []string{"broadcast"}).AnyTimes()
- mockLogger.EXPECT().Debug("message published", "msg", gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info(`plugin1: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin1: {foo2 hello}`).Times(2)
- mockLogger.EXPECT().Info(`plugin1: {foo3 hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin2: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin3: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin4: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin5: {foo hello}`).Times(3)
- mockLogger.EXPECT().Info(`plugin6: {foo hello}`).Times(3)
-
- err = cont.RegisterAll(
- cfg,
- &broadcast.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
-
- // test - redis
- // test2 - redis (port 6378)
- // test3 - memory
- // test4 - memory
- &plugins.Plugin1{}, // foo, foo2, foo3 test
- &plugins.Plugin2{}, // foo, test
- &plugins.Plugin3{}, // foo, test2
- &plugins.Plugin4{}, // foo, test3
- &plugins.Plugin5{}, // foo, test4
- &plugins.Plugin6{}, // foo, test3
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
-
- t.Run("PublishHelloFooFoo2Foo3", BroadcastPublishFooFoo2Foo3("6003"))
- time.Sleep(time.Second)
- t.Run("PublishHelloFoo2", BroadcastPublishFoo2("6003"))
- time.Sleep(time.Second)
- t.Run("PublishHelloFoo3", BroadcastPublishFoo3("6003"))
- time.Sleep(time.Second)
- t.Run("PublishAsyncHelloFooFoo2Foo3", BroadcastPublishAsyncFooFoo2Foo3("6003"))
-
- time.Sleep(time.Second * 4)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-
- time.Sleep(time.Second * 5)
-
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6379"))
- t.Run("RedisFlush", redisFlushAll("127.0.0.1:6378"))
-}
-
-func BroadcastPublishFooFoo2Foo3(port string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:"+port)
- if err != nil {
- t.Fatal(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.Publish", makeMessage([]byte("hello"), "foo", "foo2", "foo3"), ret)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func BroadcastPublishFoo2(port string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:"+port)
- if err != nil {
- t.Fatal(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.Publish", makeMessage([]byte("hello"), "foo"), ret)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func BroadcastPublishFoo3(port string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:"+port)
- if err != nil {
- t.Fatal(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.Publish", makeMessage([]byte("hello"), "foo3"), ret)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func BroadcastPublishAsyncFooFoo2Foo3(port string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:"+port)
- if err != nil {
- t.Fatal(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.PublishAsync", makeMessage([]byte("hello"), "foo", "foo2", "foo3"), ret)
- if err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func makeMessage(payload []byte, topics ...string) *websocketsv1.Request {
- m := &websocketsv1.Request{
- Messages: []*websocketsv1.Message{
- {
- Topics: topics,
- Payload: payload,
- },
- },
- }
-
- return m
-}
-
-func redisFlushAll(addr string) func(t *testing.T) {
- return func(t *testing.T) {
- rdb := goRedis.NewClient(&goRedis.Options{
- Addr: addr,
- Password: "", // no password set
- DB: 0, // use default DB
- })
-
- rdb.FlushAll(context.Background())
- }
-}
diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml
deleted file mode 100644
index 1474feb7..00000000
--- a/tests/plugins/broadcast/configs/.rr-broadcast-config-error.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21345
- max_request_size: 1024
- middleware: [ "websockets" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-# no global or local config
-broadcast:
- default:
- driver: redis
-
-logs:
- mode: development
- level: debug
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml
deleted file mode 100644
index a7f9c35d..00000000
--- a/tests/plugins/broadcast/configs/.rr-broadcast-global.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6003
-
-server:
- command: "php ../../psr-worker-bench.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21543
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-test:
- addrs:
- - "127.0.0.1:6379"
-
-broadcast:
- test:
- driver: redis
- test2:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6378"
- test3:
- driver: memory
- config: {}
- test4:
- driver: memory
- config: {}
-
-logs:
- mode: development
- level: info
diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml
deleted file mode 100644
index 1cbebdd7..00000000
--- a/tests/plugins/broadcast/configs/.rr-broadcast-init.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21345
- max_request_size: 1024
- middleware: [ "websockets" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- default:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6379"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-no-config.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-no-config.yaml
deleted file mode 100644
index 90790869..00000000
--- a/tests/plugins/broadcast/configs/.rr-broadcast-no-config.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21345
- max_request_size: 1024
- middleware: [ "websockets" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-logs:
- mode: development
- level: debug
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml b/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml
deleted file mode 100644
index 85a767cb..00000000
--- a/tests/plugins/broadcast/configs/.rr-broadcast-same-section.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6002
-
-server:
- command: "php ../../psr-worker-bench.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21345
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- test:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6379"
- test2:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6378"
- test3:
- driver: memory
- config: {}
- test4:
- driver: memory
- config: {}
-logs:
- mode: development
- level: info
diff --git a/tests/plugins/broadcast/plugins/plugin1.go b/tests/plugins/broadcast/plugins/plugin1.go
deleted file mode 100644
index ed5139a8..00000000
--- a/tests/plugins/broadcast/plugins/plugin1.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin1Name = "plugin1"
-
-type Plugin1 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin1) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test")
- if err != nil {
- errCh <- err
- return errCh
- }
-
- err = p.driver.Subscribe("1", "foo", "foo2", "foo3")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin1Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin1) Stop() error {
- _ = p.driver.Unsubscribe("1", "foo")
- _ = p.driver.Unsubscribe("1", "foo2")
- _ = p.driver.Unsubscribe("1", "foo3")
- p.cancel()
- return nil
-}
-
-func (p *Plugin1) Name() string {
- return Plugin1Name
-}
diff --git a/tests/plugins/broadcast/plugins/plugin2.go b/tests/plugins/broadcast/plugins/plugin2.go
deleted file mode 100644
index 20cc1b24..00000000
--- a/tests/plugins/broadcast/plugins/plugin2.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin2Name = "plugin2"
-
-type Plugin2 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin2) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin2) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test")
- if err != nil {
- panic(err)
- }
-
- err = p.driver.Subscribe("2", "foo")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- if msg == nil {
- continue
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin2Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin2) Stop() error {
- _ = p.driver.Unsubscribe("2", "foo")
- p.cancel()
- return nil
-}
-
-func (p *Plugin2) Name() string {
- return Plugin2Name
-}
diff --git a/tests/plugins/broadcast/plugins/plugin3.go b/tests/plugins/broadcast/plugins/plugin3.go
deleted file mode 100644
index 2f416d2e..00000000
--- a/tests/plugins/broadcast/plugins/plugin3.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin3Name = "plugin3"
-
-type Plugin3 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin3) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin3) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test2")
- if err != nil {
- panic(err)
- }
-
- err = p.driver.Subscribe("3", "foo")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- if msg == nil {
- continue
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin3Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin3) Stop() error {
- _ = p.driver.Unsubscribe("3", "foo")
- p.cancel()
- return nil
-}
-
-func (p *Plugin3) Name() string {
- return Plugin3Name
-}
diff --git a/tests/plugins/broadcast/plugins/plugin4.go b/tests/plugins/broadcast/plugins/plugin4.go
deleted file mode 100644
index e2209648..00000000
--- a/tests/plugins/broadcast/plugins/plugin4.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin4Name = "plugin4"
-
-type Plugin4 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin4) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin4) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test3")
- if err != nil {
- panic(err)
- }
-
- err = p.driver.Subscribe("4", "foo")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- if msg == nil {
- continue
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin4Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin4) Stop() error {
- _ = p.driver.Unsubscribe("4", "foo")
- p.cancel()
- return nil
-}
-
-func (p *Plugin4) Name() string {
- return Plugin4Name
-}
diff --git a/tests/plugins/broadcast/plugins/plugin5.go b/tests/plugins/broadcast/plugins/plugin5.go
deleted file mode 100644
index 122046b8..00000000
--- a/tests/plugins/broadcast/plugins/plugin5.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin5Name = "plugin5"
-
-type Plugin5 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin5) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin5) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test4")
- if err != nil {
- panic(err)
- }
-
- err = p.driver.Subscribe("5", "foo")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- if msg == nil {
- continue
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin5Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin5) Stop() error {
- _ = p.driver.Unsubscribe("5", "foo")
- p.cancel()
- return nil
-}
-
-func (p *Plugin5) Name() string {
- return Plugin5Name
-}
diff --git a/tests/plugins/broadcast/plugins/plugin6.go b/tests/plugins/broadcast/plugins/plugin6.go
deleted file mode 100644
index 6ace0a79..00000000
--- a/tests/plugins/broadcast/plugins/plugin6.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package plugins
-
-import (
- "context"
- "fmt"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/common/pubsub"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/logger"
-)
-
-const Plugin6Name = "plugin6"
-
-type Plugin6 struct {
- log logger.Logger
- b broadcast.Broadcaster
- driver pubsub.SubReader
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (p *Plugin6) Init(log logger.Logger, b broadcast.Broadcaster) error {
- p.log = log
- p.b = b
- p.ctx, p.cancel = context.WithCancel(context.Background())
- return nil
-}
-
-func (p *Plugin6) Serve() chan error {
- errCh := make(chan error, 1)
-
- var err error
- p.driver, err = p.b.GetDriver("test")
- if err != nil {
- panic(err)
- }
-
- err = p.driver.Subscribe("6", "foo")
- if err != nil {
- panic(err)
- }
-
- go func() {
- for {
- msg, err := p.driver.Next(p.ctx)
- if err != nil {
- if errors.Is(errors.TimeOut, err) {
- return
- }
- errCh <- err
- return
- }
-
- if msg == nil {
- continue
- }
-
- p.log.Info(fmt.Sprintf("%s: %s", Plugin6Name, *msg))
- }
- }()
-
- return errCh
-}
-
-func (p *Plugin6) Stop() error {
- _ = p.driver.Unsubscribe("6", "foo")
- p.cancel()
- return nil
-}
-
-func (p *Plugin6) Name() string {
- return Plugin6Name
-}
diff --git a/tests/plugins/config/config_test.go b/tests/plugins/config/config_test.go
deleted file mode 100755
index 87ab1eaa..00000000
--- a/tests/plugins/config/config_test.go
+++ /dev/null
@@ -1,272 +0,0 @@
-package config
-
-import (
- "os"
- "os/signal"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-func TestViperProvider_Init(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
- vp.Flags = nil
-
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- tt := time.NewTicker(time.Second * 2)
- defer tt.Stop()
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- assert.NoError(t, er)
- return
- case <-tt.C:
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
-
-func TestConfigOverwriteFail(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(false), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
- vp.Flags = []string{"rpc.listen=tcp//not_exist"}
-
- err = container.RegisterAll(
- &logger.ZapLogger{},
- &rpc.Plugin{},
- vp,
- &Foo2{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- assert.Error(t, err)
-}
-
-func TestConfigOverwriteValid(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(false), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
- vp.Flags = []string{"rpc.listen=tcp://127.0.0.1:36643"}
-
- err = container.RegisterAll(
- &logger.ZapLogger{},
- &rpc.Plugin{},
- vp,
- &Foo2{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- assert.NoError(t, err)
-
- errCh, err := container.Serve()
- assert.NoError(t, err)
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- tt := time.NewTicker(time.Second * 3)
- defer tt.Stop()
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- assert.NoError(t, er)
- return
- case <-tt.C:
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
-
-func TestConfigEnvVariables(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(false), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- err = os.Setenv("SUPER_RPC_ENV", "tcp://127.0.0.1:36643")
- assert.NoError(t, err)
-
- vp := &config.Viper{}
- vp.Path = "configs/.rr-env.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- &logger.ZapLogger{},
- &rpc.Plugin{},
- vp,
- &Foo2{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- assert.NoError(t, err)
-
- errCh, err := container.Serve()
- assert.NoError(t, err)
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- tt := time.NewTicker(time.Second * 3)
- defer tt.Stop()
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- assert.NoError(t, er)
- return
- case <-tt.C:
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
-
-func TestConfigEnvVariablesFail(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(false), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- err = os.Setenv("SUPER_RPC_ENV", "tcp://127.0.0.1:6065")
- assert.NoError(t, err)
-
- vp := &config.Viper{}
- vp.Path = "configs/.rr-env.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- &logger.ZapLogger{},
- &rpc.Plugin{},
- vp,
- &Foo2{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- assert.NoError(t, err)
-
- _, err = container.Serve()
- assert.Error(t, err)
-}
-
-func TestConfigProvider_GeneralSection(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
- vp.Flags = nil
- vp.CommonConfig = &config.General{GracefulTimeout: time.Second * 10}
-
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- tt := time.NewTicker(time.Second * 2)
- defer tt.Stop()
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- assert.NoError(t, er)
- return
- case <-tt.C:
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
diff --git a/tests/plugins/config/configs/.rr-env.yaml b/tests/plugins/config/configs/.rr-env.yaml
deleted file mode 100755
index 3cacb5d0..00000000
--- a/tests/plugins/config/configs/.rr-env.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: ${SUPER_RPC_ENV}
-
-logs:
- mode: development
- level: error
-
-reload:
- interval: 1s
- patterns: [ ".php" ]
- services:
- http:
- recursive: true
- ignore: [ "vendor" ]
- patterns: [ ".php", ".go",".md", ]
- dirs: [ "." ]
- jobs:
- recursive: false
- ignore: [ "service/metrics" ]
- dirs: [ "./jobs" ]
- rpc:
- recursive: true
- patterns: [ ".json" ]
- dirs: [ "" ]
diff --git a/tests/plugins/config/configs/.rr.yaml b/tests/plugins/config/configs/.rr.yaml
deleted file mode 100755
index 575cdd33..00000000
--- a/tests/plugins/config/configs/.rr.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6060
-
-logs:
- mode: development
- level: error
-
-reload:
- interval: 1s
- patterns: [".php"]
- services:
- http:
- recursive: true
- ignore: ["vendor"]
- patterns: [".php", ".go",".md",]
- dirs: ["."]
- jobs:
- recursive: false
- ignore: ["service/metrics"]
- dirs: ["./jobs"]
- rpc:
- recursive: true
- patterns: [".json"]
- dirs: [""]
diff --git a/tests/plugins/config/plugin1.go b/tests/plugins/config/plugin1.go
deleted file mode 100755
index 08a48a4f..00000000
--- a/tests/plugins/config/plugin1.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package config
-
-import (
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-type AllConfig struct {
- RPC struct {
- Listen string `mapstructure:"listen"`
- } `mapstructure:"rpc"`
- Reload struct {
- Enabled bool `mapstructure:"enabled"`
- Interval string `mapstructure:"interval"`
- Patterns []string `mapstructure:"patterns"`
- Services struct {
- HTTP struct {
- Recursive bool `mapstructure:"recursive"`
- Ignore []string `mapstructure:"ignore"`
- Patterns []string `mapstructure:"patterns"`
- Dirs []string `mapstructure:"dirs"`
- } `mapstructure:"http"`
- Jobs struct {
- Recursive bool `mapstructure:"recursive"`
- Ignore []string `mapstructure:"ignore"`
- Dirs []string `mapstructure:"dirs"`
- } `mapstructure:"jobs"`
- RPC struct {
- Recursive bool `mapstructure:"recursive"`
- Patterns []string `mapstructure:"patterns"`
- Dirs []string `mapstructure:"dirs"`
- } `mapstructure:"rpc"`
- } `mapstructure:"services"`
- } `mapstructure:"reload"`
-}
-
-// ReloadConfig is a Reload configuration point.
-type ReloadConfig struct {
- Interval time.Duration
- Patterns []string
- Services map[string]ServiceConfig
-}
-
-type ServiceConfig struct {
- Enabled bool
- Recursive bool
- Patterns []string
- Dirs []string
- Ignore []string
-}
-
-type Foo struct {
- configProvider config.Configurer
-}
-
-// Depends on S2 and DB (S3 in the current case)
-func (f *Foo) Init(p config.Configurer) error {
- f.configProvider = p
- return nil
-}
-
-func (f *Foo) Serve() chan error {
- const op = errors.Op("foo_plugin_serve")
- errCh := make(chan error, 1)
-
- r := &ReloadConfig{}
- err := f.configProvider.UnmarshalKey("reload", r)
- if err != nil {
- errCh <- err
- }
-
- if len(r.Patterns) == 0 {
- errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
- return errCh
- }
-
- var allCfg AllConfig
- err = f.configProvider.Unmarshal(&allCfg)
- if err != nil {
- errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
- return errCh
- }
-
- if allCfg.RPC.Listen != "tcp://127.0.0.1:6060" {
- errCh <- errors.E(op, errors.Str("RPC.Listen should be parsed"))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo) Stop() error {
- return nil
-}
diff --git a/tests/plugins/config/plugin2.go b/tests/plugins/config/plugin2.go
deleted file mode 100755
index 8c6f36c1..00000000
--- a/tests/plugins/config/plugin2.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package config
-
-import (
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-type Foo2 struct {
- configProvider config.Configurer
-}
-
-// Depends on S2 and DB (S3 in the current case)
-func (f *Foo2) Init(p config.Configurer) error {
- f.configProvider = p
- return nil
-}
-
-func (f *Foo2) Serve() chan error {
- const op = errors.Op("foo_plugin_serve")
- errCh := make(chan error, 1)
-
- r := &ReloadConfig{}
- err := f.configProvider.UnmarshalKey("reload", r)
- if err != nil {
- errCh <- err
- }
-
- if len(r.Patterns) == 0 {
- errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
- return errCh
- }
-
- var allCfg AllConfig
- err = f.configProvider.Unmarshal(&allCfg)
- if err != nil {
- errCh <- errors.E(op, errors.Str("should be at least one pattern, but got 0"))
- return errCh
- }
-
- if allCfg.RPC.Listen != "tcp://127.0.0.1:36643" {
- errCh <- errors.E(op, errors.Str("RPC.Listen should be overwritten"))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo2) Stop() error {
- return nil
-}
diff --git a/tests/plugins/config/plugin3.go b/tests/plugins/config/plugin3.go
deleted file mode 100755
index 41b79259..00000000
--- a/tests/plugins/config/plugin3.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package config
-
-import (
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-type Foo3 struct {
- configProvider config.Configurer
-}
-
-// Depends on S2 and DB (S3 in the current case)
-func (f *Foo3) Init(p config.Configurer) error {
- f.configProvider = p
- return nil
-}
-
-func (f *Foo3) Serve() chan error {
- const op = errors.Op("foo_plugin_serve")
- errCh := make(chan error, 1)
-
- if f.configProvider.GetCommonConfig().GracefulTimeout != time.Second*10 {
- errCh <- errors.E(op, errors.Str("GracefulTimeout should be eq to 10 seconds"))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo3) Stop() error {
- return nil
-}
diff --git a/tests/plugins/grpc/configs/.rr-grpc-init.yaml b/tests/plugins/grpc/configs/.rr-grpc-init.yaml
deleted file mode 100644
index b743a766..00000000
--- a/tests/plugins/grpc/configs/.rr-grpc-init.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-rpc:
- listen: "tcp://127.0.0.1:6001"
-
-server:
- command: "php ../../psr-worker-bench.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-# GRPC service configuration
-grpc:
- # socket to listen
- listen: "tcp://localhost:9001"
-
- # proto root file
- proto: "configs/test.proto"
-
- # max send limit (MB)
- max_send_msg_size: 50
-
- # max receive limit (MB)
- max_recv_msg_size: 50
-
- # MaxConnectionIdle is a duration for the amount of time after which an
- # idle connection would be closed by sending a GoAway. Idleness duration is
- # defined since the most recent time the number of outstanding RPCs became
- # zero or the connection establishment.
- max_connection_idle: 0s
-
- # MaxConnectionAge is a duration for the maximum amount of time a
- # connection may exist before it will be closed by sending a GoAway. A
- # random jitter of +/-10% will be added to MaxConnectionAge to spread out
- # connection storms.
- max_connection_age: 0s
-
- # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
- # which the connection will be forcibly closed.
- max_connection_age_grace: 0s
-
- # MaxConcurrentStreams limits the number of concurrent streams
- # allowed on each server transport (connection).
- max_concurrent_streams: 10
-
- # After a duration of this time if the server doesn't see any activity it
- # pings the client to see if the transport is still alive.
- # If set below 1s, a minimum value of 1s will be used instead.
- ping_time: 1s
-
- # After having pinged for keepalive check, the server waits for a duration
- # of Timeout and if no activity is seen even after that the connection is
- # closed.
- timeout: 200s
-
- # Usual workers pool configuration
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
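The keepalive comments in the removed config above correspond closely to grpc-go's server options. Below is a minimal sketch under that assumption, with values taken from the config; the option and field names are grpc-go's (google.golang.org/grpc), not RoadRunner's, and the actual plugin wiring is not reproduced here:

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// newServer shows how the grpc section above could map onto grpc-go options.
// The values mirror the config; 0 leaves the corresponding limit at the
// library default (effectively disabled).
func newServer() *grpc.Server {
	kp := keepalive.ServerParameters{
		MaxConnectionIdle:     0,                  // max_connection_idle: 0s
		MaxConnectionAge:      0,                  // max_connection_age: 0s
		MaxConnectionAgeGrace: 0,                  // max_connection_age_grace: 0s
		Time:                  time.Second,        // ping_time: 1s
		Timeout:               200 * time.Second,  // timeout: 200s
	}

	return grpc.NewServer(
		grpc.KeepaliveParams(kp),
		grpc.MaxConcurrentStreams(10),     // max_concurrent_streams: 10
		grpc.MaxRecvMsgSize(50*1024*1024), // max_recv_msg_size: 50 MB
		grpc.MaxSendMsgSize(50*1024*1024), // max_send_msg_size: 50 MB
	)
}

func main() {
	_ = newServer()
}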
diff --git a/tests/plugins/grpc/configs/external.proto b/tests/plugins/grpc/configs/external.proto
deleted file mode 100644
index 2bbe806e..00000000
--- a/tests/plugins/grpc/configs/external.proto
+++ /dev/null
@@ -1,19 +0,0 @@
-syntax = "proto3";
-package tests;
-
-service External {
- rpc Echo (Ping) returns (Pong) {
- }
-
- rpc Empty (EmptyMessage) returns (EmptyMessage) {
-
- }
-}
-
-message Ping {
- int64 value = 1;
-}
-
-message Pong {
- int64 value = 1;
-} \ No newline at end of file
diff --git a/tests/plugins/grpc/configs/test.pb.go b/tests/plugins/grpc/configs/test.pb.go
deleted file mode 100644
index 5f30ceb6..00000000
--- a/tests/plugins/grpc/configs/test.pb.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.27.1
-// protoc v3.17.3
-// source: test.proto
-
-package __
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Message struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
-}
-
-func (x *Message) Reset() {
- *x = Message{}
- if protoimpl.UnsafeEnabled {
- mi := &file_test_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Message) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Message) ProtoMessage() {}
-
-func (x *Message) ProtoReflect() protoreflect.Message {
- mi := &file_test_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Message.ProtoReflect.Descriptor instead.
-func (*Message) Descriptor() ([]byte, []int) {
- return file_test_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Message) GetMsg() string {
- if x != nil {
- return x.Msg
- }
- return ""
-}
-
-type EmptyMessage struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *EmptyMessage) Reset() {
- *x = EmptyMessage{}
- if protoimpl.UnsafeEnabled {
- mi := &file_test_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EmptyMessage) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EmptyMessage) ProtoMessage() {}
-
-func (x *EmptyMessage) ProtoReflect() protoreflect.Message {
- mi := &file_test_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EmptyMessage.ProtoReflect.Descriptor instead.
-func (*EmptyMessage) Descriptor() ([]byte, []int) {
- return file_test_proto_rawDescGZIP(), []int{1}
-}
-
-type DetailsMessageForException struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Code uint64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
-}
-
-func (x *DetailsMessageForException) Reset() {
- *x = DetailsMessageForException{}
- if protoimpl.UnsafeEnabled {
- mi := &file_test_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DetailsMessageForException) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DetailsMessageForException) ProtoMessage() {}
-
-func (x *DetailsMessageForException) ProtoReflect() protoreflect.Message {
- mi := &file_test_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DetailsMessageForException.ProtoReflect.Descriptor instead.
-func (*DetailsMessageForException) Descriptor() ([]byte, []int) {
- return file_test_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *DetailsMessageForException) GetCode() uint64 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *DetailsMessageForException) GetMessage() string {
- if x != nil {
- return x.Message
- }
- return ""
-}
-
-var File_test_proto protoreflect.FileDescriptor
-
-var file_test_proto_rawDesc = []byte{
- 0x0a, 0x0a, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x1b, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d,
- 0x73, 0x67, 0x22, 0x0e, 0x0a, 0x0c, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x22, 0x4a, 0x0a, 0x1a, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x45, 0x78, 0x63, 0x65, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04,
- 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xf6,
- 0x01, 0x0a, 0x04, 0x54, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x04, 0x45, 0x63, 0x68, 0x6f, 0x12,
- 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x1a, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x2d, 0x0a, 0x05, 0x54, 0x68, 0x72, 0x6f, 0x77, 0x12, 0x10,
- 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x1a, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x22, 0x00, 0x12, 0x2b, 0x0a, 0x03, 0x44, 0x69, 0x65, 0x12, 0x10, 0x2e, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x10, 0x2e,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22,
- 0x00, 0x12, 0x2c, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x2e, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x10, 0x2e, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12,
- 0x36, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x15, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x15,
- 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x4d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, 0x05, 0x5a, 0x03, 0x2e, 0x2f, 0x3b, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_test_proto_rawDescOnce sync.Once
- file_test_proto_rawDescData = file_test_proto_rawDesc
-)
-
-func file_test_proto_rawDescGZIP() []byte {
- file_test_proto_rawDescOnce.Do(func() {
- file_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_proto_rawDescData)
- })
- return file_test_proto_rawDescData
-}
-
-var file_test_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_test_proto_goTypes = []interface{}{
- (*Message)(nil), // 0: service.Message
- (*EmptyMessage)(nil), // 1: service.EmptyMessage
- (*DetailsMessageForException)(nil), // 2: service.DetailsMessageForException
-}
-var file_test_proto_depIdxs = []int32{
- 0, // 0: service.Test.Echo:input_type -> service.Message
- 0, // 1: service.Test.Throw:input_type -> service.Message
- 0, // 2: service.Test.Die:input_type -> service.Message
- 0, // 3: service.Test.Info:input_type -> service.Message
- 1, // 4: service.Test.Ping:input_type -> service.EmptyMessage
- 0, // 5: service.Test.Echo:output_type -> service.Message
- 0, // 6: service.Test.Throw:output_type -> service.Message
- 0, // 7: service.Test.Die:output_type -> service.Message
- 0, // 8: service.Test.Info:output_type -> service.Message
- 1, // 9: service.Test.Ping:output_type -> service.EmptyMessage
- 5, // [5:10] is the sub-list for method output_type
- 0, // [0:5] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_test_proto_init() }
-func file_test_proto_init() {
- if File_test_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Message); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_test_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EmptyMessage); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_test_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DetailsMessageForException); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_test_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_test_proto_goTypes,
- DependencyIndexes: file_test_proto_depIdxs,
- MessageInfos: file_test_proto_msgTypes,
- }.Build()
- File_test_proto = out.File
- file_test_proto_rawDesc = nil
- file_test_proto_goTypes = nil
- file_test_proto_depIdxs = nil
-}
diff --git a/tests/plugins/grpc/configs/test.proto b/tests/plugins/grpc/configs/test.proto
deleted file mode 100644
index 2e1c90a9..00000000
--- a/tests/plugins/grpc/configs/test.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-syntax = "proto3";
-
-package service;
-option go_package = "./;";
-
-service Test {
- rpc Echo (Message) returns (Message) {
- }
-
- rpc Throw (Message) returns (Message) {
- }
-
- rpc Die (Message) returns (Message) {
- }
-
- rpc Info (Message) returns (Message) {
- }
-
- rpc Ping (EmptyMessage) returns (EmptyMessage) {
- }
-}
-
-message Message {
- string msg = 1;
-}
-
-message EmptyMessage {
-}
-
-message DetailsMessageForException {
- uint64 code = 1;
- string message = 2;
-} \ No newline at end of file
diff --git a/tests/plugins/grpc/grpc_plugin_test.go b/tests/plugins/grpc/grpc_plugin_test.go
deleted file mode 100644
index b92282f7..00000000
--- a/tests/plugins/grpc/grpc_plugin_test.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package grpc_test
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/grpc"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/stretchr/testify/assert"
-)
-
-func TestGrpcInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-grpc-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &grpc.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &informer.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
-
- wg.Wait()
-}
diff --git a/tests/plugins/grpc/php_server/.rr.yaml b/tests/plugins/grpc/php_server/.rr.yaml
deleted file mode 100644
index cc4a9300..00000000
--- a/tests/plugins/grpc/php_server/.rr.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-grpc:
- listen: "tcp://:9001"
- proto: "service.proto"
- tls:
- key: "server.key"
- cert: "server.crt"
- workers:
- command: "php worker.php"
- pool:
- numWorkers: 4
-
-metrics:
- address: localhost:2112
-
-limit:
- interval: 1
- services:
- grpc:
- maxMemory: 100
- TTL: 0
- idleTTL: 0
- execTTL: 60
\ No newline at end of file
diff --git a/tests/plugins/grpc/php_server/composer.json b/tests/plugins/grpc/php_server/composer.json
deleted file mode 100644
index b6303291..00000000
--- a/tests/plugins/grpc/php_server/composer.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "name": "app/example-grpc-server",
- "description": "Example GRPC Server",
- "repositories": [
- {
- "type": "path",
- "url": "../.."
- }
- ],
- "require": {
- "spiral/php-grpc": "*"
- },
- "require-dev": {
- "grpc/grpc": "^1.36"
- },
- "autoload": {
- "psr-4": {
- "": "src"
- }
- },
- "minimum-stability": "dev",
- "prefer-stable": true
-}
diff --git a/tests/plugins/grpc/php_server/server.crt b/tests/plugins/grpc/php_server/server.crt
deleted file mode 100644
index 24d67fd7..00000000
--- a/tests/plugins/grpc/php_server/server.crt
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICTTCCAdOgAwIBAgIJAOKyUd+llTRKMAoGCCqGSM49BAMCMGMxCzAJBgNVBAYT
-AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
-MRMwEQYDVQQKDApSb2FkUnVubmVyMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTgw
-OTMwMTMzNDUzWhcNMjgwOTI3MTMzNDUzWjBjMQswCQYDVQQGEwJVUzETMBEGA1UE
-CAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzETMBEGA1UECgwK
-Um9hZFJ1bm5lcjESMBAGA1UEAwwJbG9jYWxob3N0MHYwEAYHKoZIzj0CAQYFK4EE
-ACIDYgAEVnbShsM+l5RR3wfWWmGhzuFGwNzKCk7i9xyobDIyBUxG/UUSfj7KKlUX
-puDnDEtF5xXcepl744CyIAYFLOXHb5WqI4jCOzG0o9f/00QQ4bQudJOdbqV910QF
-C2vb7Fxro1MwUTAdBgNVHQ4EFgQU9xUexnbB6ORKayA7Pfjzs33otsAwHwYDVR0j
-BBgwFoAU9xUexnbB6ORKayA7Pfjzs33otsAwDwYDVR0TAQH/BAUwAwEB/zAKBggq
-hkjOPQQDAgNoADBlAjEAue3HhR/MUhxoa9tSDBtOJT3FYbDQswrsdqBTz97CGKst
-e7XeZ3HMEvEXy0hGGEMhAjAqcD/4k9vViVppgWFtkk6+NFbm+Kw/QeeAiH5FgFSj
-8xQcb+b7nPwNLp3JOkXkVd4=
------END CERTIFICATE-----
diff --git a/tests/plugins/grpc/php_server/server.key b/tests/plugins/grpc/php_server/server.key
deleted file mode 100644
index 7501dd46..00000000
--- a/tests/plugins/grpc/php_server/server.key
+++ /dev/null
@@ -1,9 +0,0 @@
------BEGIN EC PARAMETERS-----
-BgUrgQQAIg==
------END EC PARAMETERS-----
------BEGIN EC PRIVATE KEY-----
-MIGkAgEBBDCQP8utxNbHR6xZOLAJgUhn88r6IrPqmN0MsgGJM/jePB+T9UhkmIU8
-PMm2HeScbcugBwYFK4EEACKhZANiAARWdtKGwz6XlFHfB9ZaYaHO4UbA3MoKTuL3
-HKhsMjIFTEb9RRJ+PsoqVRem4OcMS0XnFdx6mXvjgLIgBgUs5cdvlaojiMI7MbSj
-1//TRBDhtC50k51upX3XRAULa9vsXGs=
------END EC PRIVATE KEY-----
diff --git a/tests/plugins/grpc/php_server/service.proto b/tests/plugins/grpc/php_server/service.proto
deleted file mode 100644
index 60ff84a9..00000000
--- a/tests/plugins/grpc/php_server/service.proto
+++ /dev/null
@@ -1,11 +0,0 @@
-syntax = "proto3";
-package service;
-
-service Echo {
- rpc Ping (Message) returns (Message) {
- }
-}
-
-message Message {
- string msg = 1;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/php_server/src/EchoService.php b/tests/plugins/grpc/php_server/src/EchoService.php
deleted file mode 100644
index c2707811..00000000
--- a/tests/plugins/grpc/php_server/src/EchoService.php
+++ /dev/null
@@ -1,17 +0,0 @@
-<?php
-/**
- * Sample GRPC PHP server.
- */
-
-use Spiral\GRPC\ContextInterface;
-use Service\EchoInterface;
-use Service\Message;
-
-class EchoService implements EchoInterface
-{
- public function Ping(ContextInterface $ctx, Message $in): Message
- {
- $out = new Message();
- return $out->setMsg(strtoupper($in->getMsg()));
- }
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/php_server/src/GPBMetadata/Service.php b/tests/plugins/grpc/php_server/src/GPBMetadata/Service.php
deleted file mode 100644
index c1b65b21..00000000
--- a/tests/plugins/grpc/php_server/src/GPBMetadata/Service.php
+++ /dev/null
@@ -1,27 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: service.proto
-
-namespace GPBMetadata;
-
-class Service
-{
- public static $is_initialized = false;
-
- public static function initOnce() {
- $pool = \Google\Protobuf\Internal\DescriptorPool::getGeneratedPool();
-
- if (static::$is_initialized == true) {
- return;
- }
- $pool->internalAddGeneratedFile(hex2bin(
- "0a6e0a0d736572766963652e70726f746f12077365727669636522160a07" .
- "4d657373616765120b0a036d736718012001280932340a044563686f122c" .
- "0a0450696e6712102e736572766963652e4d6573736167651a102e736572" .
- "766963652e4d6573736167652200620670726f746f33"
- ));
-
- static::$is_initialized = true;
- }
-}
-
diff --git a/tests/plugins/grpc/php_server/src/Service/EchoInterface.php b/tests/plugins/grpc/php_server/src/Service/EchoInterface.php
deleted file mode 100644
index 5f336ace..00000000
--- a/tests/plugins/grpc/php_server/src/Service/EchoInterface.php
+++ /dev/null
@@ -1,22 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/grpc). DO NOT EDIT!
-# source: service.proto
-
-namespace Service;
-
-use Spiral\GRPC;
-
-interface EchoInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "service.Echo";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Message $in
- * @return Message
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function Ping(GRPC\ContextInterface $ctx, Message $in): Message;
-}
diff --git a/tests/plugins/grpc/php_server/src/Service/Message.php b/tests/plugins/grpc/php_server/src/Service/Message.php
deleted file mode 100644
index 6c40c879..00000000
--- a/tests/plugins/grpc/php_server/src/Service/Message.php
+++ /dev/null
@@ -1,58 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# source: service.proto
-
-namespace Service;
-
-use Google\Protobuf\Internal\GPBType;
-use Google\Protobuf\Internal\RepeatedField;
-use Google\Protobuf\Internal\GPBUtil;
-
-/**
- * Generated from protobuf message <code>service.Message</code>
- */
-class Message extends \Google\Protobuf\Internal\Message
-{
- /**
- * Generated from protobuf field <code>string msg = 1;</code>
- */
- private $msg = '';
-
- /**
- * Constructor.
- *
- * @param array $data {
- * Optional. Data for populating the Message object.
- *
- * @type string $msg
- * }
- */
- public function __construct($data = NULL) {
- \GPBMetadata\Service::initOnce();
- parent::__construct($data);
- }
-
- /**
- * Generated from protobuf field <code>string msg = 1;</code>
- * @return string
- */
- public function getMsg()
- {
- return $this->msg;
- }
-
- /**
- * Generated from protobuf field <code>string msg = 1;</code>
- * @param string $var
- * @return $this
- */
- public function setMsg($var)
- {
- GPBUtil::checkString($var, True);
- $this->msg = $var;
-
- return $this;
- }
-
-}
-
diff --git a/tests/plugins/grpc/php_server/worker-grpc.php b/tests/plugins/grpc/php_server/worker-grpc.php
deleted file mode 100644
index 683a2341..00000000
--- a/tests/plugins/grpc/php_server/worker-grpc.php
+++ /dev/null
@@ -1,26 +0,0 @@
-<?php
-/**
- * Sample GRPC PHP server.
- */
-
-use Service\EchoInterface;
-use Spiral\Goridge\StreamRelay;
-use Spiral\GRPC\Server;
-use Spiral\RoadRunner\Worker;
-
-require __DIR__ . '/vendor/autoload.php';
-
-$server = new Server(null, [
- 'debug' => false, // optional (default: false)
-]);
-
-$server->registerService(EchoInterface::class, new EchoService());
-
-$worker = \method_exists(Worker::class, 'create')
- // RoadRunner >= 2.x
- ? Worker::create()
- // RoadRunner 1.x
- : new Worker(new StreamRelay(STDIN, STDOUT))
-;
-
-$server->serve($worker);
diff --git a/tests/plugins/grpc/plugin_test.go b/tests/plugins/grpc/plugin_test.go
deleted file mode 100644
index cfbe0121..00000000
--- a/tests/plugins/grpc/plugin_test.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package grpc
-
-import (
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func init() {
- err := build()
- if err != nil {
- panic(err)
- }
-}
-
-func build() error {
- cmd := exec.Command("go", "build", "-o", "plugin", "../../../plugins/grpc/protoc_plugins/protoc-gen-php-grpc")
- return cmd.Run()
-}
-
-func protoc(t *testing.T, args []string) {
- cmd := exec.Command("protoc", "--plugin=protoc-gen-php-grpc=./plugin")
- cmd.Args = append(cmd.Args, args...)
- out, err := cmd.CombinedOutput()
-
- if len(out) > 0 || err != nil {
- t.Log("RUNNING: ", strings.Join(cmd.Args, " "))
- }
-
- if len(out) > 0 {
- t.Log(string(out))
- }
-
- if err != nil {
- t.Fatalf("protoc: %v", err)
- }
-}
-
-func Test_Simple(t *testing.T) {
- workdir, _ := os.Getwd()
- tmpdir, err := ioutil.TempDir("", "proto-test")
- require.NoError(t, err)
-
- defer func() {
- assert.NoError(t, os.RemoveAll(tmpdir))
- }()
-
- args := []string{
- "-Itestdata",
- "--php-grpc_out=" + tmpdir,
- "simple/simple.proto",
- }
-
- protoc(t, args)
-
- assertEqualFiles(
- t,
- workdir+"/testdata/simple/TestSimple/SimpleServiceInterface.php",
- tmpdir+"/TestSimple/SimpleServiceInterface.php",
- )
-}
-
-func Test_PhpNamespaceOption(t *testing.T) {
- workdir, _ := os.Getwd()
- tmpdir, err := ioutil.TempDir("", "proto-test")
- require.NoError(t, err)
-
- defer func() {
- assert.NoError(t, os.RemoveAll(tmpdir))
- }()
-
- args := []string{
- "-Itestdata",
- "--php-grpc_out=" + tmpdir,
- "php_namespace/service.proto",
- }
- protoc(t, args)
-
- assertEqualFiles(
- t,
- workdir+"/testdata/php_namespace/Test/CustomNamespace/ServiceInterface.php",
- tmpdir+"/Test/CustomNamespace/ServiceInterface.php",
- )
-}
-
-func Test_UseImportedMessage(t *testing.T) {
- workdir, _ := os.Getwd()
- tmpdir, err := ioutil.TempDir("", "proto-test")
- require.NoError(t, err)
-
- defer func() {
- assert.NoError(t, os.RemoveAll(tmpdir))
- }()
-
- args := []string{
- "-Itestdata",
- "--php-grpc_out=" + tmpdir,
- "import/service.proto",
- }
- protoc(t, args)
-
- assertEqualFiles(
- t,
- workdir+"/testdata/import/Import/ServiceInterface.php",
- tmpdir+"/Import/ServiceInterface.php",
- )
-}
-
-func Test_PhpNamespaceOptionInUse(t *testing.T) {
- workdir, _ := os.Getwd()
- tmpdir, err := ioutil.TempDir("", "proto-test")
- require.NoError(t, err)
-
- defer func() {
- assert.NoError(t, os.RemoveAll(tmpdir))
- }()
-
- args := []string{
- "-Itestdata",
- "--php-grpc_out=" + tmpdir,
- "import_custom/service.proto",
- }
- protoc(t, args)
-
- assertEqualFiles(
- t,
- workdir+"/testdata/import_custom/Test/CustomImport/ServiceInterface.php",
- tmpdir+"/Test/CustomImport/ServiceInterface.php",
- )
-}
-
-func Test_UseOfGoogleEmptyMessage(t *testing.T) {
- workdir, _ := os.Getwd()
- tmpdir, err := ioutil.TempDir("", "proto-test")
- require.NoError(t, err)
-
- defer func() {
- assert.NoError(t, os.RemoveAll(tmpdir))
- }()
-
- args := []string{
- "-Itestdata",
- "--php-grpc_out=" + tmpdir,
- "use_empty/service.proto",
- }
- protoc(t, args)
-
- assertEqualFiles(
- t,
- workdir+"/testdata/use_empty/Test/ServiceInterface.php",
- tmpdir+"/Test/ServiceInterface.php",
- )
-
- assert.NoError(t, os.RemoveAll("plugin"))
-}
-
-func assertEqualFiles(t *testing.T, original, generated string) {
- assert.FileExists(t, generated)
-
- originalData, err := ioutil.ReadFile(original)
- if err != nil {
- t.Fatal("Can't find original file for comparison")
- }
-
- generatedData, err := ioutil.ReadFile(generated)
- if err != nil {
- t.Fatal("Can't find generated file for comparison")
- }
-
- // every OS has a special boy
- r := strings.NewReplacer("\r\n", "", "\n", "")
- assert.Equal(t, r.Replace(string(originalData)), r.Replace(string(generatedData)))
-}
diff --git a/tests/plugins/grpc/testdata/import/Import/ServiceInterface.php b/tests/plugins/grpc/testdata/import/Import/ServiceInterface.php
deleted file mode 100644
index 13e58daf..00000000
--- a/tests/plugins/grpc/testdata/import/Import/ServiceInterface.php
+++ /dev/null
@@ -1,32 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: import/service.proto
-
-namespace Import;
-
-use Spiral\GRPC;
-use Import\Sub;
-
-interface ServiceInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "import.Service";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Message $in
- * @return Message
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function SimpleMethod(GRPC\ContextInterface $ctx, Message $in): Message;
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Sub\Message $in
- * @return Sub\Message
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function ImportMethod(GRPC\ContextInterface $ctx, Sub\Message $in): Sub\Message;
-}
diff --git a/tests/plugins/grpc/testdata/import/service.proto b/tests/plugins/grpc/testdata/import/service.proto
deleted file mode 100644
index 5d888f09..00000000
--- a/tests/plugins/grpc/testdata/import/service.proto
+++ /dev/null
@@ -1,17 +0,0 @@
-syntax = "proto3";
-
-package import;
-
-import "import/sub/message.proto";
-
-service Service {
- rpc SimpleMethod (Message) returns (Message) {
- }
-
- rpc ImportMethod (import.sub.Message) returns (import.sub.Message) {
- }
-}
-
-message Message {
- int64 id = 1;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/import/sub/message.proto b/tests/plugins/grpc/testdata/import/sub/message.proto
deleted file mode 100644
index 1db0313b..00000000
--- a/tests/plugins/grpc/testdata/import/sub/message.proto
+++ /dev/null
@@ -1,7 +0,0 @@
-syntax = "proto3";
-
-package import.sub;
-
-message Message {
- int64 id = 1;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/import_custom/Test/CustomImport/ServiceInterface.php b/tests/plugins/grpc/testdata/import_custom/Test/CustomImport/ServiceInterface.php
deleted file mode 100644
index b010ce4f..00000000
--- a/tests/plugins/grpc/testdata/import_custom/Test/CustomImport/ServiceInterface.php
+++ /dev/null
@@ -1,32 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: import_custom/service.proto
-
-namespace Test\CustomImport;
-
-use Spiral\GRPC;
-use Test\CustomImport\Message;
-
-interface ServiceInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "import.Service";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Message $in
- * @return Message
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function SimpleMethod(GRPC\ContextInterface $ctx, Message $in): Message;
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Message\Message $in
- * @return Message\Message
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function ImportMethod(GRPC\ContextInterface $ctx, Message\Message $in): Message\Message;
-}
diff --git a/tests/plugins/grpc/testdata/import_custom/service.proto b/tests/plugins/grpc/testdata/import_custom/service.proto
deleted file mode 100644
index 872aaae3..00000000
--- a/tests/plugins/grpc/testdata/import_custom/service.proto
+++ /dev/null
@@ -1,19 +0,0 @@
-syntax = "proto3";
-
-package import;
-
-option php_namespace = "Test\\CustomImport";
-
-import "import_custom/sub/message.proto";
-
-service Service {
- rpc SimpleMethod (Message) returns (Message) {
- }
-
- rpc ImportMethod (import.sub.Message) returns (import.sub.Message) {
- }
-}
-
-message Message {
- int64 id = 1;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/import_custom/sub/message.proto b/tests/plugins/grpc/testdata/import_custom/sub/message.proto
deleted file mode 100644
index 5d722ca3..00000000
--- a/tests/plugins/grpc/testdata/import_custom/sub/message.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax = "proto3";
-
-package import.sub;
-option php_namespace = "Test\\CustomImport\\Message";
-
-
-service Service {
- rpc AnotherMethod (Message) returns (Message) {
- }
-}
-
-message Message {
- int64 id = 1;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/php_namespace/Test/CustomNamespace/ServiceInterface.php b/tests/plugins/grpc/testdata/php_namespace/Test/CustomNamespace/ServiceInterface.php
deleted file mode 100644
index 2090ba97..00000000
--- a/tests/plugins/grpc/testdata/php_namespace/Test/CustomNamespace/ServiceInterface.php
+++ /dev/null
@@ -1,22 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: php_namespace/service.proto
-
-namespace Test\CustomNamespace;
-
-use Spiral\GRPC;
-
-interface ServiceInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "testPhpNamespace.Service";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param SimpleMessage $in
- * @return SimpleMessage
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function SimpleMethod(GRPC\ContextInterface $ctx, SimpleMessage $in): SimpleMessage;
-}
diff --git a/tests/plugins/grpc/testdata/php_namespace/service.proto b/tests/plugins/grpc/testdata/php_namespace/service.proto
deleted file mode 100644
index a3bfa3c0..00000000
--- a/tests/plugins/grpc/testdata/php_namespace/service.proto
+++ /dev/null
@@ -1,15 +0,0 @@
-syntax = "proto3";
-
-package testPhpNamespace;
-
-option php_namespace = "Test\\CustomNamespace";
-
-service Service {
- rpc SimpleMethod (SimpleMessage) returns (SimpleMessage) {
- }
-}
-
-message SimpleMessage {
- int32 id = 1;
- string name = 2;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/simple/TestSimple/SimpleServiceInterface.php b/tests/plugins/grpc/testdata/simple/TestSimple/SimpleServiceInterface.php
deleted file mode 100644
index f9e84bf7..00000000
--- a/tests/plugins/grpc/testdata/simple/TestSimple/SimpleServiceInterface.php
+++ /dev/null
@@ -1,22 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: simple/simple.proto
-
-namespace TestSimple;
-
-use Spiral\GRPC;
-
-interface SimpleServiceInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "testSimple.SimpleService";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param SimpleMessage $in
- * @return SimpleMessage
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function SimpleMethod(GRPC\ContextInterface $ctx, SimpleMessage $in): SimpleMessage;
-}
diff --git a/tests/plugins/grpc/testdata/simple/simple.proto b/tests/plugins/grpc/testdata/simple/simple.proto
deleted file mode 100644
index aca3c1d9..00000000
--- a/tests/plugins/grpc/testdata/simple/simple.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-
-package testSimple;
-
-service SimpleService {
- rpc SimpleMethod (SimpleMessage) returns (SimpleMessage) {
- }
-}
-
-message SimpleMessage {
- int32 id = 1;
- string name = 2;
-}
\ No newline at end of file
diff --git a/tests/plugins/grpc/testdata/use_empty/Test/ServiceInterface.php b/tests/plugins/grpc/testdata/use_empty/Test/ServiceInterface.php
deleted file mode 100644
index fe6d345a..00000000
--- a/tests/plugins/grpc/testdata/use_empty/Test/ServiceInterface.php
+++ /dev/null
@@ -1,23 +0,0 @@
-<?php
-# Generated by the protocol buffer compiler (spiral/php-grpc). DO NOT EDIT!
-# source: use_empty/service.proto
-
-namespace Test;
-
-use Spiral\GRPC;
-use Google\Protobuf;
-
-interface ServiceInterface extends GRPC\ServiceInterface
-{
- // GRPC specific service name.
- public const NAME = "test.Service";
-
- /**
- * @param GRPC\ContextInterface $ctx
- * @param Protobuf\GPBEmpty $in
- * @return Protobuf\GPBEmpty
- *
- * @throws GRPC\Exception\InvokeException
- */
- public function Test(GRPC\ContextInterface $ctx, Protobuf\GPBEmpty $in): Protobuf\GPBEmpty;
-}
diff --git a/tests/plugins/grpc/testdata/use_empty/service.proto b/tests/plugins/grpc/testdata/use_empty/service.proto
deleted file mode 100644
index 8c68d8d3..00000000
--- a/tests/plugins/grpc/testdata/use_empty/service.proto
+++ /dev/null
@@ -1,10 +0,0 @@
-syntax = "proto3";
-
-package test;
-
-import "google/protobuf/empty.proto";
-
-service Service {
- rpc Test (google.protobuf.Empty) returns (google.protobuf.Empty) {
- }
-}
\ No newline at end of file
diff --git a/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml b/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml
deleted file mode 100644
index 73e4f6e6..00000000
--- a/tests/plugins/gzip/configs/.rr-http-middlewareNotExist.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-server:
- command: "php ../../psr-worker.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:18103
- max_request_size: 1024
- middleware: [ "gzip", "foo" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/gzip/configs/.rr-http-withGzip.yaml b/tests/plugins/gzip/configs/.rr-http-withGzip.yaml
deleted file mode 100644
index c67e207b..00000000
--- a/tests/plugins/gzip/configs/.rr-http-withGzip.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-server:
- command: "php ../../psr-worker.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:18953
- max_request_size: 1024
- middleware: [ "gzip" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
\ No newline at end of file
diff --git a/tests/plugins/gzip/plugin_test.go b/tests/plugins/gzip/plugin_test.go
deleted file mode 100644
index 1c6ab887..00000000
--- a/tests/plugins/gzip/plugin_test.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package gzip
-
-import (
- "net/http"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/gzip"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestGzipPlugin(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-withGzip.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- t.Run("GzipCheckHeader", headerCheck)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func headerCheck(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:18953", nil)
- assert.NoError(t, err)
- client := &http.Client{
- Transport: &http.Transport{
- DisableCompression: false,
- },
- }
-
- r, err := client.Do(req)
- assert.NoError(t, err)
- assert.True(t, r.Uncompressed)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestMiddlewareNotExist(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-middlewareNotExist.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Warn("requested middleware does not exist", "requested", "foo").MinTimes(1)
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the workerlogerror
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- stopCh <- struct{}{}
- wg.Wait()
-}
diff --git a/tests/plugins/headers/configs/.rr-cors-headers.yaml b/tests/plugins/headers/configs/.rr-cors-headers.yaml
deleted file mode 100644
index b4e960f1..00000000
--- a/tests/plugins/headers/configs/.rr-cors-headers.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-server:
- command: "php ../../http/client.php headers pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:22855
- max_request_size: 1024
- middleware: [ "headers" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- # Additional HTTP headers and CORS control.
- headers:
- cors:
- allowed_origin: "*"
- allowed_headers: "*"
- allowed_methods: "GET,POST,PUT,DELETE"
- allow_credentials: true
- exposed_headers: "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma"
- max_age: 600
- request:
- input: "custom-header"
- response:
- output: "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/headers/configs/.rr-headers-init.yaml b/tests/plugins/headers/configs/.rr-headers-init.yaml
deleted file mode 100644
index b2781f2b..00000000
--- a/tests/plugins/headers/configs/.rr-headers-init.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:33453
- max_request_size: 1024
- middleware: [ "headers" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- # Additional HTTP headers and CORS control.
- headers:
- cors:
- allowed_origin: "*"
- allowed_headers: "*"
- allowed_methods: "GET,POST,PUT,DELETE"
- allow_credentials: true
- exposed_headers: "Cache-Control,Content-Language,Content-Type,Expires,Last-Modified,Pragma"
- max_age: 600
- request:
- Example-Request-Header: "Value"
- response:
- X-Powered-By: "RoadRunner"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/headers/configs/.rr-req-headers.yaml b/tests/plugins/headers/configs/.rr-req-headers.yaml
deleted file mode 100644
index a2b97171..00000000
--- a/tests/plugins/headers/configs/.rr-req-headers.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-server:
- command: "php ../../http/client.php header pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:22655
- max_request_size: 1024
- middleware: [ "headers" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- # Additional HTTP headers and CORS control.
- headers:
- request:
- input: "custom-header"
- response:
- output: "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/headers/configs/.rr-res-headers.yaml b/tests/plugins/headers/configs/.rr-res-headers.yaml
deleted file mode 100644
index 4448343c..00000000
--- a/tests/plugins/headers/configs/.rr-res-headers.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-server:
- command: "php ../../http/client.php header pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:22455
- max_request_size: 1024
- middleware: [ "headers" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- # Additional HTTP headers and CORS control.
- headers:
- request:
- input: "custom-header"
- response:
- output: "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/headers/headers_plugin_test.go b/tests/plugins/headers/headers_plugin_test.go
deleted file mode 100644
index a03a3c34..00000000
--- a/tests/plugins/headers/headers_plugin_test.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package headers
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/headers"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/stretchr/testify/assert"
-)
-
-func TestHeadersInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-headers-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &headers.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestRequestHeaders(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-req-headers.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &headers.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("RequestHeaders", reqHeaders)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func reqHeaders(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:22655?hello=value", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "CUSTOM-HEADER", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestResponseHeaders(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-res-headers.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &headers.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("ResponseHeaders", resHeaders)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func resHeaders(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:22455?hello=value", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- assert.Equal(t, "output-header", r.Header.Get("output"))
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "CUSTOM-HEADER", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestCORSHeaders(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-cors-headers.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &headers.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("CORSHeaders", corsHeaders)
- t.Run("CORSHeadersPass", corsHeadersPass)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func corsHeadersPass(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:22855", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
-
- _, err = ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func corsHeaders(t *testing.T) {
- req, err := http.NewRequest("OPTIONS", "http://127.0.0.1:22855", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Headers"))
- assert.Equal(t, "GET,POST,PUT,DELETE", r.Header.Get("Access-Control-Allow-Methods"))
- assert.Equal(t, "*", r.Header.Get("Access-Control-Allow-Origin"))
- assert.Equal(t, "600", r.Header.Get("Access-Control-Max-Age"))
- assert.Equal(t, "true", r.Header.Get("Access-Control-Allow-Credentials"))
-
- _, err = ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/http/attributes_test.go b/tests/plugins/http/attributes_test.go
deleted file mode 100644
index 69200a30..00000000
--- a/tests/plugins/http/attributes_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package http
-
-import (
- "net/http"
- "testing"
-
- "github.com/spiral/roadrunner/v2/plugins/http/attributes"
- "github.com/stretchr/testify/assert"
-)
-
-func TestAllAttributes(t *testing.T) {
- r := &http.Request{}
- r = attributes.Init(r)
-
- err := attributes.Set(r, "key", "value")
- if err != nil {
- t.Errorf("error during the Set: error %v", err)
- }
-
- assert.Equal(t, attributes.All(r), map[string]interface{}{
- "key": "value",
- })
-}
-
-func TestAllAttributesNone(t *testing.T) {
- r := &http.Request{}
- r = attributes.Init(r)
-
- assert.Equal(t, attributes.All(r), map[string]interface{}{})
-}
-
-func TestAllAttributesNone2(t *testing.T) {
- r := &http.Request{}
-
- assert.Equal(t, attributes.All(r), map[string]interface{}{})
-}
-
-func TestGetAttribute(t *testing.T) {
- r := &http.Request{}
- r = attributes.Init(r)
-
- err := attributes.Set(r, "key", "value")
- if err != nil {
- t.Errorf("error during the Set: error %v", err)
- }
- assert.Equal(t, attributes.Get(r, "key"), "value")
-}
-
-func TestGetAttributeNone(t *testing.T) {
- r := &http.Request{}
- r = attributes.Init(r)
-
- assert.Equal(t, attributes.Get(r, "key"), nil)
-}
-
-func TestGetAttributeNone2(t *testing.T) {
- r := &http.Request{}
-
- assert.Equal(t, attributes.Get(r, "key"), nil)
-}
-
-func TestSetAttribute(t *testing.T) {
- r := &http.Request{}
- r = attributes.Init(r)
-
- err := attributes.Set(r, "key", "value")
- if err != nil {
- t.Errorf("error during the Set: error %v", err)
- }
- assert.Equal(t, attributes.Get(r, "key"), "value")
-}
-
-func TestSetAttributeNone(t *testing.T) {
- r := &http.Request{}
- err := attributes.Set(r, "key", "value")
- assert.Error(t, err)
- assert.Equal(t, attributes.Get(r, "key"), nil)
-}
diff --git a/tests/plugins/http/configs/.rr-big-req-size.yaml b/tests/plugins/http/configs/.rr-big-req-size.yaml
deleted file mode 100644
index 574b3393..00000000
--- a/tests/plugins/http/configs/.rr-big-req-size.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
-
-http:
- address: 127.0.0.1:10085
- max_request_size: 1
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-broken-pipes.yaml b/tests/plugins/http/configs/.rr-broken-pipes.yaml
deleted file mode 100644
index 703f9431..00000000
--- a/tests/plugins/http/configs/.rr-broken-pipes.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php broken pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:12384
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
-
diff --git a/tests/plugins/http/configs/.rr-env.yaml b/tests/plugins/http/configs/.rr-env.yaml
deleted file mode 100644
index 4ea8ec73..00000000
--- a/tests/plugins/http/configs/.rr-env.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php env pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:12084
- max_request_size: 1024
- middleware: []
- env:
- "RR_HTTP": "true"
- "env_key": "ENV_VALUE"
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml b/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml
deleted file mode 100644
index cbdd211e..00000000
--- a/tests/plugins/http/configs/.rr-fcgi-reqUri.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-server:
- command: "php ../../http/client.php request-uri pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8082
- max_request_size: 1024
- middleware: [ ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: :8890
- redirect: false
- cert: fixtures/server.crt
- key: fixtures/server.key
- # root_ca: root.crt
- fcgi:
- address: tcp://127.0.0.1:6921
- http2:
- h2c: false
- maxConcurrentStreams: 128
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-fcgi.yaml b/tests/plugins/http/configs/.rr-fcgi.yaml
deleted file mode 100644
index b0060d85..00000000
--- a/tests/plugins/http/configs/.rr-fcgi.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8081
- max_request_size: 1024
- middleware: [ "gzip" ]
- static:
- dir: "../../../tests"
- forbid: [ "" ]
- allow: [ ".txt", ".php" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- fcgi:
- address: tcp://0.0.0.0:6920
- http2:
- h2c: false
- maxConcurrentStreams: 128
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-h2c.yaml b/tests/plugins/http/configs/.rr-h2c.yaml
deleted file mode 100644
index 796ad307..00000000
--- a/tests/plugins/http/configs/.rr-h2c.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8083
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
- http2:
- h2c: true
- maxConcurrentStreams: 128
-logs:
- mode: production
- level: info
- encoding: console
\ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-http-ipv6-2.yaml b/tests/plugins/http/configs/.rr-http-ipv6-2.yaml
deleted file mode 100644
index 233a22b4..00000000
--- a/tests/plugins/http/configs/.rr-http-ipv6-2.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://[::1]:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: "[::1]:10784"
- max_request_size: 1024
- middleware: []
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/http/configs/.rr-http-ipv6.yaml b/tests/plugins/http/configs/.rr-http-ipv6.yaml
deleted file mode 100644
index cb768159..00000000
--- a/tests/plugins/http/configs/.rr-http-ipv6.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://[0:0:0:0:0:0:0:1]:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: "[0:0:0:0:0:0:0:1]:10684"
- max_request_size: 1024
- middleware: []
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: debug
-
diff --git a/tests/plugins/http/configs/.rr-http-static-disabled.yaml b/tests/plugins/http/configs/.rr-http-static-disabled.yaml
deleted file mode 100644
index d248ce48..00000000
--- a/tests/plugins/http/configs/.rr-http-static-disabled.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-server:
- command: "php ../../http/client.php pid pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21234
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- static:
- dir: "abc" #not exists
- forbid: [ ".php", ".htaccess" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http-static-etags.yaml b/tests/plugins/http/configs/.rr-http-static-etags.yaml
deleted file mode 100644
index b09de0f4..00000000
--- a/tests/plugins/http/configs/.rr-http-static-etags.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-server:
- command: "php ../../http/client.php pid pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21603
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- static:
- dir: "../../../tests"
- forbid: [ "" ]
- allow: [ ".txt", ".php" ]
- calculate_etag: true
- weak: true
- request:
- "input": "custom-header"
- response:
- "output": "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http-static-files-disable.yaml b/tests/plugins/http/configs/.rr-http-static-files-disable.yaml
deleted file mode 100644
index 9f91d75b..00000000
--- a/tests/plugins/http/configs/.rr-http-static-files-disable.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:45877
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http-static-files.yaml b/tests/plugins/http/configs/.rr-http-static-files.yaml
deleted file mode 100644
index 18c6107d..00000000
--- a/tests/plugins/http/configs/.rr-http-static-files.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:34653
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- static:
- dir: "../../../tests"
- allow: [ ".ico" ]
- forbid: [ ".php", ".htaccess" ]
-
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-logs:
- mode: development
- level: info
diff --git a/tests/plugins/http/configs/.rr-http-static-security.yaml b/tests/plugins/http/configs/.rr-http-static-security.yaml
deleted file mode 100644
index e2e3af2a..00000000
--- a/tests/plugins/http/configs/.rr-http-static-security.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-server:
- command: "php ../../http/client.php pid pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21603
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- static:
- dir: "../../../tests"
- forbid: [ "" ]
- allow: [ ".txt", ".php" ]
- calculate_etag: true
- weak: false
- request:
- "input": "custom-header"
- response:
- "output": "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http-static.yaml b/tests/plugins/http/configs/.rr-http-static.yaml
deleted file mode 100644
index 30bb5b1e..00000000
--- a/tests/plugins/http/configs/.rr-http-static.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-server:
- command: "php ../../http/client.php pid pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:21603
- max_request_size: 1024
- middleware: [ "gzip" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- static:
- dir: "../../../tests"
- forbid: [ "" ]
- allow: [ ".txt", ".php" ]
- calculate_etag: true
- weak: false
- request:
- "input": "custom-header"
- response:
- "output": "output-header"
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http-supervised-pool.yaml b/tests/plugins/http/configs/.rr-http-supervised-pool.yaml
deleted file mode 100644
index 8d4d81d9..00000000
--- a/tests/plugins/http/configs/.rr-http-supervised-pool.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:15432
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:18888
- max_request_size: 1024
- middleware: []
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
- supervisor:
- watch_tick: 1s
- ttl: 0
- idle_ttl: 1s
- exec_ttl: 10s
- max_worker_memory: 100
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/http/configs/.rr-http.yaml b/tests/plugins/http/configs/.rr-http.yaml
deleted file mode 100644
index b4910160..00000000
--- a/tests/plugins/http/configs/.rr-http.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:18903
- max_request_size: 1024
- middleware: [ "pluginMiddleware", "pluginMiddleware2" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
-
diff --git a/tests/plugins/http/configs/.rr-init.yaml b/tests/plugins/http/configs/.rr-init.yaml
deleted file mode 100644
index 02cb1636..00000000
--- a/tests/plugins/http/configs/.rr-init.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:15395
- max_request_size: 1024
- middleware: [ ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: :8892
- redirect: false
- cert: fixtures/server.crt
- key: fixtures/server.key
- # root_ca: root.crt
- fcgi:
- address: tcp://0.0.0.0:7921
- http2:
- h2c: false
- maxConcurrentStreams: 128
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/http/configs/.rr-issue659.yaml b/tests/plugins/http/configs/.rr-issue659.yaml
deleted file mode 100644
index bf192fab..00000000
--- a/tests/plugins/http/configs/.rr-issue659.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../issue659.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:32552
- max_request_size: 1024
- internal_error_code: 444
- middleware: [ ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
-
-logs:
- mode: development
- level: debug
-
diff --git a/tests/plugins/http/configs/.rr-no-http.yaml b/tests/plugins/http/configs/.rr-no-http.yaml
deleted file mode 100644
index a6747b5d..00000000
--- a/tests/plugins/http/configs/.rr-no-http.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/http/configs/.rr-resetter.yaml b/tests/plugins/http/configs/.rr-resetter.yaml
deleted file mode 100644
index 61b0e501..00000000
--- a/tests/plugins/http/configs/.rr-resetter.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:10084
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: error
-
diff --git a/tests/plugins/http/configs/.rr-ssl-push.yaml b/tests/plugins/http/configs/.rr-ssl-push.yaml
deleted file mode 100644
index 3349575e..00000000
--- a/tests/plugins/http/configs/.rr-ssl-push.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-server:
- command: "php ../../http/client.php push pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8086
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: :8894
- redirect: true
- cert: fixtures/server.crt
- key: fixtures/server.key
-logs:
- mode: development
- level: error \ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-ssl-redirect.yaml b/tests/plugins/http/configs/.rr-ssl-redirect.yaml
deleted file mode 100644
index 1d04963e..00000000
--- a/tests/plugins/http/configs/.rr-ssl-redirect.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8087
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: :8895
- redirect: true
- cert: fixtures/server.crt
- key: fixtures/server.key
-logs:
- mode: development
- level: error \ No newline at end of file
diff --git a/tests/plugins/http/configs/.rr-ssl.yaml b/tests/plugins/http/configs/.rr-ssl.yaml
deleted file mode 100644
index 8a0f16b8..00000000
--- a/tests/plugins/http/configs/.rr-ssl.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_HTTP": "true"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: :8085
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: :8893
- redirect: false
- cert: fixtures/server.crt
- key: fixtures/server.key
- fcgi:
- address: tcp://0.0.0.0:16920
-logs:
- mode: development
- level: error \ No newline at end of file
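The deleted SSL configs above differ mainly in http.address, ssl.address and the redirect flag; http_plugin_test.go (removed further down) loads each of them through config.Viper and registers the same plugin set every time. A condensed sketch of that wiring, with import paths copied from the deleted test file (they point at the pre-move package locations, so treat this as illustrative only):

// Condensed sketch of the container wiring used by the deleted SSL tests.
// Plugin set, config path and import paths are taken from the diff below;
// after this commit the plugins live in separate repositories.
package main

import (
    endure "github.com/spiral/endure/pkg/container"
    "github.com/spiral/roadrunner/v2/plugins/config"
    httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
    "github.com/spiral/roadrunner/v2/plugins/logger"
    rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
    "github.com/spiral/roadrunner/v2/plugins/server"
)

func main() {
    cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
    if err != nil {
        panic(err)
    }

    err = cont.RegisterAll(
        &config.Viper{Path: "configs/.rr-ssl.yaml", Prefix: "rr"},
        &rpcPlugin.Plugin{},
        &logger.ZapLogger{},
        &server.Plugin{},
        &httpPlugin.Plugin{},
    )
    if err != nil {
        panic(err)
    }

    if err := cont.Init(); err != nil {
        panic(err)
    }

    // Serve returns a channel of runtime errors; the deleted tests drained it in a
    // goroutine and called cont.Stop() on a signal or an explicit stop channel.
    errCh, err := cont.Serve()
    if err != nil {
        panic(err)
    }
    _ = errCh
    _ = cont.Stop()
}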
diff --git a/tests/plugins/http/fixtures/server.crt b/tests/plugins/http/fixtures/server.crt
deleted file mode 100644
index 24d67fd7..00000000
--- a/tests/plugins/http/fixtures/server.crt
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICTTCCAdOgAwIBAgIJAOKyUd+llTRKMAoGCCqGSM49BAMCMGMxCzAJBgNVBAYT
-AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2Nv
-MRMwEQYDVQQKDApSb2FkUnVubmVyMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTgw
-OTMwMTMzNDUzWhcNMjgwOTI3MTMzNDUzWjBjMQswCQYDVQQGEwJVUzETMBEGA1UE
-CAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzETMBEGA1UECgwK
-Um9hZFJ1bm5lcjESMBAGA1UEAwwJbG9jYWxob3N0MHYwEAYHKoZIzj0CAQYFK4EE
-ACIDYgAEVnbShsM+l5RR3wfWWmGhzuFGwNzKCk7i9xyobDIyBUxG/UUSfj7KKlUX
-puDnDEtF5xXcepl744CyIAYFLOXHb5WqI4jCOzG0o9f/00QQ4bQudJOdbqV910QF
-C2vb7Fxro1MwUTAdBgNVHQ4EFgQU9xUexnbB6ORKayA7Pfjzs33otsAwHwYDVR0j
-BBgwFoAU9xUexnbB6ORKayA7Pfjzs33otsAwDwYDVR0TAQH/BAUwAwEB/zAKBggq
-hkjOPQQDAgNoADBlAjEAue3HhR/MUhxoa9tSDBtOJT3FYbDQswrsdqBTz97CGKst
-e7XeZ3HMEvEXy0hGGEMhAjAqcD/4k9vViVppgWFtkk6+NFbm+Kw/QeeAiH5FgFSj
-8xQcb+b7nPwNLp3JOkXkVd4=
------END CERTIFICATE-----
diff --git a/tests/plugins/http/fixtures/server.key b/tests/plugins/http/fixtures/server.key
deleted file mode 100644
index 7501dd46..00000000
--- a/tests/plugins/http/fixtures/server.key
+++ /dev/null
@@ -1,9 +0,0 @@
------BEGIN EC PARAMETERS-----
-BgUrgQQAIg==
------END EC PARAMETERS-----
------BEGIN EC PRIVATE KEY-----
-MIGkAgEBBDCQP8utxNbHR6xZOLAJgUhn88r6IrPqmN0MsgGJM/jePB+T9UhkmIU8
-PMm2HeScbcugBwYFK4EEACKhZANiAARWdtKGwz6XlFHfB9ZaYaHO4UbA3MoKTuL3
-HKhsMjIFTEb9RRJ+PsoqVRem4OcMS0XnFdx6mXvjgLIgBgUs5cdvlaojiMI7MbSj
-1//TRBDhtC50k51upX3XRAULa9vsXGs=
------END EC PRIVATE KEY-----
diff --git a/tests/plugins/http/handler_test.go b/tests/plugins/http/handler_test.go
deleted file mode 100644
index c8709678..00000000
--- a/tests/plugins/http/handler_test.go
+++ /dev/null
@@ -1,1862 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "io/ioutil"
- "mime/multipart"
- "net/url"
- "os/exec"
- "runtime"
- "strings"
-
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/stretchr/testify/assert"
-
- "net/http"
- "os"
- "testing"
- "time"
-)
-
-func TestHandler_Echo(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
- go func(server *http.Server) {
- err = server.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }(hs)
- time.Sleep(time.Millisecond * 10)
-
- body, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", body)
-}
-
-func Test_HandlerErrors(t *testing.T) {
- _, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, nil)
- assert.Error(t, err)
-}
-
-func TestHandler_Headers(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "header", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8078", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 100)
-
- req, err := http.NewRequest("GET", "http://127.0.0.1:8078?hello=world", nil)
- assert.NoError(t, err)
-
- req.Header.Add("input", "sample")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "world", r.Header.Get("Header"))
- assert.Equal(t, "SAMPLE", string(b))
-}
-
-func TestHandler_Empty_User_Agent(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":19658", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest("GET", "http://127.0.0.1:19658?hello=world", nil)
- assert.NoError(t, err)
-
- req.Header.Add("user-agent", "")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "", string(b))
-}
-
-func TestHandler_User_Agent(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "user-agent", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":25688", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest("GET", "http://127.0.0.1:25688?hello=world", nil)
- assert.NoError(t, err)
-
- req.Header.Add("User-Agent", "go-agent")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "go-agent", string(b))
-}
-
-func TestHandler_Cookies(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "cookie", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8079", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest("GET", "http://127.0.0.1:8079", nil)
- assert.NoError(t, err)
-
- req.AddCookie(&http.Cookie{Name: "input", Value: "input-value"})
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "INPUT-VALUE", string(b))
-
- for _, c := range r.Cookies() {
- assert.Equal(t, "output", c.Name)
- assert.Equal(t, "cookie-output", c.Value)
- }
-}
-
-func TestHandler_JsonPayload_POST(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8090", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest(
- "POST",
- "http://127.0.0.1"+hs.Addr,
- bytes.NewBufferString(`{"key":"value"}`),
- )
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/json")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"value":"key"}`, string(b))
-}
-
-func TestHandler_JsonPayload_PUT(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8081", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/json")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"value":"key"}`, string(b))
-}
-
-func TestHandler_JsonPayload_PATCH(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "payload", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8082", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, bytes.NewBufferString(`{"key":"value"}`))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/json")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, `{"value":"key"}`, string(b))
-}
-
-func TestHandler_FormData_POST(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8083", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- form := url.Values{}
-
- form.Add("key", "value")
- form.Add("name[]", "name1")
- form.Add("name[]", "name2")
- form.Add("name[]", "name3")
- form.Add("arr[x][y][z]", "y")
- form.Add("arr[x][y][e]", "f")
- form.Add("arr[c]p", "l")
- form.Add("arr[c]z", "")
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode()))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- // Sorted
- assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
-}
-
-func TestHandler_FormData_POST_Overwrite(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8083", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- form := url.Values{}
-
- form.Add("key", "value")
- form.Add("key", "value2")
- form.Add("name[]", "name1")
- form.Add("name[]", "name2")
- form.Add("name[]", "name3")
- form.Add("arr[x][y][z]", "y")
- form.Add("arr[x][y][e]", "f")
- form.Add("arr[c]p", "l")
- form.Add("arr[c]z", "")
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode()))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value2","name":["name1","name2","name3"]}`, string(b))
-}
-
-func TestHandler_FormData_POST_Form_UrlEncoded_Charset(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8083", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- form := url.Values{}
-
- form.Add("key", "value")
- form.Add("name[]", "name1")
- form.Add("name[]", "name2")
- form.Add("name[]", "name3")
- form.Add("arr[x][y][z]", "y")
- form.Add("arr[x][y][e]", "f")
- form.Add("arr[c]p", "l")
- form.Add("arr[c]z", "")
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode()))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
-}
-
-func TestHandler_FormData_PUT(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":17834", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- form := url.Values{}
-
- form.Add("key", "value")
- form.Add("name[]", "name1")
- form.Add("name[]", "name2")
- form.Add("name[]", "name3")
- form.Add("arr[x][y][z]", "y")
- form.Add("arr[x][y][e]", "f")
- form.Add("arr[c]p", "l")
- form.Add("arr[c]z", "")
-
- req, err := http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode()))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
-}
-
-func TestHandler_FormData_PATCH(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8085", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- form := url.Values{}
-
- form.Add("key", "value")
- form.Add("name[]", "name1")
- form.Add("name[]", "name2")
- form.Add("name[]", "name3")
- form.Add("arr[x][y][z]", "y")
- form.Add("arr[x][y][e]", "f")
- form.Add("arr[c]p", "l")
- form.Add("arr[c]z", "")
-
- req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, strings.NewReader(form.Encode()))
- assert.NoError(t, err)
-
- req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
-}
-
-func TestHandler_Multipart_POST(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8019", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name1")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name2")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name3")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][z]", "y")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][e]", "f")
-
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]p", "l")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]z", "")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the writer: error %v", err)
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, "{\"arr\":{\"c\":{\"p\":\"l\",\"z\":\"\"},\"x\":{\"y\":{\"e\":\"f\",\"z\":\"y\"}}},\"key\":\"value\",\"name\":[\"name1\",\"name2\",\"name3\"]}", string(b))
-}
-
-func TestHandler_Multipart_PUT(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8020", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name1")
-
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name2")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name3")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][z]", "y")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][e]", "f")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]p", "l")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]z", "")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the writer: error %v", err)
- }
-
- req, err := http.NewRequest("PUT", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
-}
-
-func TestHandler_Multipart_PATCH(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "data", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8021", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
-
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 500)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("key", "value")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name1")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name2")
-
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("name[]", "name3")
-
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][z]", "y")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[x][y][e]", "f")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]p", "l")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.WriteField("arr[c]z", "")
- if err != nil {
- t.Errorf("error writing the field: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the writer: error %v", err)
- }
-
- req, err := http.NewRequest("PATCH", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- assert.Equal(t, `{"arr":{"c":{"p":"l","z":""},"x":{"y":{"e":"f","z":"y"}}},"key":"value","name":["name1","name2","name3"]}`, string(b))
-}
-
-func TestHandler_Error(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- _, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- assert.Equal(t, 500, r.StatusCode)
-}
-
-func TestHandler_Error2(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error2", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- _, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- assert.Equal(t, 500, r.StatusCode)
-}
-
-func TestHandler_Error3(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "pid", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- b2 := &bytes.Buffer{}
- for i := 0; i < 1024*1024; i++ {
- b2.Write([]byte(" "))
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, b2)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- err = r.Body.Close()
- if err != nil {
- t.Errorf("error during the closing Body: error %v", err)
- }
- }()
-
- assert.NoError(t, err)
- assert.Equal(t, 400, r.StatusCode)
-}
-
-func TestHandler_ResponseDuration(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- gotresp := make(chan interface{})
- h.AddListener(func(event interface{}) {
- switch t := event.(type) {
- case handler.ResponseEvent:
- if t.Elapsed() > 0 {
- close(gotresp)
- }
- default:
- }
- })
-
- body, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
-
- <-gotresp
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", body)
-}
-
-func TestHandler_ResponseDurationDelayed(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echoDelay", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- gotresp := make(chan interface{})
- h.AddListener(func(event interface{}) {
- switch tp := event.(type) {
- case handler.ResponseEvent:
- if tp.Elapsed() > time.Second {
- close(gotresp)
- }
- default:
- }
- })
-
- body, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- <-gotresp
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", body)
-}
-
-func TestHandler_ErrorDuration(t *testing.T) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "error", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- goterr := make(chan struct{}, 10)
- h.AddListener(func(event interface{}) {
- switch tp := event.(type) {
- case handler.ErrorEvent:
- if tp.Elapsed() > 0 {
- goterr <- struct{}{}
- }
- default:
- }
- })
-
- _, r, err := get("http://127.0.0.1:8177/?hello=world")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
-
- <-goterr
- <-goterr
-
- assert.Equal(t, 500, r.StatusCode)
-}
-
-func TestHandler_IP(t *testing.T) {
- trusted := []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- }
-
- cidrs, err := config.ParseCIDRs(trusted)
- assert.NoError(t, err)
- assert.NotNil(t, cidrs)
-
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, cidrs, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- body, r, err := get("http://127.0.0.1:8177/")
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "127.0.0.1", body)
-}
-
-func TestHandler_XRealIP(t *testing.T) {
- trusted := []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- }
-
- cidrs, err := config.ParseCIDRs(trusted)
- assert.NoError(t, err)
- assert.NotNil(t, cidrs)
-
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, cidrs, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: "127.0.0.1:8179", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- body, r, err := getHeader("http://127.0.0.1:8179/", map[string]string{
- "X-Real-Ip": "200.0.0.1",
- })
-
- assert.NoError(t, err)
- defer func() {
- _ = r.Body.Close()
- }()
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "200.0.0.1", body)
-}
-
-func TestHandler_XForwardedFor(t *testing.T) {
- trusted := []string{
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "100.0.0.0/16",
- "200.0.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- }
-
- cidrs, err := config.ParseCIDRs(trusted)
- assert.NoError(t, err)
- assert.NotNil(t, cidrs)
-
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, cidrs, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- body, r, err := getHeader("http://127.0.0.1:8177/", map[string]string{
- "X-Forwarded-For": "100.0.0.1, 200.0.0.1, invalid, 101.0.0.1",
- })
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "101.0.0.1", body)
- _ = r.Body.Close()
-
- body, r, err = getHeader("http://127.0.0.1:8177/", map[string]string{
- "X-Forwarded-For": "100.0.0.1, 200.0.0.1, 101.0.0.1, invalid",
- })
-
- assert.NoError(t, err)
- _ = r.Body.Close()
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "101.0.0.1", body)
-}
-
-func TestHandler_XForwardedFor_NotTrustedRemoteIp(t *testing.T) {
- trusted := []string{
- "10.0.0.0/8",
- }
-
- cidrs, err := config.ParseCIDRs(trusted)
- assert.NoError(t, err)
- assert.NotNil(t, cidrs)
-
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "ip", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, cidrs, p)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: "127.0.0.1:8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- body, r, err := getHeader("http://127.0.0.1:8177/", map[string]string{
- "X-Forwarded-For": "100.0.0.1, 200.0.0.1, invalid, 101.0.0.1",
- })
-
- assert.NoError(t, err)
- _ = r.Body.Close()
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "127.0.0.1", body)
-}
-
-func BenchmarkHandler_Listen_Echo(b *testing.B) {
- p, err := pool.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
- pipe.NewPipeFactory(),
- &pool.Config{
- NumWorkers: uint64(runtime.NumCPU()),
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- b.Fatal(err)
- }
- defer func() {
- p.Destroy(context.Background())
- }()
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, p)
- assert.NoError(b, err)
-
- hs := &http.Server{Addr: ":8177", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- b.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- err = hs.ListenAndServe()
- if err != nil && err != http.ErrServerClosed {
- b.Errorf("error listening the interface: error %v", err)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- b.ResetTimer()
- b.ReportAllocs()
- bb := "WORLD"
- for n := 0; n < b.N; n++ {
- r, err := http.Get("http://127.0.0.1:8177/?hello=world")
- if err != nil {
- b.Fail()
- }
- // Response might be nil here
- if r != nil {
- br, err := ioutil.ReadAll(r.Body)
- if err != nil {
- b.Errorf("error reading Body: error %v", err)
- }
- if string(br) != bb {
- b.Fail()
- }
- err = r.Body.Close()
- if err != nil {
- b.Errorf("error closing the Body: error %v", err)
- }
- } else {
- b.Errorf("got nil response")
- }
- }
-}
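Every test in the deleted handler_test.go above repeats the same scaffolding: start a single-worker pipe pool, wrap it in the worker handler, and expose it through net/http before issuing requests. A trimmed sketch of that shared setup; constructor arguments and import paths are copied verbatim from the deleted tests and reflect the pre-move package layout, so treat it as illustrative rather than current API:

// Shared scaffolding extracted from the deleted handler tests. Argument values
// (1024, 500, the upload settings) are copied as-is from those tests; the
// import paths predate the plugin split performed by this commit.
package main

import (
    "context"
    "net/http"
    "os"
    "os/exec"
    "time"

    "github.com/spiral/roadrunner/v2/pkg/pool"
    "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
    handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
    "github.com/spiral/roadrunner/v2/plugins/http/config"
)

func main() {
    p, err := pool.Initialize(context.Background(),
        func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "echo", "pipes") },
        pipe.NewPipeFactory(),
        &pool.Config{
            NumWorkers:      1,
            AllocateTimeout: time.Second * 1000,
            DestroyTimeout:  time.Second * 1000,
        })
    if err != nil {
        panic(err)
    }
    defer p.Destroy(context.Background())

    h, err := handler.NewHandler(1024, 500, config.Uploads{
        Dir:    os.TempDir(),
        Forbid: []string{},
    }, nil, p)
    if err != nil {
        panic(err)
    }

    // The tests then drove the handler through a plain net/http server and a client.
    hs := &http.Server{Addr: ":8177", Handler: h}
    _ = hs.ListenAndServe()
}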
diff --git a/tests/plugins/http/http_plugin_test.go b/tests/plugins/http/http_plugin_test.go
deleted file mode 100644
index 10feff03..00000000
--- a/tests/plugins/http/http_plugin_test.go
+++ /dev/null
@@ -1,2516 +0,0 @@
-package http
-
-import (
- "bytes"
- "crypto/rand"
- "crypto/tls"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptest"
- "net/rpc"
- "net/url"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/gzip"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/static"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/require"
- "github.com/yookoala/gofast"
-
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-var sslClient = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true, //nolint:gosec
- },
- },
-}
-
-func TestHTTPInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- rIn := makeConfig("6001", "15395", "7921", ":8892", "false", "false", "php ../../http/client.php echo pipes")
- cfg := &config.Viper{
- ReadInCfg: rIn,
- Type: "yaml",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestHTTPNoConfigSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-no-http.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestHTTPInformerReset(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-resetter.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &informer.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("HTTPInformerTest", informerTest)
- t.Run("HTTPEchoTestBefore", echoHTTP)
- t.Run("HTTPResetTest", resetTest)
- t.Run("HTTPEchoTestAfter", echoHTTP)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestSSL(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-ssl.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("SSLEcho", sslEcho)
- t.Run("SSLNoRedirect", sslNoRedirect)
- t.Run("FCGEcho", fcgiEcho)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func sslNoRedirect(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:8085?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- assert.Nil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
-			t.Errorf("failed to close the Body: error %v", err2)
- }
-}
-
-func sslEcho(t *testing.T) {
- req, err := http.NewRequest("GET", "https://127.0.0.1:8893?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
-			t.Errorf("failed to close the Body: error %v", err2)
- }
-}
-
-func fcgiEcho(t *testing.T) {
- fcgiConnFactory := gofast.SimpleConnFactory("tcp", "0.0.0.0:16920")
-
- fcgiHandler := gofast.NewHandler(
- gofast.BasicParamsMap(gofast.BasicSession),
- gofast.SimpleClientFactory(fcgiConnFactory),
- )
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("GET", "http://site.local/?hello=world", nil)
- fcgiHandler.ServeHTTP(w, req)
-
- body, err := ioutil.ReadAll(w.Result().Body) //nolint:bodyclose
-
- defer func() {
- _ = w.Result().Body.Close()
- w.Body.Reset()
- }()
-
- assert.NoError(t, err)
- assert.Equal(t, 201, w.Result().StatusCode) //nolint:bodyclose
- assert.Equal(t, "WORLD", string(body))
-}
-
-func TestSSLRedirect(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-ssl-redirect.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("SSLRedirect", sslRedirect)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func sslRedirect(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:8087?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
- assert.NotNil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
-			t.Errorf("failed to close the Body: error %v", err2)
- }
-}
-
-func TestSSLPushPipes(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-ssl-push.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("SSLPush", sslPush)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func sslPush(t *testing.T) {
- req, err := http.NewRequest("GET", "https://127.0.0.1:8894?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := sslClient.Do(req)
- assert.NoError(t, err)
-
- assert.NotNil(t, r.TLS)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, "", r.Header.Get("Http2-Release"))
-
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err2 := r.Body.Close()
- if err2 != nil {
-			t.Errorf("failed to close the Body: error %v", err2)
- }
-}
-
-func TestFastCGI_Echo(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-fcgi.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("FastCGIEcho", fcgiEcho1)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func fcgiEcho1(t *testing.T) {
- time.Sleep(time.Second * 2)
- fcgiConnFactory := gofast.SimpleConnFactory("tcp", "127.0.0.1:6920")
-
- fcgiHandler := gofast.NewHandler(
- gofast.BasicParamsMap(gofast.BasicSession),
- gofast.SimpleClientFactory(fcgiConnFactory),
- )
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("GET", "http://site.local/hello-world", nil)
- fcgiHandler.ServeHTTP(w, req)
-
- _, err := ioutil.ReadAll(w.Result().Body) //nolint:bodyclose
- assert.NoError(t, err)
- assert.Equal(t, 201, w.Result().StatusCode) //nolint:bodyclose
-}
-
-func TestFastCGI_RequestUri(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-fcgi-reqUri.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("FastCGIServiceRequestUri", fcgiReqURI)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func fcgiReqURI(t *testing.T) {
- time.Sleep(time.Second * 2)
- fcgiConnFactory := gofast.SimpleConnFactory("tcp", "127.0.0.1:6921")
-
- fcgiHandler := gofast.NewHandler(
- gofast.BasicParamsMap(gofast.BasicSession),
- gofast.SimpleClientFactory(fcgiConnFactory),
- )
-
- w := httptest.NewRecorder()
- req := httptest.NewRequest("GET", "http://site.local/hello-world", nil)
- fcgiHandler.ServeHTTP(w, req)
-
- body, err := ioutil.ReadAll(w.Result().Body) //nolint:bodyclose
- assert.NoError(t, err)
- assert.Equal(t, 200, w.Result().StatusCode) //nolint:bodyclose
- assert.Equal(t, "http://site.local/hello-world", string(body))
-}
-
-func TestH2CUpgrade(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-h2c.yaml",
- Prefix: "rr",
- }
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Error("server internal error", "message", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("H2cUpgrade", h2cUpgrade)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func h2cUpgrade(t *testing.T) {
- req, err := http.NewRequest("PRI", "http://127.0.0.1:8083?hello=world", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Add("Upgrade", "h2c")
- req.Header.Add("Connection", "HTTP2-Settings")
- req.Header.Add("HTTP2-Settings", "")
-
- r, err2 := http.DefaultClient.Do(req)
- if err2 != nil {
-		t.Fatal(err2)
- }
-
- assert.Equal(t, "101 Switching Protocols", r.Status)
-
- err3 := r.Body.Close()
- if err3 != nil {
-		t.Fatal(err3)
- }
-}
-
-func TestH2C(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-h2c.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("H2c", h2c)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func h2c(t *testing.T) {
- req, err := http.NewRequest("PRI", "http://127.0.0.1:8083?hello=world", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- req.Header.Add("Connection", "HTTP2-Settings")
- req.Header.Add("HTTP2-Settings", "")
-
- r, err2 := http.DefaultClient.Do(req)
- if err2 != nil {
-		t.Fatal(err2)
- }
-
- assert.Equal(t, "201 Created", r.Status)
-
- err3 := r.Body.Close()
- if err3 != nil {
-		t.Fatal(err3)
- }
-}
-
-func TestHttpMiddleware(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &PluginMiddleware{},
- &PluginMiddleware2{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("MiddlewareTest", middleware)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func middleware(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:18903?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-
- req, err = http.NewRequest("GET", "http://127.0.0.1:18903/halt", nil)
- assert.NoError(t, err)
-
- r, err = http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err = ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, 500, r.StatusCode)
- assert.Equal(t, "halted", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestHttpEchoErr(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- rIn := `
-rpc:
- listen: tcp://127.0.0.1:6001
- disabled: false
-
-server:
- command: "php ../../http/client.php echoerr pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- debug: true
- address: 127.0.0.1:34999
- max_request_size: 1024
- middleware: [ "pluginMiddleware", "pluginMiddleware2" ]
- uploads:
- forbid: [ "" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-logs:
- mode: development
- level: debug
-`
-
- cfg := &config.Viper{
- Path: "",
- Prefix: "",
- Type: "yaml",
- ReadInCfg: []byte(rIn),
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug(gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34999/?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("WORLD").MinTimes(1)
-	mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &PluginMiddleware{},
- &PluginMiddleware2{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("HttpEchoError", echoError)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func echoError(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:34999?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- require.NotNil(t, r)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestHttpEnvVariables(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-env.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &PluginMiddleware{},
- &PluginMiddleware2{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("EnvVariablesTest", envVarsTest)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func envVarsTest(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:12084", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, "ENV_VALUE", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestHttpBrokenPipes(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-broken-pipes.yaml",
- Prefix: "rr",
- Type: "yaml",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &PluginMiddleware{},
- &PluginMiddleware2{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- // should be error from the plugin
- case e := <-ch:
- assert.Error(t, e.Error)
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- wg.Wait()
-}
-
-func TestHTTPSupervisedPool(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-supervised-pool.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &informer.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("HTTPEchoRunActivateWorker", echoHTTP2)
- // bigger timeout to handle idle_ttl on slow systems
- time.Sleep(time.Second * 10)
- t.Run("HTTPInformerCompareWorkersTestBefore", informerTestBefore)
- t.Run("HTTPEchoShouldBeNewWorker", echoHTTP2)
- // worker should be destructed (idle_ttl)
- t.Run("HTTPInformerCompareWorkersTestAfter", informerTestAfter)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func echoHTTP2(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:18888?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-// Get the worker PID, wait long enough for the supervisor to destroy the idle
-// worker (idle_ttl), then compare the PIDs before and after.
-var workerPid int
-
-func informerTestBefore(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:15432")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
- list := struct {
- // Workers is list of workers.
- Workers []process.State `json:"workers"`
- }{}
-
- err = client.Call("informer.Workers", "http", &list)
- assert.NoError(t, err)
- assert.Len(t, list.Workers, 1)
- // save the pid
- workerPid = list.Workers[0].Pid
-}
-
-func informerTestAfter(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:15432")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
- list := struct {
- // Workers is list of workers.
- Workers []process.State `json:"workers"`
- }{}
-
- assert.NotZero(t, workerPid)
-
- time.Sleep(time.Second * 5)
-
- err = client.Call("informer.Workers", "http", &list)
- assert.NoError(t, err)
- assert.Len(t, list.Workers, 1)
- assert.NotEqual(t, workerPid, list.Workers[0].Pid)
-}
-
-// getHeader performs a GET request with the given headers and returns the body and the response.
-func getHeader(url string, h map[string]string) (string, *http.Response, error) {
- req, err := http.NewRequest("GET", url, bytes.NewBuffer(nil))
- if err != nil {
- return "", nil, err
- }
-
- for k, v := range h {
- req.Header.Set(k, v)
- }
-
- r, err := http.DefaultClient.Do(req)
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
- return string(b), r, err
-}
-
-func makeConfig(rpcPort, httpPort, fcgiPort, sslAddress, redirect, http2Enabled, command string) []byte {
- return []byte(fmt.Sprintf(`
-rpc:
- listen: tcp://127.0.0.1:%s
- disabled: false
-
-server:
- command: "%s"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:%s
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- ssl:
- address: %s
- redirect: %s
- cert: fixtures/server.crt
- key: fixtures/server.key
- # rootCa: root.crt
- fcgi:
- address: tcp://0.0.0.0:%s
- http2:
- enabled: %s
- h2c: false
- max_concurrent_streams: 128
-logs:
- mode: development
- level: error
-`, rpcPort, command, httpPort, sslAddress, redirect, fcgiPort, http2Enabled))
-}
-
-func TestHTTPBigRequestSize(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-big-req-size.yaml",
- Prefix: "rr",
- Type: "yaml",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
-
- t.Run("HTTPBigEcho10Mb", bigEchoHTTP)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func bigEchoHTTP(t *testing.T) {
- buf := make([]byte, 1024*1024*10)
-
- _, err := rand.Read(buf)
- assert.NoError(t, err)
-
- bt := bytes.NewBuffer(buf)
-
- req, err := http.NewRequest("GET", "http://127.0.0.1:10085?hello=world", bt)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 400, r.StatusCode)
- assert.Equal(t, "http_handler_max_size: request body max size is exceeded\n", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestStaticEtagPlugin(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("ServeSampleEtag", serveStaticSampleEtag)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func serveStaticSampleEtag(t *testing.T) {
- // OK 200 response
- b, r, err := get("http://127.0.0.1:21603/sample.txt")
- assert.NoError(t, err)
- assert.Equal(t, "sample\n", b)
- assert.Equal(t, r.StatusCode, http.StatusOK)
- etag := r.Header.Get("Etag")
-
- _ = r.Body.Close()
-
- // Should be 304 response with same etag
- c := http.Client{
- Timeout: time.Second * 5,
- }
-
- parsedURL, _ := url.Parse("http://127.0.0.1:21603/sample.txt")
-
- req := &http.Request{
- Method: http.MethodGet,
- URL: parsedURL,
- Header: map[string][]string{"If-None-Match": {etag}},
- }
-
- resp, err := c.Do(req)
- assert.Nil(t, err)
- assert.Equal(t, http.StatusNotModified, resp.StatusCode)
- _ = resp.Body.Close()
-}
-
-func TestStaticPluginSecurity(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static-security.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("ServeSampleNotAllowedPath", serveStaticSampleNotAllowedPath)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func serveStaticSampleNotAllowedPath(t *testing.T) {
-	// Path traversal attempts below must be rejected (400 Bad Request / 403 Forbidden).
- c := http.Client{
- Timeout: time.Second * 5,
- }
-
- parsedURL := &url.URL{
- Scheme: "http",
- User: nil,
- Host: "127.0.0.1:21603",
- Path: "%2e%2e%/tests/",
- }
-
- req := &http.Request{
- Method: http.MethodGet,
- URL: parsedURL,
- }
-
- resp, err := c.Do(req)
- assert.Nil(t, err)
- assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- _ = resp.Body.Close()
-
- parsedURL = &url.URL{
- Scheme: "http",
- User: nil,
- Host: "127.0.0.1:21603",
- Path: "%2e%2e%5ctests/",
- }
-
- req = &http.Request{
- Method: http.MethodGet,
- URL: parsedURL,
- }
-
- resp, err = c.Do(req)
- assert.Nil(t, err)
- assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- _ = resp.Body.Close()
-
- parsedURL = &url.URL{
- Scheme: "http",
- User: nil,
- Host: "127.0.0.1:21603",
- Path: "..%2ftests/",
- }
-
- req = &http.Request{
- Method: http.MethodGet,
- URL: parsedURL,
- }
-
- resp, err = c.Do(req)
- assert.Nil(t, err)
- assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- _ = resp.Body.Close()
-
- parsedURL = &url.URL{
- Scheme: "http",
- User: nil,
- Host: "127.0.0.1:21603",
- Path: "%2e%2e%2ftests/",
- }
-
- req = &http.Request{
- Method: http.MethodGet,
- URL: parsedURL,
- }
-
- resp, err = c.Do(req)
- assert.Nil(t, err)
- assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- _ = resp.Body.Close()
-
- _, r, err := get("http://127.0.0.1:21603/../../../../tests/../static/sample.txt")
- assert.NoError(t, err)
- assert.Equal(t, 403, r.StatusCode)
- _ = r.Body.Close()
-}
-
-func TestStaticPlugin(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("ServeSample", serveStaticSample)
- t.Run("StaticNotForbid", staticNotForbid)
- t.Run("StaticHeaders", staticHeaders)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func staticHeaders(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:21603/client.php", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- t.Fatal(err)
- }
-
- if resp.Header.Get("Output") != "output-header" {
- t.Fatal("can't find output header in response")
- }
-
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal(err)
- }
-
- defer func() {
- _ = resp.Body.Close()
- }()
-
-	assert.Equal(t, all("../../../tests/client.php"), string(b))
-}
-
-func staticNotForbid(t *testing.T) {
- b, r, err := get("http://127.0.0.1:21603/client.php")
- assert.NoError(t, err)
-	assert.Equal(t, all("../../../tests/client.php"), b)
- _ = r.Body.Close()
-}
-
-func serveStaticSample(t *testing.T) {
- b, r, err := get("http://127.0.0.1:21603/sample.txt")
- assert.NoError(t, err)
- assert.Equal(t, "sample\n", b)
- _ = r.Body.Close()
-}
-
-func TestStaticDisabled_Error(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static-disabled.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
- assert.Error(t, cont.Init())
-}
-
-func TestStaticFilesDisabled(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static-files-disable.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("StaticFilesDisabled", staticFilesDisabled)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func staticFilesDisabled(t *testing.T) {
- b, r, err := get("http://127.0.0.1:45877/client.php?hello=world")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, "WORLD", b)
- _ = r.Body.Close()
-}
-
-func TestStaticFilesForbid(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-static-files.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/http?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/client.XXX?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("201 GET http://127.0.0.1:34653/client.php?hello=world", "remote", "127.0.0.1", "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Error("file open error", "error", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("no such file or directory", "error", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("possible path to dir provided").AnyTimes()
- mockLogger.EXPECT().Debug("file extension is forbidden", gomock.Any(), gomock.Any()).AnyTimes()
-	mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &gzip.Plugin{},
- &static.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("StaticTestFilesDir", staticTestFilesDir)
- t.Run("StaticNotFound", staticNotFound)
- t.Run("StaticFilesForbid", staticFilesForbid)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func staticTestFilesDir(t *testing.T) {
- b, r, err := get("http://127.0.0.1:34653/http?hello=world")
- assert.NoError(t, err)
- assert.Equal(t, "WORLD", b)
- _ = r.Body.Close()
-}
-
-func staticNotFound(t *testing.T) {
- b, _, _ := get("http://127.0.0.1:34653/client.XXX?hello=world") //nolint:bodyclose
- assert.Equal(t, "WORLD", b)
-}
-
-func staticFilesForbid(t *testing.T) {
- b, r, err := get("http://127.0.0.1:34653/client.php?hello=world")
- if err != nil {
- t.Fatal(err)
- }
- assert.Equal(t, "WORLD", b)
- _ = r.Body.Close()
-}
-
-func TestHTTPIssue659(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-issue659.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("HTTPIssue659", echoIssue659)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestHTTPIPv6Long(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-ipv6.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://[0:0:0:0:0:0:0:1]:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Debug("201 GET http://[0:0:0:0:0:0:0:1]:10684/?hello=world", "remote", "::1", "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("HTTPEchoIPv6-long", echoHTTPIPv6Long)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestHTTPIPv6Short(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-http-ipv6-2.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://[::1]:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Debug("201 GET http://[::1]:10784/?hello=world", "remote", "::1", "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("HTTPEchoIPv6-short", echoHTTPIPv6Short)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func echoIssue659(t *testing.T) {
- req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:32552", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Empty(t, b)
- assert.Equal(t, 444, r.StatusCode)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func echoHTTP(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:10084?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func echoHTTPIPv6Long(t *testing.T) {
- req, err := http.NewRequest("GET", "http://[0:0:0:0:0:0:0:1]:10684?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func echoHTTPIPv6Short(t *testing.T) {
- req, err := http.NewRequest("GET", "http://[::1]:10784?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 201, r.StatusCode)
- assert.Equal(t, "WORLD", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func resetTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
-
- var ret bool
- err = client.Call("resetter.Reset", "http", &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- var services []string
- err = client.Call("resetter.List", nil, &services)
- assert.NoError(t, err)
- if services[0] != "http" {
-		t.Fatal("not enough services")
- }
-}
-
-func informerTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
- list := struct {
- // Workers is list of workers.
- Workers []process.State `json:"workers"`
- }{}
-
- err = client.Call("informer.Workers", "http", &list)
- assert.NoError(t, err)
- assert.Len(t, list.Workers, 2)
-}
-
-// HELPERS
-func get(url string) (string, *http.Response, error) {
- r, err := http.Get(url) //nolint:gosec
- if err != nil {
- return "", nil, err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", nil, err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", nil, err
- }
-
- return string(b), r, err
-}
-
-func all(fn string) string {
- f, _ := os.Open(fn)
-
- b := new(bytes.Buffer)
- _, err := io.Copy(b, f)
- if err != nil {
- return ""
- }
-
- err = f.Close()
- if err != nil {
- return ""
- }
-
- return b.String()
-}
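Every deleted test above repeats the same endure boot/stop harness: register the plugins, Init, Serve, then a goroutine that forwards plugin errors and OS signals to cont.Stop and exits when a stop channel fires. Below is a minimal sketch of that harness as a shared helper; the helper name runTest and the endure import path are assumptions, not part of the original suite.

package http

import (
	"os"
	"os/signal"
	"sync"
	"syscall"
	"testing"
	"time"

	endure "github.com/spiral/endure/pkg/container" // import path assumed
	"github.com/stretchr/testify/assert"
)

// runTest boots an already-registered container, forwards plugin errors and OS
// signals to cont.Stop, runs the sub-tests, and then stops via stopCh.
// Hypothetical helper: the deleted tests inline this logic in every Test* func.
func runTest(t *testing.T, cont *endure.Endure, warmup time.Duration, subtests func(t *testing.T)) {
	ch, err := cont.Serve()
	assert.NoError(t, err)

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)

	stopCh := make(chan struct{}, 1)
	wg := &sync.WaitGroup{}
	wg.Add(1)

	go func() {
		defer wg.Done()
		for {
			select {
			case e := <-ch:
				// a plugin reported an error: fail the test and shut down
				assert.Fail(t, "error", e.Error.Error())
				_ = cont.Stop()
			case <-sig:
				_ = cont.Stop()
				return
			case <-stopCh:
				_ = cont.Stop()
				return
			}
		}
	}()

	time.Sleep(warmup) // give the workers time to start
	if subtests != nil {
		subtests(t)
	}

	stopCh <- struct{}{}
	wg.Wait()
}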
diff --git a/tests/plugins/http/parse_test.go b/tests/plugins/http/parse_test.go
deleted file mode 100644
index d75620f3..00000000
--- a/tests/plugins/http/parse_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package http
-
-import (
- "testing"
-
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
-)
-
-var samples = []struct {
- in string
- out []string
-}{
- {"key", []string{"key"}},
- {"key[subkey]", []string{"key", "subkey"}},
- {"key[subkey]value", []string{"key", "subkey", "value"}},
- {"key[subkey][value]", []string{"key", "subkey", "value"}},
- {"key[subkey][value][]", []string{"key", "subkey", "value", ""}},
- {"key[subkey] [value][]", []string{"key", "subkey", "value", ""}},
- {"key [ subkey ] [ value ] [ ]", []string{"key", "subkey", "value", ""}},
-}
-
-func Test_FetchIndexes(t *testing.T) {
- for i := 0; i < len(samples); i++ {
- r := handler.FetchIndexes(samples[i].in)
- if !same(r, samples[i].out) {
- t.Errorf("got %q, want %q", r, samples[i].out)
- }
- }
-}
-
-func BenchmarkConfig_FetchIndexes(b *testing.B) {
- for _, tt := range samples {
- for n := 0; n < b.N; n++ {
- r := handler.FetchIndexes(tt.in)
- if !same(r, tt.out) {
- b.Fail()
- }
- }
- }
-}
-
-func same(in, out []string) bool {
- if len(in) != len(out) {
- return false
- }
-
- for i, v := range in {
- if v != out[i] {
- return false
- }
- }
-
- return true
-}
diff --git a/tests/plugins/http/plugin1.go b/tests/plugins/http/plugin1.go
deleted file mode 100644
index 0ec31211..00000000
--- a/tests/plugins/http/plugin1.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package http
-
-import (
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-type Plugin1 struct {
- config config.Configurer
-}
-
-func (p1 *Plugin1) Init(cfg config.Configurer) error {
- p1.config = cfg
- return nil
-}
-
-func (p1 *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
- return errCh
-}
-
-func (p1 *Plugin1) Stop() error {
- return nil
-}
-
-func (p1 *Plugin1) Name() string {
- return "http_test.plugin1"
-}
diff --git a/tests/plugins/http/plugin_middleware.go b/tests/plugins/http/plugin_middleware.go
deleted file mode 100644
index 9f04d6db..00000000
--- a/tests/plugins/http/plugin_middleware.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package http
-
-import (
- "net/http"
-
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-// PluginMiddleware test
-type PluginMiddleware struct {
- config config.Configurer
-}
-
-// Init test
-func (p *PluginMiddleware) Init(cfg config.Configurer) error {
- p.config = cfg
- return nil
-}
-
-// Middleware test
-func (p *PluginMiddleware) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/halt" {
- w.WriteHeader(500)
- _, err := w.Write([]byte("halted"))
- if err != nil {
- panic("error writing the data to the http reply")
- }
- } else {
- next.ServeHTTP(w, r)
- }
- })
-}
-
-// Name test
-func (p *PluginMiddleware) Name() string {
- return "pluginMiddleware"
-}
-
-// PluginMiddleware2 test
-type PluginMiddleware2 struct {
- config config.Configurer
-}
-
-// Init test
-func (p *PluginMiddleware2) Init(cfg config.Configurer) error {
- p.config = cfg
- return nil
-}
-
-// Middleware test
-func (p *PluginMiddleware2) Middleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/boom" {
- w.WriteHeader(555)
- _, err := w.Write([]byte("boom"))
- if err != nil {
- panic("error writing the data to the http reply")
- }
- } else {
- next.ServeHTTP(w, r)
- }
- })
-}
-
-// Name test
-func (p *PluginMiddleware2) Name() string {
- return "pluginMiddleware2"
-}
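These two middleware plugins are consumed by the HTTP tests earlier in this diff: each is registered with the container and then enabled by name through the http.middleware config list (e.g. middleware: [ "pluginMiddleware", "pluginMiddleware2" ] in the embedded YAML above). A minimal registration sketch, assuming the same imports and config paths as the deleted http_plugin_test.go:

cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
if err != nil {
	panic(err)
}
err = cont.RegisterAll(
	&config.Viper{Path: "configs/.rr-http.yaml", Prefix: "rr"}, // config path taken from the deleted tests
	&logger.ZapLogger{},
	&server.Plugin{},
	&httpPlugin.Plugin{},
	&PluginMiddleware{},  // matched by its Name() "pluginMiddleware"
	&PluginMiddleware2{}, // matched by its Name() "pluginMiddleware2"
)
if err != nil {
	panic(err)
}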
diff --git a/tests/plugins/http/response_test.go b/tests/plugins/http/response_test.go
deleted file mode 100644
index f754429d..00000000
--- a/tests/plugins/http/response_test.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package http
-
-import (
- "bytes"
- "errors"
- "net/http"
- "testing"
-
- "github.com/spiral/roadrunner/v2/pkg/payload"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
- "github.com/stretchr/testify/assert"
-)
-
-type testWriter struct {
- h http.Header
- buf bytes.Buffer
- wroteHeader bool
- code int
- err error
- pushErr error
- pushes []string
-}
-
-func (tw *testWriter) Header() http.Header { return tw.h }
-
-func (tw *testWriter) Write(p []byte) (int, error) {
- if !tw.wroteHeader {
- tw.WriteHeader(http.StatusOK)
- }
-
- n, e := tw.buf.Write(p)
- if e == nil {
- e = tw.err
- }
-
- return n, e
-}
-
-func (tw *testWriter) WriteHeader(code int) { tw.wroteHeader = true; tw.code = code }
-
-func (tw *testWriter) Push(target string, opts *http.PushOptions) error {
- tw.pushes = append(tw.pushes, target)
-
- return tw.pushErr
-}
-
-func TestNewResponse_Error(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{Context: []byte(`invalid payload`)})
- assert.Error(t, err)
- assert.Nil(t, r)
-}
-
-func TestNewResponse_Write(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
- Body: []byte(`sample body`),
- })
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string))}
- assert.NoError(t, r.Write(w))
-
- assert.Equal(t, 301, w.code)
- assert.Equal(t, "value", w.h.Get("key"))
- assert.Equal(t, "sample body", w.buf.String())
-}
-
-func TestNewResponse_Stream(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
- })
-
-	// r is a pointer, so it might be nil
- if r == nil {
- t.Fatal("response is nil")
- return
- }
-
- r.Body = new(bytes.Buffer)
- r.Body.(*bytes.Buffer).WriteString("hello world")
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string))}
- assert.NoError(t, r.Write(w))
-
- assert.Equal(t, 301, w.code)
- assert.Equal(t, "value", w.h.Get("key"))
- assert.Equal(t, "hello world", w.buf.String())
-}
-
-func TestNewResponse_StreamError(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(`{"headers":{"key":["value"]},"status": 301}`),
- })
-
-	// r is a pointer, so it might be nil
- if r == nil {
- t.Fatal("response is nil")
- return
- }
-
- r.Body = &bytes.Buffer{}
- r.Body.(*bytes.Buffer).WriteString("hello world")
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string)), err: errors.New("error")}
- assert.Error(t, r.Write(w))
-}
-
-func TestWrite_HandlesPush(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(`{"headers":{"Http2-Push":["/test.js"],"content-type":["text/html"]},"status": 200}`),
- })
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string))}
- assert.NoError(t, r.Write(w))
-
- assert.Nil(t, w.h["Http2-Push"])
- assert.Equal(t, []string{"/test.js"}, w.pushes)
-}
-
-func TestWrite_HandlesTrailers(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(`{"headers":{"Trailer":["foo, bar", "baz"],"foo":["test"],"bar":["demo"]},"status": 200}`),
- })
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string))}
- assert.NoError(t, r.Write(w))
-
- assert.Nil(t, w.h[handler.TrailerHeaderKey])
- assert.Nil(t, w.h["foo"]) //nolint:staticcheck
- assert.Nil(t, w.h["baz"]) //nolint:staticcheck
-
- assert.Equal(t, "test", w.h.Get("Trailer:foo"))
- assert.Equal(t, "demo", w.h.Get("Trailer:bar"))
-}
-
-func TestWrite_HandlesHandlesWhitespacesInTrailer(t *testing.T) {
- r, err := handler.NewResponse(&payload.Payload{
- Context: []byte(
- `{"headers":{"Trailer":["foo\t,bar , baz"],"foo":["a"],"bar":["b"],"baz":["c"]},"status": 200}`),
- })
-
- assert.NoError(t, err)
- assert.NotNil(t, r)
-
- w := &testWriter{h: http.Header(make(map[string][]string))}
- assert.NoError(t, r.Write(w))
-
- assert.Equal(t, "a", w.h.Get("Trailer:foo"))
- assert.Equal(t, "b", w.h.Get("Trailer:bar"))
- assert.Equal(t, "c", w.h.Get("Trailer:baz"))
-}
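The tests above exercise the shape of a worker reply: HTTP status and headers travel as JSON in payload.Context, while the body is either payload.Body or a streamed r.Body. A minimal usage sketch, assuming the imports and the testWriter type from the deleted file (the function name sketchResponseWrite is hypothetical):

func sketchResponseWrite(t *testing.T) {
	// status and headers as JSON in Context, raw body separately
	rsp, err := handler.NewResponse(&payload.Payload{
		Context: []byte(`{"headers":{"Content-Type":["text/plain"]},"status": 200}`),
		Body:    []byte(`hello`),
	})
	assert.NoError(t, err)

	w := &testWriter{h: http.Header(make(map[string][]string))}
	assert.NoError(t, rsp.Write(w))

	assert.Equal(t, 200, w.code)
	assert.Equal(t, "text/plain", w.h.Get("Content-Type"))
	assert.Equal(t, "hello", w.buf.String())
}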
diff --git a/tests/plugins/http/uploads_config_test.go b/tests/plugins/http/uploads_config_test.go
deleted file mode 100644
index 4f99b621..00000000
--- a/tests/plugins/http/uploads_config_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package http
-
-import (
- "os"
- "testing"
-
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/stretchr/testify/assert"
-)
-
-func TestFsConfig_Forbids(t *testing.T) {
- cfg := config.Uploads{Forbid: []string{".php"}}
-
- assert.True(t, cfg.Forbids("index.php"))
- assert.True(t, cfg.Forbids("index.PHP"))
- assert.True(t, cfg.Forbids("phpadmin/index.bak.php"))
- assert.False(t, cfg.Forbids("index.html"))
-}
-
-func TestFsConfig_TmpFallback(t *testing.T) {
- cfg := config.Uploads{Dir: "test"}
- assert.Equal(t, "test", cfg.TmpDir())
-
- cfg = config.Uploads{Dir: ""}
- assert.Equal(t, os.TempDir(), cfg.TmpDir())
-}
diff --git a/tests/plugins/http/uploads_test.go b/tests/plugins/http/uploads_test.go
deleted file mode 100644
index 54f2bead..00000000
--- a/tests/plugins/http/uploads_test.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package http
-
-import (
- "bytes"
- "context"
- "crypto/sha512"
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "mime/multipart"
- "net/http"
- "os"
- "os/exec"
- "testing"
- "time"
-
- j "github.com/json-iterator/go"
- poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/transport/pipe"
- handler "github.com/spiral/roadrunner/v2/pkg/worker_handler"
- "github.com/spiral/roadrunner/v2/plugins/http/config"
- "github.com/stretchr/testify/assert"
-)
-
-var json = j.ConfigCompatibleWithStandardLibrary
-
-const testFile = "uploads_test.go"
-
-func TestHandler_Upload_File(t *testing.T) {
- pool, err := poolImpl.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
- pipe.NewPipeFactory(),
- &poolImpl.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, pool)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8021", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", errS)
- }
- }()
-
- go func() {
- errL := hs.ListenAndServe()
- if errL != nil && errL != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", errL)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
-
- f := mustOpen(testFile)
- defer func() {
- errC := f.Close()
- if errC != nil {
- t.Errorf("failed to close a file: error %v", errC)
- }
- }()
- fw, err := w.CreateFormFile("upload", f.Name())
- assert.NotNil(t, fw)
- assert.NoError(t, err)
- _, err = io.Copy(fw, f)
- if err != nil {
- t.Errorf("error copying the file: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the file: error %v", err)
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- errC := r.Body.Close()
- if errC != nil {
- t.Errorf("error closing the Body: error %v", errC)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- fs := fileString(testFile, 0, "application/octet-stream")
-
- assert.Equal(t, `{"upload":`+fs+`}`, string(b))
-}
-
-func TestHandler_Upload_NestedFile(t *testing.T) {
- pool, err := poolImpl.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
- pipe.NewPipeFactory(),
- &poolImpl.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{},
- }, nil, pool)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8021", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", errS)
- }
- }()
-
- go func() {
- errL := hs.ListenAndServe()
- if errL != nil && errL != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", errL)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
-
- f := mustOpen(testFile)
- defer func() {
- errC := f.Close()
- if errC != nil {
- t.Errorf("failed to close a file: error %v", errC)
- }
- }()
- fw, err := w.CreateFormFile("upload[x][y][z][]", f.Name())
- assert.NotNil(t, fw)
- assert.NoError(t, err)
- _, err = io.Copy(fw, f)
- if err != nil {
- t.Errorf("error copying the file: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the file: error %v", err)
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- errC := r.Body.Close()
- if errC != nil {
- t.Errorf("error closing the Body: error %v", errC)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- fs := fileString(testFile, 0, "application/octet-stream")
-
- assert.Equal(t, `{"upload":{"x":{"y":{"z":[`+fs+`]}}}}`, string(b))
-}
-
-func TestHandler_Upload_File_NoTmpDir(t *testing.T) {
- pool, err := poolImpl.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
- pipe.NewPipeFactory(),
- &poolImpl.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: "-------",
- Forbid: []string{},
- }, nil, pool)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8021", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- errL := hs.ListenAndServe()
- if errL != nil && errL != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", errL)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
-
- f := mustOpen(testFile)
- defer func() {
- errC := f.Close()
- if errC != nil {
- t.Errorf("failed to close a file: error %v", errC)
- }
- }()
- fw, err := w.CreateFormFile("upload", f.Name())
- assert.NotNil(t, fw)
- assert.NoError(t, err)
- _, err = io.Copy(fw, f)
- if err != nil {
- t.Errorf("error copying the file: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the file: error %v", err)
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- errC := r.Body.Close()
- if errC != nil {
- t.Errorf("error closing the Body: error %v", errC)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- fs := fileString(testFile, 6, "application/octet-stream")
-
- assert.Equal(t, `{"upload":`+fs+`}`, string(b))
-}
-
-func TestHandler_Upload_File_Forbids(t *testing.T) {
- pool, err := poolImpl.Initialize(context.Background(),
- func() *exec.Cmd { return exec.Command("php", "../../http/client.php", "upload", "pipes") },
- pipe.NewPipeFactory(),
- &poolImpl.Config{
- NumWorkers: 1,
- AllocateTimeout: time.Second * 1000,
- DestroyTimeout: time.Second * 1000,
- })
- if err != nil {
- t.Fatal(err)
- }
-
- h, err := handler.NewHandler(1024, 500, config.Uploads{
- Dir: os.TempDir(),
- Forbid: []string{".go"},
- }, nil, pool)
- assert.NoError(t, err)
-
- hs := &http.Server{Addr: ":8021", Handler: h}
- defer func() {
- errS := hs.Shutdown(context.Background())
- if errS != nil {
- t.Errorf("error during the shutdown: error %v", err)
- }
- }()
-
- go func() {
- errL := hs.ListenAndServe()
- if errL != nil && errL != http.ErrServerClosed {
- t.Errorf("error listening the interface: error %v", errL)
- }
- }()
- time.Sleep(time.Millisecond * 10)
-
- var mb bytes.Buffer
- w := multipart.NewWriter(&mb)
-
- f := mustOpen(testFile)
- defer func() {
- errC := f.Close()
- if errC != nil {
- t.Errorf("failed to close a file: error %v", errC)
- }
- }()
- fw, err := w.CreateFormFile("upload", f.Name())
- assert.NotNil(t, fw)
- assert.NoError(t, err)
- _, err = io.Copy(fw, f)
- if err != nil {
- t.Errorf("error copying the file: error %v", err)
- }
-
- err = w.Close()
- if err != nil {
- t.Errorf("error closing the file: error %v", err)
- }
-
- req, err := http.NewRequest("POST", "http://127.0.0.1"+hs.Addr, &mb)
- assert.NoError(t, err)
-
- req.Header.Set("Content-Type", w.FormDataContentType())
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- defer func() {
- errC := r.Body.Close()
- if errC != nil {
- t.Errorf("error closing the Body: error %v", errC)
- }
- }()
-
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
-
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- fs := fileString(testFile, 8, "application/octet-stream")
-
- assert.Equal(t, `{"upload":`+fs+`}`, string(b))
-}
-
-func Test_FileExists(t *testing.T) {
- assert.True(t, exists(testFile))
- assert.False(t, exists("uploads_test."))
-}
-
-func mustOpen(f string) *os.File {
- r, err := os.Open(f)
- if err != nil {
- panic(err)
- }
- return r
-}
-
-type fInfo struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- Mime string `json:"mime"`
- Error int `json:"error"`
- Sha512 string `json:"sha512,omitempty"`
-}
-
-func fileString(f string, errNo int, mime string) string {
- s, err := os.Stat(f)
- if err != nil {
- fmt.Println(fmt.Errorf("error stat the file, error: %v", err))
- }
-
- ff, err := os.Open(f)
- if err != nil {
- fmt.Println(fmt.Errorf("error opening the file, error: %v", err))
- }
-
- defer func() {
- er := ff.Close()
- if er != nil {
- fmt.Println(fmt.Errorf("error closing the file, error: %v", er))
- }
- }()
-
- h := sha512.New()
- _, err = io.Copy(h, ff)
- if err != nil {
- fmt.Println(fmt.Errorf("error copying the file, error: %v", err))
- }
-
- v := &fInfo{
- Name: s.Name(),
- Size: s.Size(),
- Error: errNo,
- Mime: mime,
- Sha512: hex.EncodeToString(h.Sum(nil)),
- }
-
- if errNo != 0 {
- v.Sha512 = ""
- v.Size = 0
- }
-
- r, err := json.Marshal(v)
- if err != nil {
- fmt.Println(fmt.Errorf("error marshaling fInfo, error: %v", err))
- }
- return string(r)
-}
-
-// exists reports whether the given file exists.
-func exists(path string) bool {
- if _, err := os.Stat(path); os.IsNotExist(err) {
- return false
- }
- return true
-}
diff --git a/tests/plugins/informer/.rr-informer.yaml b/tests/plugins/informer/.rr-informer.yaml
deleted file mode 100644
index 94c9a856..00000000
--- a/tests/plugins/informer/.rr-informer.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG: "C138"
- relay: "pipes"
- relay_timeout: "20s"
-
-rpc:
- listen: tcp://127.0.0.1:6001
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/informer/informer_test.go b/tests/plugins/informer/informer_test.go
deleted file mode 100644
index c3b5c6a6..00000000
--- a/tests/plugins/informer/informer_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package informer
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/stretchr/testify/assert"
-)
-
-func TestInformerInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{
- Path: ".rr-informer.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &logger.ZapLogger{},
- &informer.Plugin{},
- &rpcPlugin.Plugin{},
- &Plugin1{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("InformerWorkersRpcTest", informerWorkersRPCTest)
- t.Run("InformerListRpcTest", informerListRPCTest)
- t.Run("InformerPluginWithoutWorkersRpcTest", informerPluginWOWorkersRPCTest)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func informerPluginWOWorkersRPCTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
- list := struct {
- // Workers is list of workers.
- Workers []process.State `json:"workers"`
- }{}
-
- err = client.Call("informer.Workers", "informer.config", &list)
- assert.NoError(t, err)
- assert.Len(t, list.Workers, 0)
-}
-
-func informerWorkersRPCTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // WorkerList contains list of workers.
- list := struct {
- // Workers is list of workers.
- Workers []process.State `json:"workers"`
- }{}
-
- err = client.Call("informer.Workers", "informer.plugin1", &list)
- assert.NoError(t, err)
- assert.Len(t, list.Workers, 10)
-}
-
-func informerListRPCTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // list is populated with the names of the registered plugins returned by informer.List.
- list := make([]string, 0, 5)
- // Plugins which are expected to be in the list
- expected := []string{"rpc", "logs", "informer.plugin1", "config", "server"}
-
- err = client.Call("informer.List", true, &list)
- assert.NoError(t, err)
- assert.ElementsMatch(t, list, expected)
-}
diff --git a/tests/plugins/informer/test_plugin.go b/tests/plugins/informer/test_plugin.go
deleted file mode 100644
index 21897f40..00000000
--- a/tests/plugins/informer/test_plugin.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package informer
-
-import (
- "context"
- "time"
-
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/state/process"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-var testPoolConfig = &pool.Config{
- NumWorkers: 10,
- MaxJobs: 100,
- AllocateTimeout: time.Second * 10,
- DestroyTimeout: time.Second * 10,
- Supervisor: &pool.SupervisorConfig{
- WatchTick: 60 * time.Second,
- TTL: 1000 * time.Second,
- IdleTTL: 10 * time.Second,
- ExecTTL: 10 * time.Second,
- MaxWorkerMemory: 1000,
- },
-}
-
-// Plugin1 is a test plugin that exposes a worker pool to the informer.
-type Plugin1 struct {
- config config.Configurer
- server server.Server
-}
-
-func (p1 *Plugin1) Init(cfg config.Configurer, server server.Server) error {
- p1.config = cfg
- p1.server = server
- return nil
-}
-
-func (p1 *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
- return errCh
-}
-
-func (p1 *Plugin1) Stop() error {
- return nil
-}
-
-func (p1 *Plugin1) Name() string {
- return "informer.plugin1"
-}
-
-func (p1 *Plugin1) Available() {}
-
-func (p1 *Plugin1) Workers() []*process.State {
- p, err := p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- panic(err)
- }
-
- ps := make([]*process.State, 0, len(p.Workers()))
- workers := p.Workers()
- for i := 0; i < len(workers); i++ {
- state, err := process.WorkerProcessState(workers[i])
- if err != nil {
- return nil
- }
- ps = append(ps, state)
- }
-
- return ps
-}
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml b/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml
deleted file mode 100644
index f9a7308b..00000000
--- a/tests/plugins/jobs/amqp/.rr-amqp-declare.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-amqp:
- addr: amqp://guest:[email protected]:5672/
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-init.yaml b/tests/plugins/jobs/amqp/.rr-amqp-init.yaml
deleted file mode 100644
index 43840545..00000000
--- a/tests/plugins/jobs/amqp/.rr-amqp-init.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-amqp:
- addr: amqp://guest:[email protected]:5672/
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 1
- pipeline_size: 100000
- timeout: 1
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: amqp
- prefetch: 100
- queue: test-1-queue
- priority: 1
- exchange: default
- exchange_type: direct
- routing_key: test-1
- exclusive: false
- multiple_ack: false
- requeue_on_fail: false
-
- test-2:
- driver: amqp
- prefetch: 100
- queue: test-2-queue
- priority: 2
- exchange: default
- exchange_type: direct
- routing_key: test-2
- exclusive: false
- multiple_ack: false
- requeue_on_fail: false
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml b/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml
deleted file mode 100644
index 79493d96..00000000
--- a/tests/plugins/jobs/amqp/.rr-amqp-jobs-err.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_err.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-amqp:
- addr: amqp://guest:[email protected]:5672/
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 1
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/amqp/.rr-no-global.yaml b/tests/plugins/jobs/amqp/.rr-no-global.yaml
deleted file mode 100644
index 1b01eb73..00000000
--- a/tests/plugins/jobs/amqp/.rr-no-global.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: error
- mode: development
-
-jobs:
- # num logical cores by default
- num_pollers: 10
- # 1 million by default
- pipeline_size: 100000
- # worker pool configuration
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- # list of broker pipelines associated with endpoints
- pipelines:
- test-1:
- driver: amqp
- priority: 1
- pipeline_size: 100
- queue: test-1-queue
- exchange: default
- exchange_type: direct
- routing_key: test
-
- test-2:
- driver: amqp
- priority: 2
- pipeline_size: 100
- queue: test-2-queue
- exchange: default
- exchange_type: direct
- routing_key: test-2
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml
deleted file mode 100644
index 3555ef96..00000000
--- a/tests/plugins/jobs/beanstalk/.rr-beanstalk-declare.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-beanstalk:
- # beanstalk address
- addr: tcp://127.0.0.1:11300
- # connect timeout
- timeout: 10s
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml
deleted file mode 100644
index cf9069a8..00000000
--- a/tests/plugins/jobs/beanstalk/.rr-beanstalk-init.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-beanstalk:
- addr: tcp://127.0.0.1:11300
- timeout: 10s
-
-logs:
- level: info
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: beanstalk
- priority: 11
- tube_priority: 1
- tube: default-1
- reserve_timeout: 10s
-
- test-2:
- driver: beanstalk
- priority: 11
- tube_priority: 3
- tube: default-2
- reserve_timeout: 10s
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml b/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml
deleted file mode 100644
index 71b51dce..00000000
--- a/tests/plugins/jobs/beanstalk/.rr-beanstalk-jobs-err.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_err.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-beanstalk:
- addr: tcp://127.0.0.1:11300
- timeout: 10s
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/beanstalk/.rr-no-global.yaml b/tests/plugins/jobs/beanstalk/.rr-no-global.yaml
deleted file mode 100644
index 92d090d4..00000000
--- a/tests/plugins/jobs/beanstalk/.rr-no-global.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: error
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- # list of broker pipelines associated with endpoints
- pipelines:
- test-1:
- driver: beanstalk
- priority: 11
- tube_priority: 1
- tube: default-1
- reserve_timeout: 10s
-
- consume: [ "test-1" ]
-
-endure:
- log_level: debug
diff --git a/tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml b/tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml
deleted file mode 100644
index cdc2655f..00000000
--- a/tests/plugins/jobs/boltdb/.rr-boltdb-declare.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-boltdb:
- permissions: 0777
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml b/tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml
deleted file mode 100644
index 804db543..00000000
--- a/tests/plugins/jobs/boltdb/.rr-boltdb-init.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-boltdb:
- permissions: 0777
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 1
- pipeline_size: 100000
- timeout: 1
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: boltdb
- prefetch: 100
- file: "rr1.db"
- priority: 1
-
- test-2:
- driver: boltdb
- prefetch: 100
- file: "rr2.db"
- priority: 2
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml b/tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml
deleted file mode 100644
index d375a9a5..00000000
--- a/tests/plugins/jobs/boltdb/.rr-boltdb-jobs-err.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_err.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-boltdb:
- permissions: 0777
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 1
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/boltdb/.rr-no-global.yaml b/tests/plugins/jobs/boltdb/.rr-no-global.yaml
deleted file mode 100644
index 54aaf3c6..00000000
--- a/tests/plugins/jobs/boltdb/.rr-no-global.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: error
- mode: development
-
-jobs:
- # num logical cores by default
- num_pollers: 10
- # 1 million by default
- pipeline_size: 100000
- # worker pool configuration
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- # list of broker pipelines associated with endpoints
- pipelines:
- test-1:
- driver: boltdb
- prefetch: 100
- file: "rr1.db"
- priority: 1
-
- test-2:
- driver: boltdb
- prefetch: 100
- file: "rr2.db"
- priority: 1
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/configs/.rr-jobs-init.yaml b/tests/plugins/jobs/configs/.rr-jobs-init.yaml
deleted file mode 100644
index 9813344e..00000000
--- a/tests/plugins/jobs/configs/.rr-jobs-init.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-amqp:
- addr: amqp://guest:[email protected]:5672/
-
-# beanstalk configuration
-#
-beanstalk:
- # beanstalk address
- addr: tcp://127.0.0.1:11300
- # connect timeout
- timeout: 10s
-
-# amazon sqs configuration
-# General section
-sqs:
- key: api-key
- secret: api-secret
- region: us-west-1
- endpoint: http://127.0.0.1:9324
-
-logs:
- level: info
- encoding: console
- mode: development
-
-jobs:
- # num logical cores by default
- num_pollers: 10
- # 1 million by default
- pipeline_size: 100000
- # worker pool configuration
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- # list of broker pipelines associated with endpoints
- pipelines:
- test-local:
- driver: memory
- priority: 10
- prefetch: 10000
-
- test-local-2:
- driver: memory
- priority: 1
- prefetch: 10000
-
- test-local-3:
- driver: memory
- priority: 2
- prefetch: 10000
-
- test-1:
- driver: amqp
- # QoS
- prefetch: 1000000
- # Queue name
- queue: test-1-queue
- # Pipeline jobs priority, 1 - highest
- priority: 1
- # Exchange
- exchange: default
- # Exchange type: direct, topic, fanout
- exchange_type: direct
- # Routing key for the queue
- routing_key: test
- # Declare the queue as exclusive (accessible only by the declaring connection)
- exclusive: false
- # When multiple is true, this delivery and all prior unacknowledged deliveries
- # on the same channel will be acknowledged. This is useful for batch processing
- # of deliveries
- multiple_ack: false
- # When requeue_on_fail is true, a delivery whose job fails is requeued
- # back to the queue instead of being dropped. Useful when transient
- # errors should not lose the message
- requeue_on_fail: false
-
- test-2-amqp:
- driver: amqp
- priority: 2
- prefetch: 1000000
- queue: test-2-queue
- exchange: default
- exchange_type: direct
- routing_key: test-2
-
- test-2:
- driver: beanstalk
- priority: 11
- tube: default
-
- test-3:
- driver: sqs
- prefetch: 1000000
- queue: default
- attributes:
- MessageRetentionPeriod: 86400
- tags:
- test: "tag"
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-local", "test-local-2", "test-local-3", "test-1", "test-2-amqp", "test-3" ]
-
diff --git a/tests/plugins/jobs/configs/.rr-jobs-metrics.yaml b/tests/plugins/jobs/configs/.rr-jobs-metrics.yaml
deleted file mode 100644
index 4db9a676..00000000
--- a/tests/plugins/jobs/configs/.rr-jobs-metrics.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-metrics:
- address: 127.0.0.1:2112
-
-logs:
- level: info
- encoding: console
- mode: development
-
-jobs:
- # num logical cores by default
- num_pollers: 10
- # 1 million by default
- pipeline_size: 100000
- # worker pool configuration
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
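Note on the config above: the metrics section only declares a listen address (127.0.0.1:2112); the scrape path is not part of this file. A minimal, hypothetical smoke check against that listener might look like the sketch below, which assumes the conventional Prometheus /metrics path (an assumption, not something stated in this config):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// 127.0.0.1:2112 is the metrics.address value from the config above;
	// the /metrics path is assumed (standard Prometheus convention).
	resp, err := http.Get("http://127.0.0.1:2112/metrics") //nolint:noctx
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%d, read %d bytes of metrics\n", resp.StatusCode, len(body))
}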
diff --git a/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml
deleted file mode 100644
index 2c4709ba..00000000
--- a/tests/plugins/jobs/durability/.rr-amqp-durability-redial.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-amqp:
- addr: amqp://guest:[email protected]:23679/
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- timeout: 1
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: amqp
- prefetch: 100
- queue: test-1-queue
- priority: 1
- exchange: default
- exchange_type: direct
- routing_key: test-1
- exclusive: false
- multiple_ack: false
- requeue_on_fail: false
-
- test-2:
- driver: amqp
- prefetch: 100
- queue: test-2-queue
- priority: 2
- exchange: default
- exchange_type: direct
- routing_key: test-2
- exclusive: false
- multiple_ack: false
- requeue_on_fail: false
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml
deleted file mode 100644
index 57d8ad2d..00000000
--- a/tests/plugins/jobs/durability/.rr-beanstalk-durability-redial.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-beanstalk:
- addr: tcp://127.0.0.1:11400
- timeout: 10s
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: beanstalk
- priority: 11
- tube_priority: 1
- tube: default-1
- reserve_timeout: 10s
-
- test-2:
- driver: beanstalk
- priority: 11
- tube_priority: 3
- tube: default-2
- reserve_timeout: 10s
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml b/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml
deleted file mode 100644
index b6ba83a4..00000000
--- a/tests/plugins/jobs/durability/.rr-sqs-durability-redial.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-sqs:
- key: api-key
- secret: api-secret
- region: us-west-1
- endpoint: http://127.0.0.1:19324
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- timeout: 20
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: sqs
- prefetch: 10
- visibility_timeout: 0
- wait_time_seconds: 1
- queue: default
- # https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SetQueueAttributes.html
- attributes:
- DelaySeconds: 0
- MaximumMessageSize: 262144
- MessageRetentionPeriod: 345600
- ReceiveMessageWaitTimeSeconds: 0
- VisibilityTimeout: 0
- tags:
- test: "tag"
-
- test-2:
- driver: sqs
- prefetch: 10
- queue: default-2
- wait_time_seconds: 1
- attributes:
- MessageRetentionPeriod: 86400
- tags:
- test: "tag"
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
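The three durability configs above point their drivers at non-default ports (amqp on 23679, beanstalk on 11400, the sqs endpoint on 19324), and the redial tests toggle a proxy listening on 127.0.0.1:8474 to simulate a dropped connection; the enableProxy/disableProxy/deleteProxy helpers in the next file do exactly that over its HTTP API. A condensed sketch of the same toggle (illustrative only, assuming the same imports and testify helpers as helpers.go below, plus fmt):

// toggleProxy enables or disables a named proxy on 127.0.0.1:8474,
// mirroring the enableProxy/disableProxy helpers defined below.
func toggleProxy(t *testing.T, name string, enabled bool) {
	buf := bytes.NewBufferString(fmt.Sprintf(`{"enabled":%t}`, enabled))

	resp, err := http.Post("http://127.0.0.1:8474/proxies/"+name, "application/json", buf) //nolint:noctx
	require.NoError(t, err)
	require.Equal(t, 200, resp.StatusCode)
	if resp.Body != nil {
		_ = resp.Body.Close()
	}
}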
diff --git a/tests/plugins/jobs/helpers.go b/tests/plugins/jobs/helpers.go
deleted file mode 100644
index 6c2d05ca..00000000
--- a/tests/plugins/jobs/helpers.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package jobs
-
-import (
- "bytes"
- "net"
- "net/http"
- "net/rpc"
- "testing"
-
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-const (
- push string = "jobs.Push"
- pause string = "jobs.Pause"
- destroy string = "jobs.Destroy"
- resume string = "jobs.Resume"
- stat string = "jobs.Stat"
-)
-
-func resumePipes(pipes ...string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))}
-
- for i := 0; i < len(pipes); i++ {
- pipe.GetPipelines()[i] = pipes[i]
- }
-
- er := &jobsv1beta.Empty{}
- err = client.Call(resume, pipe, er)
- assert.NoError(t, err)
- }
-}
-
-func pushToDisabledPipe(pipeline string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{
- Job: "some/php/namespace",
- Id: "1",
- Payload: `{"hello":"world"}`,
- Headers: nil,
- Options: &jobsv1beta.Options{
- Priority: 1,
- Pipeline: pipeline,
- },
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call(push, req, er)
- assert.NoError(t, err)
- }
-}
-
-func pushToPipe(pipeline string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{
- Job: "some/php/namespace",
- Id: "1",
- Payload: `{"hello":"world"}`,
- Headers: map[string]*jobsv1beta.HeaderValue{"test": {Value: []string{"test2"}}},
- Options: &jobsv1beta.Options{
- Priority: 1,
- Pipeline: pipeline,
- Delay: 0,
- },
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call(push, req, er)
- assert.NoError(t, err)
- }
-}
-
-func pushToPipeDelayed(pipeline string, delay int64) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{
- Job: "some/php/namespace",
- Id: "2",
- Payload: `{"hello":"world"}`,
- Headers: map[string]*jobsv1beta.HeaderValue{"test": {Value: []string{"test2"}}},
- Options: &jobsv1beta.Options{
- Priority: 1,
- Pipeline: pipeline,
- Delay: delay,
- },
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call(push, req, er)
- assert.NoError(t, err)
- }
-}
-
-func pushToPipeErr(pipeline string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- require.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- req := &jobsv1beta.PushRequest{Job: &jobsv1beta.Job{
- Job: "some/php/namespace",
- Id: "1",
- Payload: `{"hello":"world"}`,
- Headers: map[string]*jobsv1beta.HeaderValue{"test": {Value: []string{"test2"}}},
- Options: &jobsv1beta.Options{
- Priority: 1,
- Pipeline: pipeline,
- Delay: 0,
- },
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call(push, req, er)
- require.Error(t, err)
- }
-}
-func pausePipelines(pipes ...string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))}
-
- for i := 0; i < len(pipes); i++ {
- pipe.GetPipelines()[i] = pipes[i]
- }
-
- er := &jobsv1beta.Empty{}
- err = client.Call(pause, pipe, er)
- assert.NoError(t, err)
- }
-}
-
-func destroyPipelines(pipes ...string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, len(pipes))}
-
- for i := 0; i < len(pipes); i++ {
- pipe.GetPipelines()[i] = pipes[i]
- }
-
- er := &jobsv1beta.Empty{}
- err = client.Call(destroy, pipe, er)
- assert.NoError(t, err)
- }
-}
-
-func enableProxy(name string, t *testing.T) {
- buf := new(bytes.Buffer)
- buf.WriteString(`{"enabled":true}`)
-
- resp, err := http.Post("http://127.0.0.1:8474/proxies/"+name, "application/json", buf) //nolint:noctx
- require.NoError(t, err)
- require.Equal(t, 200, resp.StatusCode)
- if resp.Body != nil {
- _ = resp.Body.Close()
- }
-}
-
-func disableProxy(name string, t *testing.T) {
- buf := new(bytes.Buffer)
- buf.WriteString(`{"enabled":false}`)
-
- resp, err := http.Post("http://127.0.0.1:8474/proxies/"+name, "application/json", buf) //nolint:noctx
- require.NoError(t, err)
- require.Equal(t, 200, resp.StatusCode)
- if resp.Body != nil {
- _ = resp.Body.Close()
- }
-}
-
-func deleteProxy(name string, t *testing.T) {
- client := &http.Client{}
-
- req, err := http.NewRequest(http.MethodDelete, "http://127.0.0.1:8474/proxies/"+name, nil) //nolint:noctx
- require.NoError(t, err)
-
- resp, err := client.Do(req)
- require.NoError(t, err)
-
- require.NoError(t, err)
- require.Equal(t, 204, resp.StatusCode)
- if resp.Body != nil {
- _ = resp.Body.Close()
- }
-}
-
-func stats(state *jobState.State) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- st := &jobsv1beta.Stats{}
- er := &jobsv1beta.Empty{}
-
- err = client.Call(stat, er, st)
- require.NoError(t, err)
- require.NotNil(t, st)
-
- state.Queue = st.Stats[0].Queue
- state.Pipeline = st.Stats[0].Pipeline
- state.Driver = st.Stats[0].Driver
- state.Active = st.Stats[0].Active
- state.Delayed = st.Stats[0].Delayed
- state.Reserved = st.Stats[0].Reserved
- state.Ready = st.Stats[0].Ready
- }
-}
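Taken together, the helpers above cover the full pipeline lifecycle that the driver tests below run as subtests: push, pause, resume, destroy, and stats over the jobs.* RPC methods. A condensed, illustrative composition (assuming the same 127.0.0.1:6001 RPC endpoint and an already-declared "test-1" pipeline):

// runPipelineLifecycle strings the helpers above into one lifecycle pass.
// Illustrative only; the real tests below interleave sleeps and assertions.
func runPipelineLifecycle(t *testing.T) {
	t.Run("Resume", resumePipes("test-1"))               // jobs.Resume
	t.Run("Push", pushToPipe("test-1"))                  // jobs.Push
	t.Run("PushDelayed", pushToPipeDelayed("test-1", 5)) // jobs.Push with a 5s delay
	t.Run("Pause", pausePipelines("test-1"))             // jobs.Pause

	out := &jobState.State{}
	t.Run("Stats", stats(out)) // jobs.Stat fills the state snapshot

	t.Run("Destroy", destroyPipelines("test-1")) // jobs.Destroy
}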
diff --git a/tests/plugins/jobs/jobs_amqp_test.go b/tests/plugins/jobs/jobs_amqp_test.go
deleted file mode 100644
index 949698ec..00000000
--- a/tests/plugins/jobs/jobs_amqp_test.go
+++ /dev/null
@@ -1,499 +0,0 @@
-package jobs
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/amqp"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestAMQPInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "amqp/.rr-amqp-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestAMQPDeclare(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "amqp/.rr-amqp-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareAMQPPipeline", declareAMQPPipe)
- t.Run("ConsumeAMQPPipeline", resumePipes("test-3"))
- t.Run("PushAMQPPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PauseAMQPPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyAMQPPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestAMQPJobsError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "amqp/.rr-amqp-jobs-err.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareAMQPPipeline", declareAMQPPipe)
- t.Run("ConsumeAMQPPipeline", resumePipes("test-3"))
- t.Run("PushAMQPPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 25)
- t.Run("PauseAMQPPipeline", pausePipelines("test-3"))
- t.Run("DestroyAMQPPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestAMQPNoGlobalSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "amqp/.rr-no-global.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- require.Error(t, err)
-}
-
-func TestAMQPStats(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "amqp/.rr-amqp-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareAMQPPipeline", declareAMQPPipe)
- t.Run("ConsumeAMQPPipeline", resumePipes("test-3"))
- t.Run("PushAMQPPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 2)
- t.Run("PauseAMQPPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second * 2)
- t.Run("PushAMQPPipeline", pushToPipe("test-3"))
- t.Run("PushPipelineDelayed", pushToPipeDelayed("test-3", 5))
-
- out := &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "amqp")
- assert.Equal(t, out.Queue, "default")
-
- assert.Equal(t, int64(1), out.Active)
- assert.Equal(t, int64(1), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
- assert.Equal(t, false, out.Ready)
-
- time.Sleep(time.Second)
- t.Run("ResumePipeline", resumePipes("test-3"))
- time.Sleep(time.Second * 7)
-
- out = &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "amqp")
- assert.Equal(t, out.Queue, "default")
-
- assert.Equal(t, int64(0), out.Active)
- assert.Equal(t, int64(0), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
- assert.Equal(t, true, out.Ready)
-
- time.Sleep(time.Second)
- t.Run("DestroyAMQPPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func declareAMQPPipe(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
- "driver": "amqp",
- "name": "test-3",
- "routing_key": "test-3",
- "queue": "default",
- "exchange_type": "direct",
- "exchange": "amqp.default",
- "prefetch": "100",
- "priority": "3",
- "exclusive": "true",
- "multiple_ask": "true",
- "requeue_on_fail": "true",
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Declare", pipe, er)
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/jobs/jobs_beanstalk_test.go b/tests/plugins/jobs/jobs_beanstalk_test.go
deleted file mode 100644
index 9f4d37ec..00000000
--- a/tests/plugins/jobs/jobs_beanstalk_test.go
+++ /dev/null
@@ -1,515 +0,0 @@
-package jobs
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/beanstalk"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestBeanstalkInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "beanstalk/.rr-beanstalk-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes()
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestBeanstalkDeclare(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "beanstalk/.rr-beanstalk-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "beanstalk", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes()
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareBeanstalkPipeline", declareBeanstalkPipe)
- t.Run("ConsumeBeanstalkPipeline", resumePipes("test-3"))
- t.Run("PushBeanstalkPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PauseBeanstalkPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyBeanstalkPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestBeanstalkJobsError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "beanstalk/.rr-beanstalk-jobs-err.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "beanstalk", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes()
-
- mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareBeanstalkPipeline", declareBeanstalkPipe)
- t.Run("ConsumeBeanstalkPipeline", resumePipes("test-3"))
- t.Run("PushBeanstalkPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 25)
- t.Run("PauseBeanstalkPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyBeanstalkPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestBeanstalkStats(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "beanstalk/.rr-beanstalk-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "beanstalk", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("beanstalk reserve timeout", "warn", "reserve-with-timeout").AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("beanstalk listener stopped").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareBeanstalkPipe)
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 2)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second * 3)
- t.Run("PushPipelineDelayed", pushToPipeDelayed("test-3", 5))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
-
- out := &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "beanstalk")
- assert.Equal(t, out.Queue, "default")
-
- // try 5 times
- if out.Active == 0 {
- for i := 0; i < 5; i++ {
- time.Sleep(time.Second)
- out = &jobState.State{}
- t.Run("Stats", stats(out))
- if out.Active == 1 {
- break
- }
- }
- }
-
- assert.Equal(t, int64(1), out.Active)
- assert.Equal(t, int64(1), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
-
- time.Sleep(time.Second)
- t.Run("ResumePipeline", resumePipes("test-3"))
- time.Sleep(time.Second * 15)
-
- out = &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "beanstalk")
- assert.Equal(t, out.Queue, "default")
-
- assert.Equal(t, int64(0), out.Active)
- assert.Equal(t, int64(0), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
-
- time.Sleep(time.Second)
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestBeanstalkNoGlobalSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "beanstalk/.rr-no-global.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- require.Error(t, err)
-}
-
-func declareBeanstalkPipe(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
- "driver": "beanstalk",
- "name": "test-3",
- "tube": "default",
- "reserve_timeout": "1",
- "priority": "3",
- "tube_priority": "10",
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Declare", pipe, er)
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/jobs/jobs_boltdb_test.go b/tests/plugins/jobs/jobs_boltdb_test.go
deleted file mode 100644
index ab36ffa4..00000000
--- a/tests/plugins/jobs/jobs_boltdb_test.go
+++ /dev/null
@@ -1,506 +0,0 @@
-package jobs
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/boltdb"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-const (
- rr1db string = "rr1.db"
- rr2db string = "rr2.db"
-)
-
-func TestBoltDBInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "boltdb/.rr-boltdb-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("boltdb listener stopped").Times(4)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &boltdb.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- stopCh <- struct{}{}
- wg.Wait()
-
- assert.NoError(t, os.Remove(rr1db))
- assert.NoError(t, os.Remove(rr2db))
-}
-
-func TestBoltDBDeclare(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "boltdb/.rr-boltdb-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "boltdb", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("boltdb listener stopped").Times(2)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &boltdb.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareBoltDBPipe(rr1db))
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
- assert.NoError(t, os.Remove(rr1db))
-}
-
-func TestBoltDBJobsError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "boltdb/.rr-boltdb-jobs-err.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "boltdb", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("boltdb listener stopped").Times(2)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &boltdb.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareBoltDBPipe(rr1db))
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 25)
- t.Run("PausePipeline", pausePipelines("test-3"))
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
- assert.NoError(t, os.Remove(rr1db))
-}
-
-func TestBoltDBNoGlobalSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "boltdb/.rr-no-global.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &boltdb.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- require.Error(t, err)
-}
-
-func TestBoltDBStats(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "boltdb/.rr-boltdb-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "boltdb", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("boltdb listener stopped").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &boltdb.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareBoltDBPipe(rr1db))
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 2)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second * 2)
- t.Run("PushPipeline", pushToPipe("test-3"))
- t.Run("PushPipelineDelayed", pushToPipeDelayed("test-3", 5))
-
- out := &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, "test-3", out.Pipeline)
- assert.Equal(t, "boltdb", out.Driver)
- assert.Equal(t, "push", out.Queue)
-
- assert.Equal(t, int64(1), out.Active)
- assert.Equal(t, int64(1), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
- assert.Equal(t, false, out.Ready)
-
- time.Sleep(time.Second)
- t.Run("ResumePipeline", resumePipes("test-3"))
- time.Sleep(time.Second * 7)
-
- out = &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, "test-3", out.Pipeline)
- assert.Equal(t, "boltdb", out.Driver)
- assert.Equal(t, "push", out.Queue)
-
- assert.Equal(t, int64(0), out.Active)
- assert.Equal(t, int64(0), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
- assert.Equal(t, true, out.Ready)
-
- time.Sleep(time.Second)
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
- assert.NoError(t, os.Remove(rr1db))
-}
-
-func declareBoltDBPipe(file string) func(t *testing.T) {
- return func(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
- "driver": "boltdb",
- "name": "test-3",
- "prefetch": "100",
- "priority": "3",
- "file": file,
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Declare", pipe, er)
- assert.NoError(t, err)
- }
-}
diff --git a/tests/plugins/jobs/jobs_general_test.go b/tests/plugins/jobs/jobs_general_test.go
deleted file mode 100644
index 5c521c2b..00000000
--- a/tests/plugins/jobs/jobs_general_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package jobs
-
-import (
- "io/ioutil"
- "net/http"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/amqp"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/memory"
- "github.com/spiral/roadrunner/v2/plugins/metrics"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestJobsInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-jobs-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("driver ready", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("driver ready", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2-amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2-amqp", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(2)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- &amqp.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestJOBSMetrics(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-jobs-metrics.yaml"
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &server.Plugin{},
- &jobs.Plugin{},
- &metrics.Plugin{},
- &memory.Plugin{},
- mockLogger,
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- tt := time.NewTimer(time.Minute * 3)
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer tt.Stop()
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
-
- t.Run("DeclareEphemeralPipeline", declareMemoryPipe)
- t.Run("ConsumeEphemeralPipeline", consumeMemoryPipe)
- t.Run("PushEphemeralPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PushEphemeralPipeline", pushToPipeDelayed("test-3", 5))
- time.Sleep(time.Second)
- t.Run("PushEphemeralPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 5)
-
- genericOut, err := get()
- assert.NoError(t, err)
-
- assert.Contains(t, genericOut, `rr_jobs_jobs_err 0`)
- assert.Contains(t, genericOut, `rr_jobs_jobs_ok 3`)
- assert.Contains(t, genericOut, `rr_jobs_push_err 0`)
- assert.Contains(t, genericOut, `rr_jobs_push_ok 3`)
- assert.Contains(t, genericOut, "workers_memory_bytes")
-
- close(sig)
- wg.Wait()
-}
-
-const getAddr = "http://127.0.0.1:2112/metrics"
-
-// get request and return body
-func get() (string, error) {
- r, err := http.Get(getAddr)
- if err != nil {
- return "", err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", err
- }
- // unsafe
- return string(b), err
-}
diff --git a/tests/plugins/jobs/jobs_memory_test.go b/tests/plugins/jobs/jobs_memory_test.go
deleted file mode 100644
index 7e39c556..00000000
--- a/tests/plugins/jobs/jobs_memory_test.go
+++ /dev/null
@@ -1,570 +0,0 @@
-package jobs
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/memory"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestMemoryInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "memory/.rr-memory-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestMemoryDeclare(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "memory/.rr-memory-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "memory", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareMemoryPipe)
- t.Run("ConsumePipeline", consumeMemoryPipe)
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestMemoryPauseResume(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "memory/.rr-memory-pause-resume.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
-
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-local", "driver", "memory", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-local", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("Pause", pausePipelines("test-local"))
- t.Run("pushToDisabledPipe", pushToDisabledPipe("test-local"))
- t.Run("Resume", resumePipes("test-local"))
- t.Run("pushToEnabledPipe", pushToPipe("test-local"))
- time.Sleep(time.Second * 1)
-
- stopCh <- struct{}{}
- time.Sleep(time.Second)
- wg.Wait()
-}
-
-func TestMemoryJobsError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "memory/.rr-memory-jobs-err.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "memory", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareMemoryPipe)
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 25)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestMemoryStats(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "memory/.rr-memory-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "memory", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareMemoryPipe)
- t.Run("ConsumePipeline", consumeMemoryPipe)
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
-
- t.Run("PushPipeline", pushToPipeDelayed("test-3", 5))
- t.Run("PushPipeline", pushToPipe("test-3"))
-
- time.Sleep(time.Second)
- out := &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "memory")
- assert.Equal(t, out.Queue, "test-3")
-
- assert.Equal(t, out.Active, int64(1))
- assert.Equal(t, out.Delayed, int64(1))
- assert.Equal(t, out.Reserved, int64(0))
-
- time.Sleep(time.Second)
- t.Run("ConsumePipeline", consumeMemoryPipe)
- time.Sleep(time.Second * 7)
-
- out = &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "memory")
- assert.Equal(t, out.Queue, "test-3")
-
- assert.Equal(t, out.Active, int64(0))
- assert.Equal(t, out.Delayed, int64(0))
- assert.Equal(t, out.Reserved, int64(0))
-
- t.Run("DestroyEphemeralPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func declareMemoryPipe(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
- "driver": "memory",
- "name": "test-3",
- "prefetch": "10000",
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Declare", pipe, er)
- assert.NoError(t, err)
-}
-
-func consumeMemoryPipe(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.Pipelines{Pipelines: make([]string, 1)}
- pipe.GetPipelines()[0] = "test-3"
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Resume", pipe, er)
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/jobs/jobs_sqs_test.go b/tests/plugins/jobs/jobs_sqs_test.go
deleted file mode 100644
index 2dd2c8db..00000000
--- a/tests/plugins/jobs/jobs_sqs_test.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package jobs
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- jobState "github.com/spiral/roadrunner/v2/pkg/state/job"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/sqs"
- jobsv1beta "github.com/spiral/roadrunner/v2/proto/jobs/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestSQSInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "sqs/.rr-sqs-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Warn("sqs listener stopped").AnyTimes()
- mockLogger.EXPECT().Info("------> job poller stopped <------").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestSQSDeclare(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "sqs/.rr-sqs-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "sqs", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("sqs listener stopped").AnyTimes()
- mockLogger.EXPECT().Info("------> job poller stopped <------").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareSQSPipeline", declareSQSPipe)
- t.Run("ConsumeSQSPipeline", resumePipes("test-3"))
- t.Run("PushSQSPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PauseSQSPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroySQSPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestSQSJobsError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "sqs/.rr-sqs-jobs-err.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error("jobs protocol error", "error", "error", "delay", gomock.Any(), "requeue", gomock.Any()).Times(3)
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "sqs", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("sqs listener stopped").AnyTimes()
- mockLogger.EXPECT().Info("------> job poller stopped <------").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclareSQSPipeline", declareSQSPipe)
- t.Run("ConsumeSQSPipeline", resumePipes("test-3"))
- t.Run("PushSQSPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second * 25)
- t.Run("PauseSQSPipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
- t.Run("DestroySQSPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-
- time.Sleep(time.Second * 5)
-}
-
-func TestSQSNoGlobalSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "sqs/.rr-no-global.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- require.Error(t, err)
-}
-
-func TestSQSStat(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "sqs/.rr-sqs-declare.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline paused", "pipeline", "test-3", "driver", "sqs", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-3", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Warn("sqs listener stopped").AnyTimes()
- mockLogger.EXPECT().Info("------> job poller stopped <------").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
-
- t.Run("DeclarePipeline", declareSQSPipe)
- t.Run("ConsumePipeline", resumePipes("test-3"))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
- t.Run("PausePipeline", pausePipelines("test-3"))
- time.Sleep(time.Second)
-
- t.Run("PushPipelineDelayed", pushToPipeDelayed("test-3", 5))
- t.Run("PushPipeline", pushToPipe("test-3"))
- time.Sleep(time.Second)
-
- out := &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "sqs")
- assert.Equal(t, out.Queue, "http://127.0.0.1:9324/000000000000/default")
-
- assert.Equal(t, int64(1), out.Active)
- assert.Equal(t, int64(1), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
-
- time.Sleep(time.Second)
- t.Run("ResumePipeline", resumePipes("test-3"))
- time.Sleep(time.Second * 7)
-
- out = &jobState.State{}
- t.Run("Stats", stats(out))
-
- assert.Equal(t, out.Pipeline, "test-3")
- assert.Equal(t, out.Driver, "sqs")
- assert.Equal(t, out.Queue, "http://127.0.0.1:9324/000000000000/default")
-
- assert.Equal(t, int64(0), out.Active)
- assert.Equal(t, int64(0), out.Delayed)
- assert.Equal(t, int64(0), out.Reserved)
-
- t.Run("DestroyPipeline", destroyPipelines("test-3"))
-
- time.Sleep(time.Second * 5)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
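-// declareSQSPipe declares the "test-3" SQS pipeline over the jobs RPC interface.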
-func declareSQSPipe(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- pipe := &jobsv1beta.DeclareRequest{Pipeline: map[string]string{
- "driver": "sqs",
- "name": "test-3",
- "queue": "default",
- "prefetch": "10",
- "priority": "3",
- "visibility_timeout": "0",
- "wait_time_seconds": "3",
- "tags": `{"key":"value"}`,
- }}
-
- er := &jobsv1beta.Empty{}
- err = client.Call("jobs.Declare", pipe, er)
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/jobs/jobs_with_toxics_test.go b/tests/plugins/jobs/jobs_with_toxics_test.go
deleted file mode 100644
index 84fbec48..00000000
--- a/tests/plugins/jobs/jobs_with_toxics_test.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package jobs
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- toxiproxy "github.com/Shopify/toxiproxy/client"
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/amqp"
- "github.com/spiral/roadrunner/v2/plugins/beanstalk"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/informer"
- "github.com/spiral/roadrunner/v2/plugins/jobs"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/sqs"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestDurabilityAMQP(t *testing.T) {
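-	// toxiproxy (listening on 127.0.0.1:8474) is used to cut and restore the broker connection during the test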
- client := toxiproxy.NewClient("127.0.0.1:8474")
- proxies, err := client.Proxies()
- require.NoError(t, err)
-
- for p := range proxies {
- _ = proxies[p].Delete()
- }
-
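-	// route AMQP traffic through 127.0.0.1:23679 so the connection can be dropped to trigger the redial logic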
- proxy, err := client.CreateProxy("redial", "127.0.0.1:23679", "127.0.0.1:5672")
- require.NoError(t, err)
- defer func() {
- _ = proxy.Delete()
- }()
-
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- require.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "durability/.rr-amqp-durability-redial.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(2)
-
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Error("job push error, job might be lost", "error", gomock.Any(), "pipeline", "test-1", "ID", gomock.Any(), "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Error("job push error, job might be lost", "error", gomock.Any(), "pipeline", "test-2", "ID", gomock.Any(), "driver", "amqp", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("delivery channel closed, leaving the rabbit listener").Times(4)
-
- // redial errors
- mockLogger.EXPECT().Warn("rabbitmq reconnecting, caused by", "error", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("rabbitmq dial succeed. trying to redeclare queues and subscribers").AnyTimes()
- mockLogger.EXPECT().Info("queues and subscribers redeclared successfully").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &amqp.Plugin{},
- )
- require.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- disableProxy("redial", t)
- time.Sleep(time.Second * 3)
-
- go func() {
- time.Sleep(time.Second * 5)
- enableProxy("redial", t)
- }()
-
- t.Run("PushPipelineWhileRedialing-1", pushToPipeErr("test-1"))
- t.Run("PushPipelineWhileRedialing-2", pushToPipeErr("test-2"))
-
- time.Sleep(time.Second * 15)
- t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
- t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
-
- time.Sleep(time.Second * 5)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestDurabilitySQS(t *testing.T) {
- client := toxiproxy.NewClient("127.0.0.1:8474")
-
- _, err := client.CreateProxy("redial", "127.0.0.1:19324", "127.0.0.1:9324")
- require.NoError(t, err)
- defer deleteProxy("redial", t)
-
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- require.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "durability/.rr-sqs-durability-redial.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- // redial errors
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
-
- // stop
- mockLogger.EXPECT().Warn("sqs listener stopped").AnyTimes()
- mockLogger.EXPECT().Info("------> job poller stopped <------").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &sqs.Plugin{},
- )
- require.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- disableProxy("redial", t)
- time.Sleep(time.Second * 3)
-
- go func() {
- time.Sleep(time.Second)
- t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
- time.Sleep(time.Second)
- t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
- }()
-
- time.Sleep(time.Second * 5)
- enableProxy("redial", t)
-
- t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
- t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
-
- time.Sleep(time.Second * 10)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestDurabilityBeanstalk(t *testing.T) {
- client := toxiproxy.NewClient("127.0.0.1:8474")
-
- _, err := client.CreateProxy("redial", "127.0.0.1:11400", "127.0.0.1:11300")
- require.NoError(t, err)
- defer deleteProxy("redial", t)
-
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- require.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "durability/.rr-beanstalk-durability-redial.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // general
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Info("pipeline active", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Info("job pushed to the queue", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-1", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
- mockLogger.EXPECT().Warn("pipeline stopped", "pipeline", "test-2", "start", gomock.Any(), "elapsed", gomock.Any()).Times(1)
-
- mockLogger.EXPECT().Info("job processing started", "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("job processed without errors", "ID", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- // redial errors
- mockLogger.EXPECT().Info("beanstalk redial was successful").MinTimes(2)
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-1", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Error("pipeline error", "pipeline", "test-2", "error", gomock.Any(), "start", gomock.Any(), "elapsed", gomock.Any()).AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &jobs.Plugin{},
- &resetter.Plugin{},
- &informer.Plugin{},
- &beanstalk.Plugin{},
- )
- require.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- disableProxy("redial", t)
- time.Sleep(time.Second * 3)
-
- go func() {
- time.Sleep(time.Second * 2)
- t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
- t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
- }()
-
- time.Sleep(time.Second * 5)
- enableProxy("redial", t)
-
- t.Run("PushPipelineWhileRedialing-1", pushToPipe("test-1"))
- t.Run("PushPipelineWhileRedialing-2", pushToPipe("test-2"))
-
- time.Sleep(time.Second * 10)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
diff --git a/tests/plugins/jobs/memory/.rr-memory-declare.yaml b/tests/plugins/jobs/memory/.rr-memory-declare.yaml
deleted file mode 100644
index 726c24ac..00000000
--- a/tests/plugins/jobs/memory/.rr-memory-declare.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/memory/.rr-memory-init.yaml b/tests/plugins/jobs/memory/.rr-memory-init.yaml
deleted file mode 100644
index 9ee8afc2..00000000
--- a/tests/plugins/jobs/memory/.rr-memory-init.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: memory
- priority: 10
- prefetch: 10000
-
- test-2:
- driver: memory
- priority: 10
- prefetch: 10000
-
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml b/tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml
deleted file mode 100644
index 05dc3ffa..00000000
--- a/tests/plugins/jobs/memory/.rr-memory-jobs-err.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_err.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/memory/.rr-memory-pause-resume.yaml b/tests/plugins/jobs/memory/.rr-memory-pause-resume.yaml
deleted file mode 100644
index 1ad48237..00000000
--- a/tests/plugins/jobs/memory/.rr-memory-pause-resume.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: debug
- mode: development
-
-jobs:
- # num logical cores by default
- num_pollers: 10
- # 1mi by default
-  # 1 million by default
- # worker pool configuration
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- # list of broker pipelines associated with endpoints
- pipelines:
- test-local:
- driver: memory
- priority: 10
- pipeline_size: 10000
-
- test-local-2:
- driver: memory
- priority: 1
- pipeline_size: 10000
-
- test-local-3:
- driver: memory
- priority: 2
- pipeline_size: 10000
-
- # list of pipelines to be consumed by the server, keep empty if you want to start consuming manually
- consume: [ "test-local", "test-local-2" ]
-
diff --git a/tests/plugins/jobs/sqs/.rr-no-global.yaml b/tests/plugins/jobs/sqs/.rr-no-global.yaml
deleted file mode 100644
index 2c97a37e..00000000
--- a/tests/plugins/jobs/sqs/.rr-no-global.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-logs:
- level: error
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: sqs
- prefetch: 1000
- visibility_timeout: 0
- wait_time_seconds: 0
- queue: default
- attributes:
- DelaySeconds: 0
- MaximumMessageSize: 262144
- MessageRetentionPeriod: 345600
- ReceiveMessageWaitTimeSeconds: 0
- VisibilityTimeout: 30
- tags:
- test: "tag"
-
- consume: [ "test-1" ]
-
diff --git a/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml b/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml
deleted file mode 100644
index 21209cbb..00000000
--- a/tests/plugins/jobs/sqs/.rr-sqs-declare.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_ok.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-# amazon sqs configuration
-# General section
-sqs:
- key: api-key
- secret: api-secret
- region: us-west-1
- endpoint: http://127.0.0.1:9324
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 1
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/jobs/sqs/.rr-sqs-init.yaml b/tests/plugins/jobs/sqs/.rr-sqs-init.yaml
deleted file mode 100644
index ffdec1fd..00000000
--- a/tests/plugins/jobs/sqs/.rr-sqs-init.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-sqs:
- key: api-key
- secret: api-secret
- region: us-west-1
- endpoint: http://127.0.0.1:9324
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
- pipelines:
- test-1:
- driver: sqs
- prefetch: 1000
- visibility_timeout: 0
- wait_time_seconds: 0
- queue: default
- attributes:
- DelaySeconds: 0
- MaximumMessageSize: 262144
- MessageRetentionPeriod: 345600
- ReceiveMessageWaitTimeSeconds: 0
- VisibilityTimeout: 30
- tags:
- test: "tag"
-
- test-2:
- driver: sqs
- prefetch: 1000
- queue: default-2
- attributes:
- MessageRetentionPeriod: 86400
- tags:
- test: "tag"
- consume: [ "test-1", "test-2" ]
-
diff --git a/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml b/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml
deleted file mode 100644
index b518d433..00000000
--- a/tests/plugins/jobs/sqs/.rr-sqs-jobs-err.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../jobs_err.php"
- relay: "pipes"
- relay_timeout: "20s"
-
-sqs:
- key: api-key
- secret: api-secret
- region: us-west-1
- endpoint: http://127.0.0.1:9324
-
-logs:
- level: debug
- encoding: console
- mode: development
-
-jobs:
- num_pollers: 10
- timeout: 60
- pipeline_size: 100000
- pool:
- num_workers: 10
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/kv/configs/.rr-boltdb.yaml b/tests/plugins/kv/configs/.rr-boltdb.yaml
deleted file mode 100644
index 7a8aee4e..00000000
--- a/tests/plugins/kv/configs/.rr-boltdb.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- boltdb-rr:
- driver: boltdb
- config:
- dir: "."
- file: "rr.db"
- bucket: "test"
- permissions: 0666
- interval: 1 # seconds
diff --git a/tests/plugins/kv/configs/.rr-in-memory.yaml b/tests/plugins/kv/configs/.rr-in-memory.yaml
deleted file mode 100644
index 0452d8bc..00000000
--- a/tests/plugins/kv/configs/.rr-in-memory.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- memory-rr:
- driver: memory
- config:
- interval: 1
diff --git a/tests/plugins/kv/configs/.rr-kv-bolt-no-interval.yaml b/tests/plugins/kv/configs/.rr-kv-bolt-no-interval.yaml
deleted file mode 100644
index 476369c5..00000000
--- a/tests/plugins/kv/configs/.rr-kv-bolt-no-interval.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- boltdb-south:
- driver: boltdb
- config:
- file: "rr.db"
- permissions: 755
-
- boltdb-africa:
- driver: boltdb
- config:
- file: "africa.db"
- permissions: 755
diff --git a/tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml b/tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml
deleted file mode 100644
index e7728972..00000000
--- a/tests/plugins/kv/configs/.rr-kv-bolt-perms.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- boltdb-south:
- driver: boltdb
- config:
- file: "rr.db"
-
- boltdb-africa:
- driver: boltdb
- config:
- file: "africa.db"
- permissions: 0777
diff --git a/tests/plugins/kv/configs/.rr-kv-init.yaml b/tests/plugins/kv/configs/.rr-kv-init.yaml
deleted file mode 100644
index 10cf6491..00000000
--- a/tests/plugins/kv/configs/.rr-kv-init.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- default:
- driver: memory
- config:
- interval: 60
-
- boltdb-south:
- driver: boltdb
- config:
- dir: "."
- file: "rr.db"
- bucket: "rr"
- permissions: 0666
- interval: 1
-
- boltdb-africa:
- driver: boltdb
- config:
- dir: "."
- file: "africa.db"
- bucket: "rr"
- permissions: 0666
- interval: 1
-
- memcached:
- driver: memcached
- config:
- addr: ["127.0.0.1:11211"]
diff --git a/tests/plugins/kv/configs/.rr-memcached.yaml b/tests/plugins/kv/configs/.rr-memcached.yaml
deleted file mode 100644
index ef8de2ab..00000000
--- a/tests/plugins/kv/configs/.rr-memcached.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- memcached-rr:
- driver: memcached
- config:
- addr:
- - "127.0.0.1:11211"
diff --git a/tests/plugins/kv/configs/.rr-redis-global.yaml b/tests/plugins/kv/configs/.rr-redis-global.yaml
deleted file mode 100644
index 27377835..00000000
--- a/tests/plugins/kv/configs/.rr-redis-global.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-redis-rr:
- addrs:
- - "127.0.0.1:6379"
-
-kv:
- redis-rr:
- driver: redis
diff --git a/tests/plugins/kv/configs/.rr-redis-no-config.yaml b/tests/plugins/kv/configs/.rr-redis-no-config.yaml
deleted file mode 100644
index 56113f13..00000000
--- a/tests/plugins/kv/configs/.rr-redis-no-config.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- redis-rr:
- driver: redis
diff --git a/tests/plugins/kv/configs/.rr-redis.yaml b/tests/plugins/kv/configs/.rr-redis.yaml
deleted file mode 100644
index f9b967d5..00000000
--- a/tests/plugins/kv/configs/.rr-redis.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: error
-
-kv:
- redis-rr:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6379"
diff --git a/tests/plugins/kv/storage_plugin_test.go b/tests/plugins/kv/storage_plugin_test.go
deleted file mode 100644
index c10e4726..00000000
--- a/tests/plugins/kv/storage_plugin_test.go
+++ /dev/null
@@ -1,1517 +0,0 @@
-package kv
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/boltdb"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/kv"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/memcached"
- "github.com/spiral/roadrunner/v2/plugins/memory"
- "github.com/spiral/roadrunner/v2/plugins/redis"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- payload "github.com/spiral/roadrunner/v2/proto/kv/v1beta"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestKVInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-kv-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &memory.Plugin{},
- &boltdb.Plugin{},
- &memcached.Plugin{},
- &redis.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &kv.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("KvSetTest", kvSetTest)
- t.Run("KvHasTest", kvHasTest)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-
- _ = os.RemoveAll("rr.db")
- _ = os.RemoveAll("africa.db")
-}
-
-func TestKVNoInterval(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-kv-bolt-no-interval.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &boltdb.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &kv.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("KvSetTest", kvSetTest)
- t.Run("KvHasTest", kvHasTest)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-
- _ = os.RemoveAll("rr.db")
- _ = os.RemoveAll("africa.db")
-}
-
-func TestKVCreateToReopenWithPerms(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-kv-bolt-perms.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &boltdb.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &kv.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestKVCreateToReopenWithPerms2(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-kv-bolt-perms.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &boltdb.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &kv.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("KvSetTest", kvSetTest)
- t.Run("KvHasTest", kvHasTest)
-
- stopCh <- struct{}{}
-
- wg.Wait()
-
- _ = os.RemoveAll("rr.db")
- _ = os.RemoveAll("africa.db")
-}
-
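-// kvSetTest writes a single key/value pair to the boltdb-south storage via the kv RPC.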
-func kvSetTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-	// build a kv.Set request that writes a single key to the boltdb-south storage
- p := &payload.Request{
- Storage: "boltdb-south",
- Items: []*payload.Item{
- {
- Key: "key",
- Value: []byte("val"),
- },
- },
- }
-
- resp := &payload.Response{}
- err = client.Call("kv.Set", p, resp)
- assert.NoError(t, err)
-}
-
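-// kvHasTest verifies that the key written by kvSetTest exists in the boltdb-south storage.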
-func kvHasTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-	// build a kv.Has request that checks the key written by kvSetTest in boltdb-south
- p := &payload.Request{
- Storage: "boltdb-south",
- Items: []*payload.Item{
- {
- Key: "key",
- Value: []byte("val"),
- },
- },
- }
-
- ret := &payload.Response{}
- err = client.Call("kv.Has", p, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 1)
-}
-
-func TestBoltDb(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-boltdb.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &boltdb.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("BOLTDB", testRPCMethods)
- stopCh <- struct{}{}
- wg.Wait()
-
- _ = os.Remove("rr.db")
-}
-
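-// testRPCMethods exercises the kv.Set, kv.Has, kv.MGet, kv.MExpire, kv.TTL, kv.Delete and kv.Clear RPC methods against the boltdb-rr storage.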
-func testRPCMethods(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- // add 5 second ttl
- tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
- keys := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "c",
- },
- },
- }
-
- data := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- Timeout: tt,
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- ret := &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", data, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3) // should be 3
-
- // key "c" should be deleted
- time.Sleep(time.Second * 7)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // should be 2
-
- ret = &payload.Response{}
- err = client.Call("kv.MGet", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // c is expired
-
- tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
-
- data2 := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Timeout: tt2,
- },
- {
- Key: "b",
- Timeout: tt2,
- },
- {
- Key: "d",
- Timeout: tt2,
- },
- },
- }
-
- // MEXPIRE
- ret = &payload.Response{}
- err = client.Call("kv.MExpire", data2, ret)
- assert.NoError(t, err)
-
- // TTL
- keys2 := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "d",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.TTL", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3)
-
- // HAS AFTER TTL
- time.Sleep(time.Second * 15)
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- // DELETE
- keysDel := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "e",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.Delete", keysDel, ret)
- assert.NoError(t, err)
-
- // HAS AFTER DELETE
- ret = &payload.Response{}
- err = client.Call("kv.Has", keysDel, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- dataClear := &payload.Request{
- Storage: "boltdb-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- clear := &payload.Request{Storage: "boltdb-rr"}
-
- ret = &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", dataClear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 5) // should be 5
-
- ret = &payload.Response{}
- err = client.Call("kv.Clear", clear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
-	assert.Len(t, ret.GetItems(), 0) // should be 0 after Clear
-}
-
-func TestMemcached(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-memcached.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &memcached.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("MEMCACHED", testRPCMethodsMemcached)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
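-// testRPCMethodsMemcached runs the same RPC scenario against the memcached-rr storage; kv.TTL is expected to return an error since memcached does not expose TTLs.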
-func testRPCMethodsMemcached(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- // add 5 second ttl
- tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
-
- keys := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "c",
- },
- },
- }
-
- data := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- Timeout: tt,
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- ret := &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", data, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3) // should be 3
-
- // key "c" should be deleted
- time.Sleep(time.Second * 7)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // should be 2
-
- ret = &payload.Response{}
- err = client.Call("kv.MGet", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // c is expired
-
- tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
-
- data2 := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Timeout: tt2,
- },
- {
- Key: "b",
- Timeout: tt2,
- },
- {
- Key: "d",
- Timeout: tt2,
- },
- },
- }
-
- // MEXPIRE
- ret = &payload.Response{}
- err = client.Call("kv.MExpire", data2, ret)
- assert.NoError(t, err)
-
- // TTL call is not supported for the memcached driver
- keys2 := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "d",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.TTL", keys2, ret)
- assert.Error(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- // HAS AFTER TTL
- time.Sleep(time.Second * 15)
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- // DELETE
- keysDel := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "e",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.Delete", keysDel, ret)
- assert.NoError(t, err)
-
- // HAS AFTER DELETE
- ret = &payload.Response{}
- err = client.Call("kv.Has", keysDel, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- dataClear := &payload.Request{
- Storage: "memcached-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- clear := &payload.Request{Storage: "memcached-rr"}
-
- ret = &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", dataClear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 5) // should be 5
-
- ret = &payload.Response{}
- err = client.Call("kv.Clear", clear, ret)
- assert.NoError(t, err)
-
- time.Sleep(time.Second * 2)
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
-	assert.Len(t, ret.GetItems(), 0) // should be 0 after Clear
-}
-
-func TestInMemory(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-in-memory.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &memory.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("INMEMORY", testRPCMethodsInMemory)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
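-// testRPCMethodsInMemory runs the same RPC scenario against the in-memory memory-rr storage.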
-func testRPCMethodsInMemory(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- // add 5 second ttl
-
- tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
- keys := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "c",
- },
- },
- }
-
- data := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- Timeout: tt,
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- ret := &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", data, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3) // should be 3
-
- // key "c" should be deleted
- time.Sleep(time.Second * 7)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // should be 2
-
- ret = &payload.Response{}
- err = client.Call("kv.MGet", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // c is expired
-
- tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
-
- data2 := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Timeout: tt2,
- },
- {
- Key: "b",
- Timeout: tt2,
- },
- {
- Key: "d",
- Timeout: tt2,
- },
- },
- }
-
- // MEXPIRE
- ret = &payload.Response{}
- err = client.Call("kv.MExpire", data2, ret)
- assert.NoError(t, err)
-
- // TTL
- keys2 := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "d",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.TTL", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3)
-
- // HAS AFTER TTL
- time.Sleep(time.Second * 15)
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- // DELETE
- keysDel := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "e",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.Delete", keysDel, ret)
- assert.NoError(t, err)
-
- // HAS AFTER DELETE
- ret = &payload.Response{}
- err = client.Call("kv.Has", keysDel, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- dataClear := &payload.Request{
- Storage: "memory-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- clear := &payload.Request{Storage: "memory-rr"}
-
- ret = &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", dataClear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 5) // should be 5
-
- ret = &payload.Response{}
- err = client.Call("kv.Clear", clear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
-	assert.Len(t, ret.GetItems(), 0) // should be 0 after Clear
-}
-
-func TestRedis(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-redis.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &redis.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("REDIS", testRPCMethodsRedis)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestRedisGlobalSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-redis-global.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &redis.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("REDIS", testRPCMethodsRedis)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestRedisNoConfig(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
-		Path:   "configs/.rr-redis-no-config.yaml", // no config section for the redis driver, defaults should be used
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", []string{"kv"}).AnyTimes()
-
- mockLogger.EXPECT().Error(`can't find local or global configuration, this section will be skipped`, "local: ", "kv.redis-rr.config", "global: ", "redis-rr").Times(1)
-
- err = cont.RegisterAll(
- cfg,
- &kv.Plugin{},
- &redis.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- &memory.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- assert.NoError(t, err)
-}
-
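-// testRPCMethodsRedis runs the same RPC scenario against the redis-rr storage.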
-func testRPCMethodsRedis(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- // add 5 second ttl
- tt := time.Now().Add(time.Second * 5).Format(time.RFC3339)
- keys := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "c",
- },
- },
- }
-
- data := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- Timeout: tt,
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- ret := &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", data, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3) // should be 3
-
-	// key "c" should have expired by now
- time.Sleep(time.Second * 7)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // should be 2
-
- ret = &payload.Response{}
- err = client.Call("kv.MGet", keys, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 2) // c is expired
-
- tt2 := time.Now().Add(time.Second * 10).Format(time.RFC3339)
-
- data2 := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Timeout: tt2,
- },
- {
- Key: "b",
- Timeout: tt2,
- },
- {
- Key: "d",
- Timeout: tt2,
- },
- },
- }
-
- // MEXPIRE
- ret = &payload.Response{}
- err = client.Call("kv.MExpire", data2, ret)
- assert.NoError(t, err)
-
- // TTL
- keys2 := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- },
- {
- Key: "b",
- },
- {
- Key: "d",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.TTL", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 3)
-
- // HAS AFTER TTL
- time.Sleep(time.Second * 15)
- ret = &payload.Response{}
- err = client.Call("kv.Has", keys2, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- // DELETE
- keysDel := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "e",
- },
- },
- }
-
- ret = &payload.Response{}
- err = client.Call("kv.Delete", keysDel, ret)
- assert.NoError(t, err)
-
- // HAS AFTER DELETE
- ret = &payload.Response{}
- err = client.Call("kv.Has", keysDel, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 0)
-
- dataClear := &payload.Request{
- Storage: "redis-rr",
- Items: []*payload.Item{
- {
- Key: "a",
- Value: []byte("aa"),
- },
- {
- Key: "b",
- Value: []byte("bb"),
- },
- {
- Key: "c",
- Value: []byte("cc"),
- },
- {
- Key: "d",
- Value: []byte("dd"),
- },
- {
- Key: "e",
- Value: []byte("ee"),
- },
- },
- }
-
- clear := &payload.Request{Storage: "redis-rr"}
-
- ret = &payload.Response{}
-	// Register 5 keys with values
- err = client.Call("kv.Set", dataClear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
- assert.Len(t, ret.GetItems(), 5) // should be 5
-
- ret = &payload.Response{}
- err = client.Call("kv.Clear", clear, ret)
- assert.NoError(t, err)
-
- ret = &payload.Response{}
- err = client.Call("kv.Has", dataClear, ret)
- assert.NoError(t, err)
-	assert.Len(t, ret.GetItems(), 0) // should be 0 after Clear
-}
diff --git a/tests/plugins/logger/configs/.rr-file-logger.yaml b/tests/plugins/logger/configs/.rr-file-logger.yaml
deleted file mode 100644
index 49c30d02..00000000
--- a/tests/plugins/logger/configs/.rr-file-logger.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:54224
- max_request_size: 1024
- middleware: [ ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-logs:
- mode: development
- level: debug
- file_logger_options:
- log_output: "test.log"
diff --git a/tests/plugins/logger/configs/.rr-no-logger.yaml b/tests/plugins/logger/configs/.rr-no-logger.yaml
deleted file mode 100644
index e69de29b..00000000
--- a/tests/plugins/logger/configs/.rr-no-logger.yaml
+++ /dev/null
diff --git a/tests/plugins/logger/configs/.rr-no-logger2.yaml b/tests/plugins/logger/configs/.rr-no-logger2.yaml
deleted file mode 100644
index 810ea88f..00000000
--- a/tests/plugins/logger/configs/.rr-no-logger2.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../http/client.php echo pipes"
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:18945
- max_request_size: 1024
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/logger/configs/.rr-raw-mode.yaml b/tests/plugins/logger/configs/.rr-raw-mode.yaml
deleted file mode 100644
index fba25945..00000000
--- a/tests/plugins/logger/configs/.rr-raw-mode.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-server:
- command: "php ../../raw-error.php"
- relay: "pipes"
-
-http:
- address: 127.0.0.1:34999
-  max_request_size: 1024
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 10s
- destroy_timeout: 10s
-
-logs:
- mode: raw
diff --git a/tests/plugins/logger/configs/.rr.yaml b/tests/plugins/logger/configs/.rr.yaml
deleted file mode 100644
index 5ab359d3..00000000
--- a/tests/plugins/logger/configs/.rr.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-logs:
- mode: development
-  level: error
\ No newline at end of file
diff --git a/tests/plugins/logger/logger_test.go b/tests/plugins/logger/logger_test.go
deleted file mode 100644
index e077f0bc..00000000
--- a/tests/plugins/logger/logger_test.go
+++ /dev/null
@@ -1,430 +0,0 @@
-package logger
-
-import (
- "net/http"
- "os"
- "os/signal"
- "strings"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestLogger(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(false), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &Plugin{},
- &logger.ZapLogger{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- err = container.Stop()
- assert.NoError(t, err)
- return
- case <-stopCh:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestLoggerRawErr(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- // config plugin
- cfg := &config.Viper{}
- cfg.Path = "configs/.rr-raw-mode.yaml"
- cfg.Prefix = "rr"
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Info("{\"field\": \"value\"}").MinTimes(1)
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestLoggerNoConfig(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-no-logger.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &Plugin{},
- &logger.ZapLogger{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- err = container.Stop()
- assert.NoError(t, err)
- return
- case <-stopCh:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-// Should not panic
-func TestLoggerNoConfig2(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-no-logger2.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &rpc.Plugin{},
- &logger.ZapLogger{},
- &httpPlugin.Plugin{},
- &server.Plugin{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- err = container.Stop()
- assert.NoError(t, err)
- return
- case <-stopCh:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestFileLogger(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-file-logger.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &rpc.Plugin{},
- &logger.ZapLogger{},
- &httpPlugin.Plugin{},
- &server.Plugin{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- err = container.Stop()
- assert.NoError(t, err)
- return
- case <-stopCh:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- t.Run("HTTPEchoReq", httpEcho)
-
- f, err := os.ReadFile("test.log")
- if err != nil {
- t.Fatal(err)
- }
-
-	assert.Contains(t, string(f), "worker constructed")
-	assert.Contains(t, string(f), "201 GET")
-
- _ = os.Remove("test.log")
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestMarshalObjectLogging(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-file-logger.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &Plugin{},
- &logger.ZapLogger{},
- )
- assert.NoError(t, err)
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- err = container.Stop()
- assert.NoError(t, err)
- return
- case <-stopCh:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
-
- f, err := os.ReadFile("test.log")
- if err != nil {
- t.Fatal(err)
- }
-
- assert.Contains(t, string(f), "Example marshaller error")
- assert.Equal(t, 4, strings.Count(string(f), "Example marshaller error"))
-
- _ = os.Remove("test.log")
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func httpEcho(t *testing.T) {
- req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:54224?hello=world", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- assert.Equal(t, http.StatusCreated, r.StatusCode)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
diff --git a/tests/plugins/logger/plugin.go b/tests/plugins/logger/plugin.go
deleted file mode 100644
index 54e78d7b..00000000
--- a/tests/plugins/logger/plugin.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package logger
-
-import (
- "strings"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "go.uber.org/zap"
- core "go.uber.org/zap/zapcore"
-)
-
-type Plugin struct {
- config config.Configurer
- log logger.Logger
-}
-
-type Loggable struct {
-}
-
-func (l *Loggable) MarshalLogObject(encoder core.ObjectEncoder) error {
- encoder.AddString("error", "Example marshaller error")
- return nil
-}
-
-func (p1 *Plugin) Init(cfg config.Configurer, log logger.Logger) error {
- p1.config = cfg
- p1.log = log
- return nil
-}
-
-func (p1 *Plugin) Serve() chan error {
- errCh := make(chan error, 1)
- p1.log.Error("error", "test", errors.E(errors.Str("test")))
- p1.log.Info("error", "test", errors.E(errors.Str("test")))
- p1.log.Debug("error", "test", errors.E(errors.Str("test")))
- p1.log.Warn("error", "test", errors.E(errors.Str("test")))
-
- field := zap.String("error", "Example field error")
-
- p1.log.Error("error", field)
- p1.log.Info("error", field)
- p1.log.Debug("error", field)
- p1.log.Warn("error", field)
-
- marshalledObject := &Loggable{}
-
- p1.log.Error("error", marshalledObject)
- p1.log.Info("error", marshalledObject)
- p1.log.Debug("error", marshalledObject)
- p1.log.Warn("error", marshalledObject)
-
- p1.log.Error("error", "test")
- p1.log.Info("error", "test")
- p1.log.Debug("error", "test")
- p1.log.Warn("error", "test")
-
- // test the `raw` mode
- messageJSON := []byte(`{"field": "value"}`)
- p1.log.Debug(strings.TrimRight(string(messageJSON), " \n\t"))
-
- return errCh
-}
-
-func (p1 *Plugin) Stop() error {
- return nil
-}
-
-func (p1 *Plugin) Name() string {
- return "logger_plugin"
-}
diff --git a/tests/plugins/metrics/configs/.rr-http-metrics.yaml b/tests/plugins/metrics/configs/.rr-http-metrics.yaml
deleted file mode 100644
index 3e92a88c..00000000
--- a/tests/plugins/metrics/configs/.rr-http-metrics.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- relay: "pipes"
-
-http:
- address: 127.0.0.1:13223
- max_request_size: 1024
- middleware: [ ]
- pool:
- num_workers: 1
-
-metrics:
- address: 127.0.0.1:2112
-
-logs:
- mode: development
- level: debug
diff --git a/tests/plugins/metrics/configs/.rr-issue-571.yaml b/tests/plugins/metrics/configs/.rr-issue-571.yaml
deleted file mode 100644
index 872f777a..00000000
--- a/tests/plugins/metrics/configs/.rr-issue-571.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../metrics-issue-571.php"
-
-http:
- address: "0.0.0.0:56444"
- pool:
- num_workers: 5
-
-metrics:
- address: "0.0.0.0:23557"
diff --git a/tests/plugins/metrics/configs/.rr-test.yaml b/tests/plugins/metrics/configs/.rr-test.yaml
deleted file mode 100644
index d6f529f5..00000000
--- a/tests/plugins/metrics/configs/.rr-test.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-metrics:
- # prometheus client address (path /metrics added automatically)
- address: "[::1]:2112"
- collect:
- app_metric:
- type: histogram
- help: "Custom application metric"
- labels: [ "type" ]
- buckets: [ 0.1, 0.2, 0.3, 1.0 ]
- app_metric_counter:
- type: counter
- help: "Custom application counter."
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/metrics/metrics_test.go b/tests/plugins/metrics/metrics_test.go
deleted file mode 100644
index c590ab2e..00000000
--- a/tests/plugins/metrics/metrics_test.go
+++ /dev/null
@@ -1,1091 +0,0 @@
-package metrics
-
-import (
- "io/ioutil"
- "net"
- "net/http"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/metrics"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-const dialAddr = "127.0.0.1:6001"
-const dialNetwork = "tcp"
-const getAddr = "http://127.0.0.1:2112/metrics"
-const getIPV6Addr = "http://[::1]:2112/metrics"
-
-func TestMetricsInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-test.yaml"
-
- err = cont.RegisterAll(
- cfg,
- &metrics.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &Plugin1{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- tt := time.NewTimer(time.Second * 5)
- defer tt.Stop()
-
- time.Sleep(time.Second * 2)
- out, err := getIPV6()
- assert.NoError(t, err)
-
- assert.Contains(t, out, "go_gc_duration_seconds")
- assert.Contains(t, out, "app_metric_counter")
-
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
-}
-
-func TestMetricsIssue571(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-issue-571.yaml"
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", []string{"metrics"}).MinTimes(1)
- mockLogger.EXPECT().Debug("200 GET http://127.0.0.1:56444/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "test", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "test", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "test", "labels", []string{}, "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "test", "value", gomock.Any(), "labels", []string{}).MinTimes(1)
- mockLogger.EXPECT().Error("metric with provided name already exist", "name", "test", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(3)
- mockLogger.EXPECT().Info("scan command", gomock.Any()).AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &metrics.Plugin{},
- &rpcPlugin.Plugin{},
- &server.Plugin{},
- mockLogger,
- &httpPlugin.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
-	// give the HTTP server some time to start
- time.Sleep(time.Second * 2)
- _, err = issue571Http()
- assert.NoError(t, err)
-
- out, err := issue571Metrics()
- assert.NoError(t, err)
-
- assert.Contains(t, out, "HELP test Test counter")
- assert.Contains(t, out, "TYPE test counter")
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-// get request and return body
-func issue571Http() (string, error) {
- r, err := http.Get("http://127.0.0.1:56444")
- if err != nil {
- return "", err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", err
- }
- // unsafe
- return string(b), err
-}
-
-// get request and return body
-func issue571Metrics() (string, error) {
- r, err := http.Get("http://127.0.0.1:23557")
- if err != nil {
- return "", err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", err
- }
- // unsafe
- return string(b), err
-}
-
-func TestMetricsGaugeCollector(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-test.yaml"
-
- err = cont.RegisterAll(
- cfg,
- &metrics.Plugin{},
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &Plugin1{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- time.Sleep(time.Second)
- tt := time.NewTimer(time.Second * 5)
- defer tt.Stop()
-
- time.Sleep(time.Second * 2)
- out, err := getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, out, "my_gauge 100")
- assert.Contains(t, out, "my_gauge2 100")
-
- out, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, out, "go_gc_duration_seconds")
-
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
-}
-
-func TestMetricsDifferentRPCCalls(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-test.yaml"
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", []string{"metrics"}).MinTimes(1)
-
- mockLogger.EXPECT().Info("adding metric", "name", "counter_CounterMetric", "value", gomock.Any(), "labels", []string{"type2", "section2"}).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "histogram_registerHistogram", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "sub_gauge_subVector", "value", gomock.Any(), "labels", []string{"core", "first"}).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "sub_gauge_subMetric", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "test_metrics_named_collector", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("adding metric", "name", "app_metric_counter", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("metric successfully added", "name", "observe_observeMetricNotEnoughLabels", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "observe_observeMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "counter_CounterMetric", "labels", []string{"type2", "section2"}, "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "counter_CounterMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "histogram_registerHistogram", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "sub_gauge_subVector", "labels", []string{"core", "first"}, "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "sub_gauge_subVector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "sub_gauge_subMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "sub_gauge_subMetric", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "histogram_setOnHistogram", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "gauge_setWithoutLabels", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "gauge_missing_section_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "gauge_2_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "test_metrics_named_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "test_metrics_named_collector", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "user_gauge_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("metric successfully added", "name", "app_metric_counter", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("declaring new metric", "name", "observe_observeMetricNotEnoughLabels", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "observe_observeMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "counter_CounterMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "histogram_registerHistogram", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "sub_gauge_subVector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "sub_gauge_subMetric", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "histogram_setOnHistogram", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "gauge_setWithoutLabels", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "gauge_missing_section_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "test_metrics_named_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "gauge_2_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("declaring new metric", "name", "user_gauge_collector", "type", gomock.Any(), "namespace", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("observing metric", "name", "observe_observeMetric", "value", gomock.Any(), "labels", []string{"test"}).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "observe_observeMetric", "value", gomock.Any(), "labels", []string{"test", "test2"}).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "gauge_setOnHistogram", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "gauge_setWithoutLabels", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "gauge_missing_section_collector", "value", gomock.Any(), "labels", []string{"missing"}).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "user_gauge_collector", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("observing metric", "name", "gauge_2_collector", "value", gomock.Any(), "labels", []string{"core", "first"}).MinTimes(1)
-
- mockLogger.EXPECT().Info("observe operation finished successfully", "name", "observe_observeMetric", "labels", []string{"test", "test2"}, "value", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("set operation finished successfully", "name", "gauge_2_collector", "labels", []string{"core", "first"}, "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("set operation finished successfully", "name", "user_gauge_collector", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("subtracting value from metric", "name", "sub_gauge_subVector", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("subtracting value from metric", "name", "sub_gauge_subMetric", "value", gomock.Any(), "labels", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Info("subtracting operation finished successfully", "name", "sub_gauge_subVector", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("subtracting operation finished successfully", "name", "sub_gauge_subMetric", "labels", gomock.Any(), "value", gomock.Any()).MinTimes(1)
-
- mockLogger.EXPECT().Error("failed to get metrics with label values", "collector", "gauge_missing_section_collector", "labels", []string{"missing"}).MinTimes(1)
- mockLogger.EXPECT().Error("required labels for collector", "collector", "gauge_setWithoutLabels").MinTimes(1)
- mockLogger.EXPECT().Error("failed to get metrics with label values", "collector", "observe_observeMetric", "labels", []string{"test"}).MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- &metrics.Plugin{},
- &rpcPlugin.Plugin{},
- mockLogger,
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- tt := time.NewTimer(time.Minute * 3)
- defer tt.Stop()
-
- go func() {
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- t.Run("DeclareMetric", declareMetricsTest)
- genericOut, err := getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "test_metrics_named_collector")
-
- t.Run("AddMetric", addMetricsTest)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "test_metrics_named_collector 10000")
-
- t.Run("SetMetric", setMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "user_gauge_collector 100")
-
- t.Run("VectorMetric", vectorMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "gauge_2_collector{section=\"first\",type=\"core\"} 100")
-
- t.Run("MissingSection", missingSection)
- t.Run("SetWithoutLabels", setWithoutLabels)
- t.Run("SetOnHistogram", setOnHistogram)
- t.Run("MetricSub", subMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "sub_gauge_subMetric 1")
-
- t.Run("SubVector", subVector)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "sub_gauge_subVector{section=\"first\",type=\"core\"} 1")
-
- t.Run("RegisterHistogram", registerHistogram)
-
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, `TYPE histogram_registerHistogram`)
-
- // check buckets
- assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.1"} 0`)
- assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.2"} 0`)
- assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="0.5"} 0`)
- assert.Contains(t, genericOut, `histogram_registerHistogram_bucket{le="+Inf"} 0`)
- assert.Contains(t, genericOut, `histogram_registerHistogram_sum 0`)
- assert.Contains(t, genericOut, `histogram_registerHistogram_count 0`)
-
- t.Run("CounterMetric", counterMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "HELP default_default_counter_CounterMetric test_counter")
- assert.Contains(t, genericOut, `default_default_counter_CounterMetric{section="section2",type="type2"}`)
-
- t.Run("ObserveMetric", observeMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "observe_observeMetric")
-
- t.Run("ObserveMetricNotEnoughLabels", observeMetricNotEnoughLabels)
-
- t.Run("ConfiguredCounterMetric", configuredCounterMetric)
- genericOut, err = getIPV6()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, "HELP app_metric_counter Custom application counter.")
- assert.Contains(t, genericOut, `app_metric_counter 100`)
-
- close(sig)
-}
-
-func configuredCounterMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- assert.NoError(t, client.Call("metrics.Add", metrics.Metric{
- Name: "app_metric_counter",
- Value: 100.0,
- }, &ret))
- assert.True(t, ret)
-}
-
-func observeMetricNotEnoughLabels(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "observe_observeMetricNotEnoughLabels",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Help: "test_observe",
- Type: metrics.Histogram,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- assert.Error(t, client.Call("metrics.Observe", metrics.Metric{
- Name: "observe_observeMetric",
- Value: 100.0,
- Labels: []string{"test"},
- }, &ret))
- assert.False(t, ret)
-}
-
-func observeMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "observe_observeMetric",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Help: "test_observe",
- Type: metrics.Histogram,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- assert.NoError(t, client.Call("metrics.Observe", metrics.Metric{
- Name: "observe_observeMetric",
- Value: 100.0,
- Labels: []string{"test", "test2"},
- }, &ret))
- assert.True(t, ret)
-}
-
-func counterMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "counter_CounterMetric",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Help: "test_counter",
- Type: metrics.Counter,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- assert.NoError(t, client.Call("metrics.Add", metrics.Metric{
- Name: "counter_CounterMetric",
- Value: 100.0,
- Labels: []string{"type2", "section2"},
- }, &ret))
- assert.True(t, ret)
-}
-
-func registerHistogram(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "histogram_registerHistogram",
- Collector: metrics.Collector{
- Help: "test_histogram",
- Type: metrics.Histogram,
- Buckets: []float64{0.1, 0.2, 0.5},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- m := metrics.Metric{
- Name: "histogram_registerHistogram",
- Value: 10000,
- Labels: nil,
- }
-
- err = client.Call("metrics.Add", m, &ret)
- assert.Error(t, err)
- assert.False(t, ret)
-}
-
-func subVector(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "sub_gauge_subVector",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- m := metrics.Metric{
- Name: "sub_gauge_subVector",
- Value: 100000,
- Labels: []string{"core", "first"},
- }
-
- err = client.Call("metrics.Add", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- m = metrics.Metric{
- Name: "sub_gauge_subVector",
- Value: 99999,
- Labels: []string{"core", "first"},
- }
-
- err = client.Call("metrics.Sub", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func subMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "sub_gauge_subMetric",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- m := metrics.Metric{
- Name: "sub_gauge_subMetric",
- Value: 100000,
- }
-
- err = client.Call("metrics.Add", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- m = metrics.Metric{
- Name: "sub_gauge_subMetric",
- Value: 99999,
- }
-
- err = client.Call("metrics.Sub", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func setOnHistogram(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "histogram_setOnHistogram",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Histogram,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- m := metrics.Metric{
- Name: "gauge_setOnHistogram",
- Value: 100.0,
- }
-
-	err = client.Call("metrics.Set", m, &ret) // expected to fail
- assert.Error(t, err)
- assert.False(t, ret)
-}
-
-func setWithoutLabels(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "gauge_setWithoutLabels",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- m := metrics.Metric{
- Name: "gauge_setWithoutLabels",
- Value: 100.0,
- }
-
-	err = client.Call("metrics.Set", m, &ret) // expected to fail: required labels are not provided
- assert.Error(t, err)
- assert.False(t, ret)
-}
-
-func missingSection(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "gauge_missing_section_collector",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- m := metrics.Metric{
- Name: "gauge_missing_section_collector",
- Value: 100.0,
- Labels: []string{"missing"},
- }
-
- err = client.Call("metrics.Set", m, &ret) // expected 2 label values but got 1 in []string{"missing"}
- assert.Error(t, err)
- assert.False(t, ret)
-}
-
-func vectorMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "gauge_2_collector",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- Labels: []string{"type", "section"},
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-
- ret = false
-
- m := metrics.Metric{
- Name: "gauge_2_collector",
- Value: 100.0,
- Labels: []string{"core", "first"},
- }
-
- err = client.Call("metrics.Set", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func setMetric(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "user_gauge_collector",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Gauge,
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- m := metrics.Metric{
- Name: "user_gauge_collector",
- Value: 100.0,
- }
-
- err = client.Call("metrics.Set", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func addMetricsTest(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- m := metrics.Metric{
- Name: "test_metrics_named_collector",
- Value: 10000,
- Labels: nil,
- }
-
- err = client.Call("metrics.Add", m, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func declareMetricsTest(t *testing.T) {
- conn, err := net.Dial(dialNetwork, dialAddr)
- assert.NoError(t, err)
- defer func() {
- _ = conn.Close()
- }()
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret bool
-
- nc := metrics.NamedCollector{
- Name: "test_metrics_named_collector",
- Collector: metrics.Collector{
- Namespace: "default",
- Subsystem: "default",
- Type: metrics.Counter,
- Help: "NO HELP!",
- Labels: nil,
- Buckets: nil,
- },
- }
-
- err = client.Call("metrics.Declare", nc, &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
-}
-
-func TestHTTPMetrics(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{}
- cfg.Prefix = "rr"
- cfg.Path = "configs/.rr-http-metrics.yaml"
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("200 GET http://127.0.0.1:13223/", "remote", gomock.Any(), "elapsed", gomock.Any()).MinTimes(1)
-
- err = cont.RegisterAll(
- cfg,
- &metrics.Plugin{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- mockLogger,
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- tt := time.NewTimer(time.Minute * 3)
-
- go func() {
- defer tt.Stop()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 2)
- t.Run("req1", echoHTTP)
- t.Run("req2", echoHTTP)
-
- genericOut, err := get()
- assert.NoError(t, err)
- assert.Contains(t, genericOut, `rr_http_request_duration_seconds_bucket`)
- assert.Contains(t, genericOut, `rr_http_request_duration_seconds_sum{status="200"}`)
- assert.Contains(t, genericOut, `rr_http_request_duration_seconds_count{status="200"} 2`)
- assert.Contains(t, genericOut, `rr_http_request_total{status="200"} 2`)
- assert.Contains(t, genericOut, "rr_http_workers_memory_bytes")
-
- close(sig)
-}
-
-func echoHTTP(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:13223", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- _, err = ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-// get request and return body
-func get() (string, error) {
- r, err := http.Get(getAddr)
- if err != nil {
- return "", err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", err
- }
- // unsafe
- return string(b), err
-}
-
-// get request and return body
-func getIPV6() (string, error) {
- r, err := http.Get(getIPV6Addr)
- if err != nil {
- return "", err
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return "", err
- }
-
- err = r.Body.Close()
- if err != nil {
- return "", err
- }
- // unsafe
- return string(b), err
-}
diff --git a/tests/plugins/metrics/plugin1.go b/tests/plugins/metrics/plugin1.go
deleted file mode 100644
index ae024a8a..00000000
--- a/tests/plugins/metrics/plugin1.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package metrics
-
-import (
- "github.com/prometheus/client_golang/prometheus"
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-// Gauge //////////////
-type Plugin1 struct {
- config config.Configurer
-}
-
-func (p1 *Plugin1) Init(cfg config.Configurer) error {
- p1.config = cfg
- return nil
-}
-
-func (p1 *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
- return errCh
-}
-
-func (p1 *Plugin1) Stop() error {
- return nil
-}
-
-func (p1 *Plugin1) Name() string {
- return "metrics_test.plugin1"
-}
-
-func (p1 *Plugin1) MetricsCollector() []prometheus.Collector {
- collector := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "my_gauge",
- Help: "My gauge value",
- })
-
- collector.Set(100)
-
- collector2 := prometheus.NewGauge(prometheus.GaugeOpts{
- Name: "my_gauge2",
- Help: "My gauge2 value",
- })
-
- collector2.Set(100)
- return []prometheus.Collector{collector, collector2}
-}
diff --git a/tests/plugins/reload/config_test.go b/tests/plugins/reload/config_test.go
deleted file mode 100644
index 72c11070..00000000
--- a/tests/plugins/reload/config_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package reload
-
-import (
- "testing"
- "time"
-
- "github.com/spiral/roadrunner/v2/plugins/reload"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_Config_Valid(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- services["test"] = reload.ServiceConfig{
- Recursive: false,
- Patterns: nil,
- Dirs: nil,
- Ignore: nil,
- }
-
- cfg := &reload.Config{
- Interval: time.Second,
- Patterns: nil,
- Services: services,
- }
- assert.NoError(t, cfg.Valid())
-}
-
-func Test_Fake_ServiceConfig(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- cfg := &reload.Config{
- Interval: time.Microsecond,
- Patterns: nil,
- Services: services,
- }
- assert.Error(t, cfg.Valid())
-}
-
-func Test_Interval(t *testing.T) {
- services := make(map[string]reload.ServiceConfig)
- services["test"] = reload.ServiceConfig{
- Enabled: false,
- Recursive: false,
- Patterns: nil,
- Dirs: nil,
- Ignore: nil,
- }
-
- cfg := &reload.Config{
- Interval: time.Millisecond, // should crash here
- Patterns: nil,
- Services: services,
- }
- assert.Error(t, cfg.Valid())
-}
-
-func Test_NoServiceConfig(t *testing.T) {
- cfg := &reload.Config{
- Interval: time.Second,
- Patterns: nil,
- Services: nil,
- }
- assert.Error(t, cfg.Valid())
-}
diff --git a/tests/plugins/reload/configs/.rr-reload-2.yaml b/tests/plugins/reload/configs/.rr-reload-2.yaml
deleted file mode 100644
index 6a9d7582..00000000
--- a/tests/plugins/reload/configs/.rr-reload-2.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-server:
- command: php ../../psr-worker-bench.php
- relay: pipes
- relay_timeout: 20s
-http:
- address: '127.0.0.1:27388'
- max_request_size: 1024
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trusted_subnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: debug
-reload:
- interval: 1s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- recursive: true
diff --git a/tests/plugins/reload/configs/.rr-reload-3.yaml b/tests/plugins/reload/configs/.rr-reload-3.yaml
deleted file mode 100644
index 36af2693..00000000
--- a/tests/plugins/reload/configs/.rr-reload-3.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-server:
- command: php ../../psr-worker-bench.php
- relay: pipes
- relay_timeout: 20s
-http:
- address: '127.0.0.1:37388'
- max_request_size: 1024
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trusted_subnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: debug
-reload:
- interval: 1s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- - './unit_tests_copied'
- - './dir1'
- recursive: true
diff --git a/tests/plugins/reload/configs/.rr-reload-4.yaml b/tests/plugins/reload/configs/.rr-reload-4.yaml
deleted file mode 100644
index ceec11c4..00000000
--- a/tests/plugins/reload/configs/.rr-reload-4.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-server:
- command: php ../../psr-worker-bench.php
- relay: pipes
- relay_timeout: 20s
-http:
- address: '127.0.0.1:22766'
- max_request_size: 1024
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trusted_subnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: debug
-
-reload:
- interval: 1s
- patterns:
- - .aaa
- services:
- http:
- dirs:
- - './unit_tests'
- - './unit_tests_copied'
- - './dir1'
- recursive: false
diff --git a/tests/plugins/reload/configs/.rr-reload.yaml b/tests/plugins/reload/configs/.rr-reload.yaml
deleted file mode 100644
index 5048ddda..00000000
--- a/tests/plugins/reload/configs/.rr-reload.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-server:
- command: php ../../psr-worker-bench.php
- relay: pipes
- relay_timeout: 20s
-http:
- address: '127.0.0.1:22388'
- max_request_size: 1024
- uploads:
- forbid:
- - .php
- - .exe
- - .bat
- trusted_subnets:
- - 10.0.0.0/8
- - 127.0.0.0/8
- - 172.16.0.0/12
- - 192.168.0.0/16
- - '::1/128'
- - 'fc00::/7'
- - 'fe80::/10'
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-logs:
- mode: development
- level: debug
-reload:
- interval: 1s
- patterns:
- - .txt
- services:
- http:
- dirs:
- - './unit_tests'
- recursive: true
diff --git a/tests/plugins/reload/reload_plugin_test.go b/tests/plugins/reload/reload_plugin_test.go
deleted file mode 100644
index 21c27e49..00000000
--- a/tests/plugins/reload/reload_plugin_test.go
+++ /dev/null
@@ -1,852 +0,0 @@
-package reload
-
-import (
- "io"
- "io/ioutil"
- "math/rand"
- "os"
- "os/signal"
- "path/filepath"
- "strconv"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/reload"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-const testDir string = "unit_tests"
-const testCopyToDir string = "unit_tests_copied"
-const dir1 string = "dir1"
-const hugeNumberOfFiles uint = 500
-
-func TestReloadInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload.yaml",
- Prefix: "rr",
- }
-
-	// clean up any leftovers from previous runs
- assert.NoError(t, freeResources(testDir))
- err = os.Mkdir(testDir, 0755)
- assert.NoError(t, err)
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", "file.txt", "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP handler listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- t.Run("ReloadTestInit", reloadTestInit)
- time.Sleep(time.Second * 3)
- stopCh <- struct{}{}
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
-}
-
-func reloadTestInit(t *testing.T) {
- err := ioutil.WriteFile(filepath.Join(testDir, "file.txt"), //nolint:gosec
- []byte{}, 0755)
- assert.NoError(t, err)
-}
-
-func TestReloadHugeNumberOfFiles(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP handler listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- t.Run("ReloadTestHugeNumberOfFiles", reloadHugeNumberOfFiles)
- t.Run("ReloadRandomlyChangeFile", randomlyChangeFile)
- time.Sleep(time.Second * 10)
-
- stopCh <- struct{}{}
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-}
-
-func randomlyChangeFile(t *testing.T) {
- // we know that the directory contains 500 files (0-499)
- // try to change some of them at random
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) //nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".txt"), []byte("Hello, Gophers!"), 0755) //nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-func reloadHugeNumberOfFiles(t *testing.T) {
- for i := uint(0); i < hugeNumberOfFiles; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
-}
-
-// Only creation events for files with the txt extension should be emitted
-func TestReloadFilterFileExt(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-2.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, os.Mkdir(testDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(100)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP handler listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- t.Run("ReloadMakeFiles", reloadMakeFiles)
- time.Sleep(time.Second * 2)
- t.Run("ReloadFilteredExt", reloadFilteredExt)
- time.Sleep(time.Second * 10)
-
- stopCh <- struct{}{}
- wg.Wait()
-
- assert.NoError(t, freeResources(testDir))
-}
-
-func reloadMakeFiles(t *testing.T) {
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 100; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-}
-
-func reloadFilteredExt(t *testing.T) {
- // change files with abc extension
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(1000) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) //nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".abc"), []byte("Hello, Gophers!"), 0755) //nolint:gosec
- assert.NoError(t, err)
- }
-
- // change files with def extension
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(1000) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(hugeNumberOfFiles)) //nolint:gosec
- err := ioutil.WriteFile(filepath.Join(testDir, "file_"+strconv.Itoa(int(rNum))+".def"), []byte("Hello, Gophers!"), 0755) //nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-// Events should be emitted for created, updated and removed files during the copy/move scenario below
-func TestReloadCopy100(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-3.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
- assert.NoError(t, os.Mkdir(dir1, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
- //
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file was created", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was added to watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was removed from watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Debug("file was updated", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(50)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").AnyTimes()
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP handler listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- // Scenario:
- // 1. Create 100 files with txt, abc and def extensions,
- //    then copy them to the unit_tests_copied dir
- // 2. Delete both dirs, recreate them, create 100 files again
- //    and copy them to the unit_tests_copied dir
- // 3. Recursively copy the files into nested dirs, apply random
- //    changes, then remove and move some of them
-
- time.Sleep(time.Second * 3)
- t.Run("ReloadMake100Files", reloadMake100Files)
- time.Sleep(time.Second * 2)
- t.Run("ReloadCopyFiles", reloadCopyFiles)
- time.Sleep(time.Second * 2)
- t.Run("ReloadRecursiveDirsSupport", copyFilesRecursive)
- time.Sleep(time.Second * 2)
- t.Run("RandomChangesInRecursiveDirs", randomChangesInRecursiveDirs)
- time.Sleep(time.Second * 2)
- t.Run("RemoveFilesSupport", removeFilesSupport)
- time.Sleep(time.Second * 2)
- t.Run("ReloadMoveSupport", reloadMoveSupport)
- time.Sleep(time.Second * 10)
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- time.Sleep(time.Second * 3)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func reloadMoveSupport(t *testing.T) {
- t.Run("MoveSupportCopy", copyFilesRecursive)
- // move some files
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(33)) //nolint:gosec
- rDir := rand.Int63n(9) //nolint:gosec
- rExt := rand.Int63n(3) //nolint:gosec
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- // nested directories used by the reload tests
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
-
- // move file
- err := os.Rename(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]), filepath.Join(dirs[rDir+1], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
- assert.NoError(t, err)
- }
-}
-
-func removeFilesSupport(t *testing.T) {
- // remove some files
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(500) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(100)) //nolint:gosec
- rDir := rand.Int63n(10) //nolint:gosec
- rExt := rand.Int63n(3) //nolint:gosec
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- // nested directories used by the reload tests
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
- // the file may already have been deleted, so the error is ignored
- _ = os.Remove(filepath.Join(dirs[rDir], "file_"+strconv.Itoa(int(rNum))+ext[rExt]))
- }
-}
-
-func randomChangesInRecursiveDirs(t *testing.T) {
- // nested directories used by the reload tests
- dirs := []string{
- "dir1",
- "dir1/dir2",
- "dir1/dir2/dir3",
- "dir1/dir2/dir3/dir4",
- "dir1/dir2/dir3/dir4/dir5",
- "dir1/dir2/dir3/dir4/dir5/dir6",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9",
- "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10",
- }
-
- ext := []string{
- ".txt",
- ".abc",
- ".def",
- }
-
- filenames := []string{
- "file_", // should be update
- "foo_", // should be created
- "bar_", // should be created
- }
- for i := 0; i < 10; i++ {
- // rand sleep
- rSleep := rand.Int63n(100) //nolint:gosec
- time.Sleep(time.Millisecond * time.Duration(rSleep))
- rNum := rand.Int63n(int64(100)) //nolint:gosec
- rDir := rand.Int63n(10) //nolint:gosec
- rExt := rand.Int63n(3) //nolint:gosec
- rName := rand.Int63n(3) //nolint:gosec
-
- err := ioutil.WriteFile(filepath.Join(dirs[rDir], filenames[rName]+strconv.Itoa(int(rNum))+ext[rExt]), []byte("Hello, Gophers!"), 0755) //nolint:gosec
- assert.NoError(t, err)
- }
-}
-
-func copyFilesRecursive(t *testing.T) {
- err := copyDir(testDir, "dir1")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9")
- assert.NoError(t, err)
- err = copyDir(testDir, "dir1/dir2/dir3/dir4/dir5/dir6/dir7/dir8/dir9/dir10")
- assert.NoError(t, err)
-}
-
-func reloadCopyFiles(t *testing.T) {
- err := copyDir(testDir, testCopyToDir)
- assert.NoError(t, err)
-
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- // recreate files
- for i := uint(0); i < 33; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 33; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 34; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-
- err = copyDir(testDir, testCopyToDir)
- assert.NoError(t, err)
-}
-
-func reloadMake100Files(t *testing.T) {
- for i := uint(0); i < 33; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".txt"))
- }
- for i := uint(0); i < 33; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".abc"))
- }
- for i := uint(0); i < 34; i++ {
- assert.NoError(t, makeFile("file_"+strconv.Itoa(int(i))+".def"))
- }
-}
-
-func TestReloadNoRecursion(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-reload-4.yaml",
- Prefix: "rr",
- }
-
- // try to remove, skip error
- assert.NoError(t, freeResources(testDir))
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-
- assert.NoError(t, os.Mkdir(testDir, 0755))
- assert.NoError(t, os.Mkdir(dir1, 0755))
- assert.NoError(t, os.Mkdir(testCopyToDir, 0755))
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- // the HTTP server should not be restarted; all events from files with non-matching extensions should be skipped
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("file was removed from watcher", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Debug("file added to the list of removed files", "path", gomock.Any(), "name", gomock.Any(), "size", gomock.Any()).MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin got restart request. Restarting...").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP workers Pool successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP handler listeners successfully re-added").MinTimes(1)
- mockLogger.EXPECT().Info("HTTP plugin successfully restarted").MinTimes(1)
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() // placeholder for the worker log error
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &reload.Plugin{},
- &resetter.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 3)
- t.Run("ReloadMakeFiles", reloadMakeFiles) // make files in the testDir
- time.Sleep(time.Second * 2)
- t.Run("ReloadCopyFilesRecursive", reloadCopyFiles)
- time.Sleep(time.Second * 3)
- assert.NoError(t, freeResources(testDir))
- time.Sleep(time.Second * 10)
-
- stopCh <- struct{}{}
- wg.Wait()
-
- assert.NoError(t, freeResources(testCopyToDir))
- assert.NoError(t, freeResources(dir1))
-}
-
-// ========================================================================
-
-func freeResources(path string) error {
- return os.RemoveAll(path)
-}
-
-func makeFile(filename string) error {
- return ioutil.WriteFile(filepath.Join(testDir, filename), []byte{}, 0755) //nolint:gosec
-}
-
-func copyDir(src string, dst string) error {
- src = filepath.Clean(src)
- dst = filepath.Clean(dst)
-
- si, err := os.Stat(src)
- if err != nil {
- return err
- }
- if !si.IsDir() {
- return errors.E(errors.Str("source is not a directory"))
- }
-
- _, err = os.Stat(dst)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
-
- err = os.MkdirAll(dst, si.Mode())
- if err != nil {
- return err
- }
-
- entries, err := ioutil.ReadDir(src)
- if err != nil {
- return err
- }
-
- for _, entry := range entries {
- srcPath := filepath.Join(src, entry.Name())
- dstPath := filepath.Join(dst, entry.Name())
-
- if entry.IsDir() {
- err = copyDir(srcPath, dstPath)
- if err != nil {
- return err
- }
- } else {
- // Skip symlinks.
- if entry.Mode()&os.ModeSymlink != 0 {
- continue
- }
-
- err = copyFile(srcPath, dstPath)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func copyFile(src, dst string) error {
- in, err := os.Open(src)
- if err != nil {
- return errors.E(err)
- }
- defer func() {
- _ = in.Close()
- }()
-
- out, err := os.Create(dst)
- if err != nil {
- return errors.E(err)
- }
- defer func() {
- _ = out.Close()
- }()
-
- _, err = io.Copy(out, in)
- if err != nil {
- return errors.E(err)
- }
-
- err = out.Sync()
- if err != nil {
- return errors.E(err)
- }
-
- si, err := os.Stat(src)
- if err != nil {
- return errors.E(err)
- }
- err = os.Chmod(dst, si.Mode())
- if err != nil {
- return errors.E(err)
- }
- return nil
-}
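
For reference, the removed copyDir/copyFile helpers above implement a plain recursive copy that skips symlinks and preserves file modes. Below is a minimal standalone sketch of the same pattern using only the standard library; the source and destination paths are taken from the deleted tests and are illustrative only.

package main

import (
	"io"
	"os"
	"path/filepath"
)

// copyTree recursively copies src into dst, skipping symlinks and
// preserving file modes, mirroring the removed copyDir/copyFile helpers.
func copyTree(src, dst string) error {
	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, path)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}
		if info.Mode()&os.ModeSymlink != 0 {
			return nil // skip symlinks, as the original helper did
		}
		in, err := os.Open(path)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.Create(target)
		if err != nil {
			return err
		}
		defer out.Close()
		if _, err = io.Copy(out, in); err != nil {
			return err
		}
		return os.Chmod(target, info.Mode())
	})
}

func main() {
	// copy the watched test directory, as the deleted reload tests did
	if err := copyTree("unit_tests", "unit_tests_copied"); err != nil {
		panic(err)
	}
}
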
diff --git a/tests/plugins/resetter/.rr-resetter.yaml b/tests/plugins/resetter/.rr-resetter.yaml
deleted file mode 100644
index 623ba142..00000000
--- a/tests/plugins/resetter/.rr-resetter.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- env:
- "RR_CONFIG": "/some/place/on/the/C134"
- "RR_CONFIG2": "C138"
- relay: "pipes"
- relay_timeout: "20s"
-
-rpc:
- listen: tcp://127.0.0.1:6001
-logs:
- mode: development
- level: debug \ No newline at end of file
diff --git a/tests/plugins/resetter/resetter_test.go b/tests/plugins/resetter/resetter_test.go
deleted file mode 100644
index e0fb020d..00000000
--- a/tests/plugins/resetter/resetter_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package resetter
-
-import (
- "net"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/resetter"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestResetterInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- cfg := &config.Viper{
- Path: ".rr-resetter.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("Started RPC service", "address", "tcp://127.0.0.1:6001", "plugins", []string{"resetter"}).MinTimes(1)
-
- mockLogger.EXPECT().Error(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
-
- mockLogger.EXPECT().Warn("listener accept error, connection closed", "error", gomock.Any()).AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- &server.Plugin{},
- mockLogger,
- &resetter.Plugin{},
- &rpcPlugin.Plugin{},
- &Plugin1{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- stopCh := make(chan struct{}, 1)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
-
- t.Run("ResetterRpcTest", resetterRPCTest)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func resetterRPCTest(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- // call the resetter RPC methods exposed by the plugin
-
- var ret bool
- err = client.Call("resetter.Reset", "resetter.plugin1", &ret)
- assert.NoError(t, err)
- assert.True(t, ret)
- ret = false
-
- var services []string
- err = client.Call("resetter.List", nil, &services)
- assert.NotNil(t, services)
- assert.NoError(t, err)
- if services[0] != "resetter.plugin1" {
- t.Fatal("no enough services")
- }
-}
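
The removed resetter_test.go above drives the plugin through the goridge RPC codec. A minimal client sketch mirroring those calls, assuming a RoadRunner instance configured with the .rr-resetter.yaml shown above is listening on 127.0.0.1:6001:

package main

import (
	"fmt"
	"net"
	"net/rpc"

	goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
)

func main() {
	// dial the RPC endpoint from the rpc.listen option
	conn, err := net.Dial("tcp", "127.0.0.1:6001")
	if err != nil {
		panic(err)
	}
	client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))

	// reset a single registered plugin by name;
	// "resetter.plugin1" is the test plugin registered in the removed test
	var ok bool
	if err := client.Call("resetter.Reset", "resetter.plugin1", &ok); err != nil {
		panic(err)
	}

	// list every plugin that implements the resetter interface
	var services []string
	if err := client.Call("resetter.List", nil, &services); err != nil {
		panic(err)
	}
	fmt.Println(ok, services)
}
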
diff --git a/tests/plugins/resetter/test_plugin.go b/tests/plugins/resetter/test_plugin.go
deleted file mode 100644
index 5c26cbd0..00000000
--- a/tests/plugins/resetter/test_plugin.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package resetter
-
-import (
- "context"
- "time"
-
- poolImpl "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-var testPoolConfig = &poolImpl.Config{
- NumWorkers: 10,
- MaxJobs: 100,
- AllocateTimeout: time.Second * 10,
- DestroyTimeout: time.Second * 10,
- Supervisor: &poolImpl.SupervisorConfig{
- WatchTick: 60 * time.Second,
- TTL: 1000 * time.Second,
- IdleTTL: 10 * time.Second,
- ExecTTL: 10 * time.Second,
- MaxWorkerMemory: 1000,
- },
-}
-
-// Gauge //////////////
-type Plugin1 struct {
- config config.Configurer
- server server.Server
-}
-
-func (p1 *Plugin1) Init(cfg config.Configurer, server server.Server) error {
- p1.config = cfg
- p1.server = server
- return nil
-}
-
-func (p1 *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
- return errCh
-}
-
-func (p1 *Plugin1) Stop() error {
- return nil
-}
-
-func (p1 *Plugin1) Name() string {
- return "resetter.plugin1"
-}
-
-func (p1 *Plugin1) Reset() error {
- pool, err := p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- panic(err)
- }
- pool.Destroy(context.Background())
-
- pool, err = p1.server.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- panic(err)
- }
-
- _ = pool
-
- return nil
-}
diff --git a/tests/plugins/rpc/config_test.go b/tests/plugins/rpc/config_test.go
deleted file mode 100755
index 0645050d..00000000
--- a/tests/plugins/rpc/config_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package rpc
-
-import (
- "testing"
-
- "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-func TestConfig_Listener(t *testing.T) {
- cfg := &rpc.Config{Listen: "tcp://:18001"}
-
- ln, err := cfg.Listener()
- assert.NoError(t, err)
- assert.NotNil(t, ln)
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- assert.Equal(t, "tcp", ln.Addr().Network())
- assert.Equal(t, "0.0.0.0:18001", ln.Addr().String())
-}
-
-func TestConfig_Listener2(t *testing.T) {
- cfg := &rpc.Config{Listen: ":18001"}
-
- ln, err := cfg.Listener()
- assert.NoError(t, err)
- assert.NotNil(t, ln)
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- assert.Equal(t, "tcp", ln.Addr().Network())
- assert.Equal(t, "0.0.0.0:18001", ln.Addr().String())
-}
-
-func TestConfig_ListenerIPV6(t *testing.T) {
- cfg := &rpc.Config{Listen: "tcp://[::]:18001"}
-
- ln, err := cfg.Listener()
- assert.NoError(t, err)
- assert.NotNil(t, ln)
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- assert.Equal(t, "tcp", ln.Addr().Network())
- assert.Equal(t, "[::]:18001", ln.Addr().String())
-}
-
-func TestConfig_ListenerUnix(t *testing.T) {
- cfg := &rpc.Config{Listen: "unix://file.sock"}
-
- ln, err := cfg.Listener()
- assert.NoError(t, err)
- assert.NotNil(t, ln)
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- assert.Equal(t, "unix", ln.Addr().Network())
- assert.Equal(t, "file.sock", ln.Addr().String())
-}
-
-func Test_Config_Error(t *testing.T) {
- cfg := &rpc.Config{Listen: "uni:unix.sock"}
- ln, err := cfg.Listener()
- assert.Nil(t, ln)
- assert.Error(t, err)
-}
-
-func Test_Config_ErrorMethod(t *testing.T) {
- cfg := &rpc.Config{Listen: "xinu://unix.sock"}
-
- ln, err := cfg.Listener()
- assert.Nil(t, ln)
- assert.Error(t, err)
-}
-
-func TestConfig_Dialer(t *testing.T) {
- cfg := &rpc.Config{Listen: "tcp://:18001"}
-
- ln, _ := cfg.Listener()
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- conn, err := cfg.Dialer()
- assert.NoError(t, err)
- assert.NotNil(t, conn)
- defer func() {
- err := conn.Close()
- if err != nil {
- t.Errorf("error closing the connection: error %v", err)
- }
- }()
-
- assert.Equal(t, "tcp", conn.RemoteAddr().Network())
- assert.Equal(t, "127.0.0.1:18001", conn.RemoteAddr().String())
-}
-
-func TestConfig_DialerUnix(t *testing.T) {
- cfg := &rpc.Config{Listen: "unix://file.sock"}
-
- ln, _ := cfg.Listener()
- defer func() {
- err := ln.Close()
- if err != nil {
- t.Errorf("error closing the listener: error %v", err)
- }
- }()
-
- conn, err := cfg.Dialer()
- assert.NoError(t, err)
- assert.NotNil(t, conn)
- defer func() {
- err := conn.Close()
- if err != nil {
- t.Errorf("error closing the connection: error %v", err)
- }
- }()
-
- assert.Equal(t, "unix", conn.RemoteAddr().Network())
- assert.Equal(t, "file.sock", conn.RemoteAddr().String())
-}
-
-func Test_Config_DialerError(t *testing.T) {
- cfg := &rpc.Config{Listen: "uni:unix.sock"}
- ln, err := cfg.Dialer()
- assert.Nil(t, ln)
- assert.Error(t, err)
- assert.Equal(t, "invalid socket DSN (tcp://:6001, unix://file.sock)", err.Error())
-}
-
-func Test_Config_DialerErrorMethod(t *testing.T) {
- cfg := &rpc.Config{Listen: "xinu://unix.sock"}
-
- ln, err := cfg.Dialer()
- assert.Nil(t, ln)
- assert.Error(t, err)
-}
-
-func Test_Config_Defaults(t *testing.T) {
- c := &rpc.Config{}
- c.InitDefaults()
- assert.Equal(t, "tcp://127.0.0.1:6001", c.Listen)
-}
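
The removed config_test.go exercises the rpc.Config surface: InitDefaults, Listener and Dialer. A short sketch of that flow as a toy program (not part of the test suite), using the same module path as the deleted file:

package main

import (
	"fmt"

	"github.com/spiral/roadrunner/v2/plugins/rpc"
)

func main() {
	cfg := &rpc.Config{}
	cfg.InitDefaults() // Listen becomes "tcp://127.0.0.1:6001", as asserted above

	// server side: a net.Listener for the configured DSN
	ln, err := cfg.Listener()
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println(ln.Addr().Network(), ln.Addr().String())

	// client side: a connection to the same DSN
	conn, err := cfg.Dialer()
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println(conn.RemoteAddr().String())
}
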
diff --git a/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml b/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml
deleted file mode 100644
index d256aad7..00000000
--- a/tests/plugins/rpc/configs/.rr-rpc-disabled.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-logs:
- mode: development
- level: panic
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: panic
diff --git a/tests/plugins/rpc/configs/.rr.yaml b/tests/plugins/rpc/configs/.rr.yaml
deleted file mode 100644
index d6aaa7c6..00000000
--- a/tests/plugins/rpc/configs/.rr.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-logs:
- mode: development
- level: panic
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: panic
diff --git a/tests/plugins/rpc/plugin1.go b/tests/plugins/rpc/plugin1.go
deleted file mode 100644
index 6843b396..00000000
--- a/tests/plugins/rpc/plugin1.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package rpc
-
-import (
- "fmt"
-
- "github.com/spiral/roadrunner/v2/plugins/config"
-)
-
-type Plugin1 struct {
- config config.Configurer
-}
-
-func (p1 *Plugin1) Init(cfg config.Configurer) error {
- p1.config = cfg
- return nil
-}
-
-func (p1 *Plugin1) Serve() chan error {
- errCh := make(chan error, 1)
- return errCh
-}
-
-func (p1 *Plugin1) Stop() error {
- return nil
-}
-
-func (p1 *Plugin1) Name() string {
- return "rpc_test.plugin1"
-}
-
-func (p1 *Plugin1) RPC() interface{} {
- return &PluginRPC{srv: p1}
-}
-
-type PluginRPC struct {
- srv *Plugin1
-}
-
-func (r *PluginRPC) Hello(in string, out *string) error {
- *out = fmt.Sprintf("Hello, username: %s", in)
- return nil
-}
diff --git a/tests/plugins/rpc/plugin2.go b/tests/plugins/rpc/plugin2.go
deleted file mode 100644
index 2c47158f..00000000
--- a/tests/plugins/rpc/plugin2.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package rpc
-
-import (
- "net"
- "net/rpc"
- "time"
-
- "github.com/spiral/errors"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
-)
-
-// Plugin2 makes a call to Plugin1 via RPC.
-// This only simulates an external call for the test;
-// you don't need to do such things in real code :)
-type Plugin2 struct {
-}
-
-func (p2 *Plugin2) Init() error {
- return nil
-}
-
-func (p2 *Plugin2) Serve() chan error {
- errCh := make(chan error, 1)
-
- go func() {
- time.Sleep(time.Second * 3)
-
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- if err != nil {
- errCh <- errors.E(errors.Serve, err)
- return
- }
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
- var ret string
- err = client.Call("rpc_test.plugin1.Hello", "Valery", &ret)
- if err != nil {
- errCh <- err
- return
- }
- if ret != "Hello, username: Valery" {
- errCh <- errors.E("wrong response")
- return
- }
- // to stop exec
- errCh <- errors.E(errors.Disabled)
- }()
-
- return errCh
-}
-
-func (p2 *Plugin2) Stop() error {
- return nil
-}
diff --git a/tests/plugins/rpc/rpc_test.go b/tests/plugins/rpc/rpc_test.go
deleted file mode 100644
index 49d3b3f1..00000000
--- a/tests/plugins/rpc/rpc_test.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package rpc
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/stretchr/testify/assert"
-)
-
-// graph https://bit.ly/3ensdNb
-func TestRpcInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&Plugin1{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&Plugin2{})
- if err != nil {
- t.Fatal(err)
- }
-
- v := &config.Viper{}
- v.Path = "configs/.rr.yaml"
- v.Prefix = "rr"
- err = cont.Register(v)
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&rpc.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
-
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- tt := time.NewTimer(time.Second * 10)
-
- go func() {
- defer wg.Done()
- defer tt.Stop()
- for {
- select {
- case e := <-ch:
- // just stop, this is ok
- if errors.Is(errors.Disabled, e.Error) {
- return
- }
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- assert.Fail(t, "timeout")
- }
- }
- }()
-
- wg.Wait()
-}
-
-// graph https://bit.ly/3ensdNb
-func TestRpcDisabled(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&Plugin1{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&Plugin2{})
- if err != nil {
- t.Fatal(err)
- }
-
- v := &config.Viper{}
- v.Path = "configs/.rr-rpc-disabled.yaml"
- v.Prefix = "rr"
- err = cont.Register(v)
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&rpc.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
-
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
- tt := time.NewTimer(time.Second * 20)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- defer tt.Stop()
- for {
- select {
- case e := <-ch:
- // RPC is turned off, so there should be a dial error
- if errors.Is(errors.Disabled, e.Error) {
- assert.FailNow(t, "should not be disabled error")
- }
- assert.Error(t, e.Error)
- err = cont.Stop()
- assert.Error(t, err)
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-tt.C:
- // timeout
- return
- }
- }
- }()
-
- wg.Wait()
-}
diff --git a/tests/plugins/server/configs/.rr-no-app-section.yaml b/tests/plugins/server/configs/.rr-no-app-section.yaml
deleted file mode 100644
index d28265d5..00000000
--- a/tests/plugins/server/configs/.rr-no-app-section.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php ../../client.php echo pipes"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "pipes"
- relay_timeout: "20s"
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/server/configs/.rr-sockets.yaml b/tests/plugins/server/configs/.rr-sockets.yaml
deleted file mode 100644
index 4c57f36f..00000000
--- a/tests/plugins/server/configs/.rr-sockets.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php socket.php"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "unix://unix.sock"
- relay_timeout: "20s"
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/server/configs/.rr-tcp.yaml b/tests/plugins/server/configs/.rr-tcp.yaml
deleted file mode 100644
index 6b9c9ddb..00000000
--- a/tests/plugins/server/configs/.rr-tcp.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php tcp.php"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "tcp://127.0.0.1:9999"
- relay_timeout: "20s"
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/server/configs/.rr-wrong-command.yaml b/tests/plugins/server/configs/.rr-wrong-command.yaml
deleted file mode 100644
index 9d105d90..00000000
--- a/tests/plugins/server/configs/.rr-wrong-command.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php some_absent_file.php"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "pipes"
- relay_timeout: "20s"
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/server/configs/.rr-wrong-relay.yaml b/tests/plugins/server/configs/.rr-wrong-relay.yaml
deleted file mode 100644
index c4d1edb0..00000000
--- a/tests/plugins/server/configs/.rr-wrong-relay.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php ../../client.php echo pipes"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "pupes"
- relay_timeout: "20s"
-logs:
- mode: development
- level: error
diff --git a/tests/plugins/server/configs/.rr.yaml b/tests/plugins/server/configs/.rr.yaml
deleted file mode 100644
index a1484c02..00000000
--- a/tests/plugins/server/configs/.rr.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-server:
- command: "php ../../client.php echo pipes"
- user: ""
- group: ""
- env:
- - RR_CONFIG: "/some/place/on/the/C134"
- - RR_CONFIG2: "C138"
- relay: "pipes"
- relay_timeout: "20s"
-logs:
- mode: development
- level: info
diff --git a/tests/plugins/server/plugin_pipes.go b/tests/plugins/server/plugin_pipes.go
deleted file mode 100644
index d136da1e..00000000
--- a/tests/plugins/server/plugin_pipes.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package server
-
-import (
- "context"
- "time"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-const ConfigSection = "server"
-const Response = "test"
-
-var testPoolConfig = &pool.Config{
- NumWorkers: 10,
- MaxJobs: 100,
- AllocateTimeout: time.Second * 10,
- DestroyTimeout: time.Second * 10,
- Supervisor: &pool.SupervisorConfig{
- WatchTick: 60 * time.Second,
- TTL: 1000 * time.Second,
- IdleTTL: 10 * time.Second,
- ExecTTL: 10 * time.Second,
- MaxWorkerMemory: 1000,
- },
-}
-
-type Foo struct {
- configProvider config.Configurer
- wf server.Server
- pool pool.Pool
-}
-
-func (f *Foo) Init(p config.Configurer, workerFactory server.Server) error {
- f.configProvider = p
- f.wf = workerFactory
- return nil
-}
-
-func (f *Foo) Serve() chan error {
- const op = errors.Op("serve")
-
- // test payload for echo
- r := &payload.Payload{
- Context: nil,
- Body: []byte(Response),
- }
-
- errCh := make(chan error, 1)
-
- conf := &server.Config{}
- var err error
- err = f.configProvider.UnmarshalKey(ConfigSection, conf)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test CMDFactory
- cmd, err := f.wf.CmdFactory(nil)
- if err != nil {
- errCh <- err
- return errCh
- }
- if cmd == nil {
- errCh <- errors.E(op, errors.Str("command is nil"))
- return errCh
- }
-
- // test worker creation
- w, err := f.wf.NewWorker(context.Background(), nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test that our worker is functional
- sw := worker.From(w)
-
- rsp, err := sw.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- // there should be no errors
- err = sw.Stop()
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool
- f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool execution
- rsp, err = f.pool.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // echo of the "test" should be -> test
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo) Stop() error {
- f.pool.Destroy(context.Background())
- return nil
-}
diff --git a/tests/plugins/server/plugin_sockets.go b/tests/plugins/server/plugin_sockets.go
deleted file mode 100644
index 143a604c..00000000
--- a/tests/plugins/server/plugin_sockets.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package server
-
-import (
- "context"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-type Foo2 struct {
- configProvider config.Configurer
- wf server.Server
- pool pool.Pool
-}
-
-func (f *Foo2) Init(p config.Configurer, workerFactory server.Server) error {
- f.configProvider = p
- f.wf = workerFactory
- return nil
-}
-
-func (f *Foo2) Serve() chan error {
- const op = errors.Op("serve")
- var err error
- errCh := make(chan error, 1)
- conf := &server.Config{}
-
- // test payload for echo
- r := &payload.Payload{
- Context: nil,
- Body: []byte(Response),
- }
-
- err = f.configProvider.UnmarshalKey(ConfigSection, conf)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test CMDFactory
- cmd, err := f.wf.CmdFactory(nil)
- if err != nil {
- errCh <- err
- return errCh
- }
- if cmd == nil {
- errCh <- errors.E(op, "command is nil")
- return errCh
- }
-
- // test worker creation
- w, err := f.wf.NewWorker(context.Background(), nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test that our worker is functional
- sw := worker.From(w)
-
- rsp, err := sw.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- // there should be no errors
- err = sw.Stop()
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool
- f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool execution
- rsp, err = f.pool.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // echo of the "test" should be -> test
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo2) Stop() error {
- f.pool.Destroy(context.Background())
- return nil
-}
diff --git a/tests/plugins/server/plugin_tcp.go b/tests/plugins/server/plugin_tcp.go
deleted file mode 100644
index 57a2e6ea..00000000
--- a/tests/plugins/server/plugin_tcp.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package server
-
-import (
- "context"
-
- "github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/pool"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/server"
-)
-
-type Foo3 struct {
- configProvider config.Configurer
- wf server.Server
- pool pool.Pool
-}
-
-func (f *Foo3) Init(p config.Configurer, workerFactory server.Server) error {
- f.configProvider = p
- f.wf = workerFactory
- return nil
-}
-
-func (f *Foo3) Serve() chan error {
- const op = errors.Op("serve")
- var err error
- errCh := make(chan error, 1)
- conf := &server.Config{}
-
- // test payload for echo
- r := &payload.Payload{
- Context: nil,
- Body: []byte(Response),
- }
-
- err = f.configProvider.UnmarshalKey(ConfigSection, conf)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test CMDFactory
- cmd, err := f.wf.CmdFactory(nil)
- if err != nil {
- errCh <- err
- return errCh
- }
- if cmd == nil {
- errCh <- errors.E(op, "command is nil")
- return errCh
- }
-
- // test worker creation
- w, err := f.wf.NewWorker(context.Background(), nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test that our worker is functional
- sw := worker.From(w)
-
- rsp, err := sw.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- // there should be no errors
- err = sw.Stop()
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool
- f.pool, err = f.wf.NewWorkerPool(context.Background(), testPoolConfig, nil)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // test pool execution
- rsp, err = f.pool.Exec(r)
- if err != nil {
- errCh <- err
- return errCh
- }
-
- // echo of the "test" should be -> test
- if string(rsp.Body) != Response {
- errCh <- errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
- return errCh
- }
-
- return errCh
-}
-
-func (f *Foo3) Stop() error {
- f.pool.Destroy(context.Background())
- return nil
-}
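
The three removed test plugins (Foo, Foo2, Foo3) differ only in the relay configured for the worker; the worker and pool checks are identical. A condensed sketch of that shared flow, assuming the same roadrunner v2 packages and API as the deleted files:

package server

import (
	"context"

	"github.com/spiral/errors"
	"github.com/spiral/roadrunner/v2/pkg/payload"
	"github.com/spiral/roadrunner/v2/pkg/pool"
	"github.com/spiral/roadrunner/v2/pkg/worker"
	"github.com/spiral/roadrunner/v2/plugins/server"
)

// checkServer condenses the shared flow of Foo/Foo2/Foo3: build a command,
// spawn a single worker, echo a payload through it, then repeat via a pool.
func checkServer(srv server.Server, cfg *pool.Config) error {
	r := &payload.Payload{Body: []byte("test")}

	cmd, err := srv.CmdFactory(nil)
	if err != nil {
		return err
	}
	if cmd == nil {
		return errors.E(errors.Str("command is nil"))
	}

	w, err := srv.NewWorker(context.Background(), nil)
	if err != nil {
		return err
	}
	sw := worker.From(w)

	rsp, err := sw.Exec(r)
	if err != nil {
		return err
	}
	if string(rsp.Body) != "test" {
		return errors.E("response from worker is wrong", errors.Errorf("response: %s", rsp.Body))
	}
	if err := sw.Stop(); err != nil {
		return err
	}

	p, err := srv.NewWorkerPool(context.Background(), cfg, nil)
	if err != nil {
		return err
	}
	defer p.Destroy(context.Background())

	rsp, err = p.Exec(r)
	if err != nil {
		return err
	}
	if string(rsp.Body) != "test" {
		return errors.E("response from pool is wrong", errors.Errorf("response: %s", rsp.Body))
	}
	return nil
}
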
diff --git a/tests/plugins/server/server_plugin_test.go b/tests/plugins/server/server_plugin_test.go
deleted file mode 100644
index 06c9eb50..00000000
--- a/tests/plugins/server/server_plugin_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-package server
-
-import (
- "os"
- "os/signal"
- "sync"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/stretchr/testify/assert"
-)
-
-func TestAppPipes(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr.yaml"
- vp.Prefix = "rr"
-
- err = container.RegisterAll(
- vp,
- &server.Plugin{},
- &Foo{},
- &logger.ZapLogger{},
- )
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- tt := time.NewTimer(time.Second * 10)
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- go func() {
- defer wg.Done()
- defer tt.Stop()
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- assert.NoError(t, er)
- return
- case <-tt.C:
- assert.NoError(t, container.Stop())
- return
- }
- }
- }()
-
- wg.Wait()
-}
-
-func TestAppSockets(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-sockets.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo2{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- // stop after 10 seconds
- tt := time.NewTicker(time.Second * 10)
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- if er != nil {
- panic(er)
- }
- return
- case <-tt.C:
- tt.Stop()
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
-
-func TestAppTCP(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-tcp.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- errCh, err := container.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- // stop by CTRL+C
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt)
-
- // stop after 10 seconds
- tt := time.NewTicker(time.Second * 10)
-
- for {
- select {
- case e := <-errCh:
- assert.NoError(t, e.Error)
- assert.NoError(t, container.Stop())
- return
- case <-c:
- er := container.Stop()
- if er != nil {
- panic(er)
- }
- return
- case <-tt.C:
- tt.Stop()
- assert.NoError(t, container.Stop())
- return
- }
- }
-}
-
-func TestAppWrongConfig(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rrrrrrrrrr.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- assert.Error(t, container.Init())
-}
-
-func TestAppWrongRelay(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-wrong-relay.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- assert.NoError(t, err)
-
- _, err = container.Serve()
- assert.Error(t, err)
-}
-
-func TestAppWrongCommand(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-wrong-command.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = container.Serve()
- assert.Error(t, err)
-}
-
-func TestAppNoAppSectionInConfig(t *testing.T) {
- container, err := endure.NewContainer(nil, endure.RetryOnFail(true), endure.SetLogLevel(endure.ErrorLevel))
- if err != nil {
- t.Fatal(err)
- }
- // config plugin
- vp := &config.Viper{}
- vp.Path = "configs/.rr-wrong-command.yaml"
- vp.Prefix = "rr"
- err = container.Register(vp)
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&server.Plugin{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&Foo3{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Register(&logger.ZapLogger{})
- if err != nil {
- t.Fatal(err)
- }
-
- err = container.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = container.Serve()
- assert.Error(t, err)
-}
diff --git a/tests/plugins/server/socket.php b/tests/plugins/server/socket.php
deleted file mode 100644
index f90dda6f..00000000
--- a/tests/plugins/server/socket.php
+++ /dev/null
@@ -1,25 +0,0 @@
-<?php
-/**
- * @var Goridge\RelayInterface $relay
- */
-
-use Spiral\Goridge;
-use Spiral\RoadRunner;
-
-require dirname(__DIR__) . "/../vendor/autoload.php";
-
-$relay = new Goridge\SocketRelay(
- "unix.sock",
- null,
- Goridge\SocketRelay::SOCK_UNIX
- );
-
-$rr = new RoadRunner\Worker($relay);
-
-while ($in = $rr->waitPayload()) {
- try {
- $rr->respond(new RoadRunner\Payload((string)$in->body));
- } catch (\Throwable $e) {
- $rr->error((string)$e);
- }
-}
diff --git a/tests/plugins/server/tcp.php b/tests/plugins/server/tcp.php
deleted file mode 100644
index acc1e1a5..00000000
--- a/tests/plugins/server/tcp.php
+++ /dev/null
@@ -1,20 +0,0 @@
-<?php
-/**
- * @var Goridge\RelayInterface $relay
- */
-
-use Spiral\Goridge;
-use Spiral\RoadRunner;
-
-require dirname(__DIR__) . "/../vendor/autoload.php";
-
-$relay = new Goridge\SocketRelay("127.0.0.1", 9999);
-$rr = new RoadRunner\Worker($relay);
-
-while ($in = $rr->waitPayload()) {
- try {
- $rr->respond(new RoadRunner\Payload((string)$in->body));
- } catch (\Throwable $e) {
- $rr->error((string)$e);
- }
-}
diff --git a/tests/plugins/service/configs/.rr-service-error.yaml b/tests/plugins/service/configs/.rr-service-error.yaml
deleted file mode 100644
index 3b0f1eb9..00000000
--- a/tests/plugins/service/configs/.rr-service-error.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-service:
- some_service_1:
- command: "php test_files/loopo.php"
- process_num: 1
- exec_timeout: 5s # s,m,h (seconds, minutes, hours)
- remain_after_exit: true
- restart_sec: 1
-
-logs:
- level: info
- mode: raw
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/service/configs/.rr-service-init.yaml b/tests/plugins/service/configs/.rr-service-init.yaml
deleted file mode 100644
index e32f2eda..00000000
--- a/tests/plugins/service/configs/.rr-service-init.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-service:
- some_service_1:
- command: "php test_files/loop.php"
- process_num: 1
- exec_timeout: 5s # s,m,h (seconds, minutes, hours)
- remain_after_exit: true
- restart_sec: 1
- some_service_2:
- command: "test_files/test_binary"
- process_num: 1
- remain_after_exit: true
- restart_delay: 1s
- exec_timeout: 5s
-
-logs:
- level: info
- mode: raw
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/service/configs/.rr-service-restarts.yaml b/tests/plugins/service/configs/.rr-service-restarts.yaml
deleted file mode 100644
index f08d5720..00000000
--- a/tests/plugins/service/configs/.rr-service-restarts.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-service:
- some_service_2:
- command: "test_files/test_binary"
- process_num: 1
- remain_after_exit: true
- restart_delay: 1s
- exec_timeout: 2s
-
-logs:
- level: debug
- mode: raw
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/service/service_plugin_test.go b/tests/plugins/service/service_plugin_test.go
deleted file mode 100644
index ddf54520..00000000
--- a/tests/plugins/service/service_plugin_test.go
+++ /dev/null
@@ -1,254 +0,0 @@
-//go:build linux
-// +build linux
-
-package service
-
-import (
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/golang/mock/gomock"
- endure "github.com/spiral/endure/pkg/container"
- "github.com/spiral/roadrunner/v2/plugins/config"
- "github.com/spiral/roadrunner/v2/plugins/service"
- "github.com/spiral/roadrunner/v2/tests/mocks"
- "github.com/stretchr/testify/assert"
-)
-
-func TestServiceInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-service-init.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Info("The number is: 0\n").MinTimes(1)
- mockLogger.EXPECT().Info("The number is: 1\n").MinTimes(1)
- mockLogger.EXPECT().Info("The number is: 2\n").MinTimes(1)
- mockLogger.EXPECT().Info("The number is: 3\n").MinTimes(1)
- mockLogger.EXPECT().Info("The number is: 4\n").AnyTimes()
-
- // process interrupt error
- mockLogger.EXPECT().Error("process wait error", gomock.Any()).MinTimes(2)
-
- mockLogger.EXPECT().Info("Hello 0").MinTimes(1)
- mockLogger.EXPECT().Info("Hello 1").MinTimes(1)
- mockLogger.EXPECT().Info("Hello 2").MinTimes(1)
- mockLogger.EXPECT().Info("Hello 3").MinTimes(1)
- mockLogger.EXPECT().Info("Hello 4").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &service.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 10)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestServiceError(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-service-error.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
-
- // process interrupt error
- mockLogger.EXPECT().Error("process wait error", gomock.Any()).MinTimes(2)
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &service.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- assert.NoError(t, err)
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 10)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func TestServiceRestarts(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-service-restarts.yaml",
- Prefix: "rr",
- }
-
- controller := gomock.NewController(t)
- mockLogger := mocks.NewMockLogger(controller)
-
- mockLogger.EXPECT().Debug("worker destructed", "pid", gomock.Any()).AnyTimes()
- mockLogger.EXPECT().Debug("worker constructed", "pid", gomock.Any()).AnyTimes()
-
- // process interrupt error
- mockLogger.EXPECT().Error("process wait error", gomock.Any()).MinTimes(1)
-
-	// because of restarts, the output should not progress beyond "Hello 0"
- mockLogger.EXPECT().Info("Hello 0").MinTimes(1)
- mockLogger.EXPECT().Info("Hello 1").AnyTimes()
-
- err = cont.RegisterAll(
- cfg,
- mockLogger,
- &service.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 10)
- stopCh <- struct{}{}
- wg.Wait()
-}
diff --git a/tests/plugins/service/test_files/loop.php b/tests/plugins/service/test_files/loop.php
deleted file mode 100644
index 6ba488ef..00000000
--- a/tests/plugins/service/test_files/loop.php
+++ /dev/null
@@ -1,6 +0,0 @@
-<?php
-for ($x = 0; $x <= 1000; $x++) {
- sleep(1);
- error_log("The number is: $x", 0);
-}
-?>
diff --git a/tests/plugins/service/test_files/test_binary b/tests/plugins/service/test_files/test_binary
deleted file mode 100755
index 480fb7e2..00000000
--- a/tests/plugins/service/test_files/test_binary
+++ /dev/null
Binary files differ
diff --git a/tests/plugins/status/configs/.rr-ready-init.yaml b/tests/plugins/status/configs/.rr-ready-init.yaml
deleted file mode 100755
index da9e84c3..00000000
--- a/tests/plugins/status/configs/.rr-ready-init.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6007
-
-server:
- command: "php ../../sleep.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-status:
- address: "127.0.0.1:34334"
-
-logs:
- mode: development
- level: error
-http:
- address: 127.0.0.1:11933
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 1
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
diff --git a/tests/plugins/status/configs/.rr-status-init.yaml b/tests/plugins/status/configs/.rr-status-init.yaml
deleted file mode 100755
index c791c10f..00000000
--- a/tests/plugins/status/configs/.rr-status-init.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6005
-
-server:
- command: "php ../../http/client.php echo pipes"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-status:
- address: "127.0.0.1:34333"
-
-logs:
- mode: development
- level: error
-http:
- address: 127.0.0.1:11933
- max_request_size: 1024
- middleware: [ "" ]
- uploads:
- forbid: [ ".php", ".exe", ".bat" ]
- trusted_subnets: [ "10.0.0.0/8", "127.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "::1/128", "fc00::/7", "fe80::/10" ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
-    destroy_timeout: 60s
\ No newline at end of file
diff --git a/tests/plugins/status/plugin_test.go b/tests/plugins/status/plugin_test.go
deleted file mode 100644
index 227cfd46..00000000
--- a/tests/plugins/status/plugin_test.go
+++ /dev/null
@@ -1,388 +0,0 @@
-package status
-
-import (
- "io/ioutil"
- "net"
- "net/http"
- "net/rpc"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/status"
- "github.com/stretchr/testify/assert"
-)
-
-func TestStatusHttp(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-status-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &status.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("CheckerGetStatus", checkHTTPStatus)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-const resp = `Service: http: Status: 200
-Service: rpc not found`
-
-func checkHTTPStatus(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:34333/health?plugin=http&plugin=rpc", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, resp, string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestStatusRPC(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-status-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &status.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("CheckerGetStatusRpc", checkRPCStatus)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func checkRPCStatus(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6005")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- st := &status.Status{}
-
- err = client.Call("status.Status", "http", &st)
- assert.NoError(t, err)
- assert.Equal(t, st.Code, 200)
-}
-
-func TestReadyHttp(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-status-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &status.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("CheckerGetReadiness", checkHTTPReadiness)
-
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-const resp2 = `Service: http: Status: 204
-Service: rpc not found`
-
-func checkHTTPReadiness(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:34333/ready?plugin=http&plugin=rpc", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, resp, string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func TestReadinessRPCWorkerNotReady(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel), endure.GracefulShutdownTimeout(time.Second*2))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-ready-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &httpPlugin.Plugin{},
- &status.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- assert.NoError(t, err)
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
-				// timeout; an error here is OK because the PHP worker sleeps for 300s
- _ = cont.Stop()
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
- t.Run("DoHttpReq", doHTTPReq)
- time.Sleep(time.Second * 5)
- t.Run("CheckerGetReadiness2", checkHTTPReadiness2)
- t.Run("CheckerGetRpcReadiness", checkRPCReadiness)
- stopCh <- struct{}{}
- wg.Wait()
-}
-
-func doHTTPReq(t *testing.T) {
- go func() {
- req, err := http.NewRequest("GET", "http://127.0.0.1:11933", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 200, r.StatusCode)
- assert.Equal(t, resp2, string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
- }()
-}
-
-func checkHTTPReadiness2(t *testing.T) {
- req, err := http.NewRequest("GET", "http://127.0.0.1:34334/ready?plugin=http&plugin=rpc", nil)
- assert.NoError(t, err)
-
- r, err := http.DefaultClient.Do(req)
- assert.NoError(t, err)
- b, err := ioutil.ReadAll(r.Body)
- assert.NoError(t, err)
- assert.Equal(t, 503, r.StatusCode)
- assert.Equal(t, "", string(b))
-
- err = r.Body.Close()
- assert.NoError(t, err)
-}
-
-func checkRPCReadiness(t *testing.T) {
- conn, err := net.Dial("tcp", "127.0.0.1:6007")
- assert.NoError(t, err)
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- st := &status.Status{}
-
- err = client.Call("status.Ready", "http", &st)
- assert.NoError(t, err)
- assert.Equal(t, st.Code, 503)
-}
diff --git a/tests/plugins/websockets/configs/.rr-websockets-allow.yaml b/tests/plugins/websockets/configs/.rr-websockets-allow.yaml
deleted file mode 100644
index 3d0268d4..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-allow.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../worker-ok.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:41278
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-redis:
- addrs:
- - "127.0.0.1:6379"
-
-broadcast:
- test:
- driver: memory
- config: {}
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml b/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml
deleted file mode 100644
index f8e36136..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-allow2.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../worker-ok.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:41270
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-test:
- addrs:
- - "127.0.0.1:6379"
-
-broadcast:
- test:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6379"
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-broker-no-section.yaml b/tests/plugins/websockets/configs/.rr-websockets-broker-no-section.yaml
deleted file mode 100644
index c72e1f15..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-broker-no-section.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:13235
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- test1:
- driver: no
- config:
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-deny.yaml b/tests/plugins/websockets/configs/.rr-websockets-deny.yaml
deleted file mode 100644
index 61265c4b..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-deny.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../worker-deny.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:15587
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- test:
- driver: memory
- config: {}
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml b/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml
deleted file mode 100644
index b99a3571..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-deny2.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../worker-deny.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:15588
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- test:
- driver: redis
- config:
- addrs:
- - "127.0.0.1:6379"
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-init.yaml b/tests/plugins/websockets/configs/.rr-websockets-init.yaml
deleted file mode 100644
index 3120f146..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-init.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:11111
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- default:
- driver: memory
- config: {}
-
-websockets:
- broker: default
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-redis.yaml b/tests/plugins/websockets/configs/.rr-websockets-redis.yaml
deleted file mode 100644
index fc01e0b1..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-redis.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../psr-worker-bench.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:13235
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-test:
- addrs:
- - "127.0.0.1:6379"
-
-broadcast:
- test:
- driver: redis
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/configs/.rr-websockets-stop.yaml b/tests/plugins/websockets/configs/.rr-websockets-stop.yaml
deleted file mode 100644
index 35529e9e..00000000
--- a/tests/plugins/websockets/configs/.rr-websockets-stop.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-rpc:
- listen: tcp://127.0.0.1:6001
-
-server:
- command: "php ../../worker-stop.php"
- user: ""
- group: ""
- relay: "pipes"
- relay_timeout: "20s"
-
-http:
- address: 127.0.0.1:11114
- max_request_size: 1024
- middleware: ["websockets"]
- trusted_subnets:
- [
- "10.0.0.0/8",
- "127.0.0.0/8",
- "172.16.0.0/12",
- "192.168.0.0/16",
- "::1/128",
- "fc00::/7",
- "fe80::/10",
- ]
- pool:
- num_workers: 2
- max_jobs: 0
- allocate_timeout: 60s
- destroy_timeout: 60s
-
-broadcast:
- test:
- driver: memory
- config: {}
-
-websockets:
- broker: test
- allowed_origin: "*"
- path: "/ws"
-
-logs:
- mode: development
- level: error
-
-endure:
- grace_period: 120s
- print_graph: false
- log_level: error
diff --git a/tests/plugins/websockets/websocket_plugin_test.go b/tests/plugins/websockets/websocket_plugin_test.go
deleted file mode 100644
index 3e74ca59..00000000
--- a/tests/plugins/websockets/websocket_plugin_test.go
+++ /dev/null
@@ -1,918 +0,0 @@
-package websockets
-
-import (
- "net"
- "net/http"
- "net/rpc"
- "net/url"
- "os"
- "os/signal"
- "sync"
- "syscall"
- "testing"
- "time"
-
- "github.com/fasthttp/websocket"
- json "github.com/json-iterator/go"
- endure "github.com/spiral/endure/pkg/container"
- goridgeRpc "github.com/spiral/goridge/v3/pkg/rpc"
- "github.com/spiral/roadrunner/v2/plugins/broadcast"
- "github.com/spiral/roadrunner/v2/plugins/config"
- httpPlugin "github.com/spiral/roadrunner/v2/plugins/http"
- "github.com/spiral/roadrunner/v2/plugins/logger"
- "github.com/spiral/roadrunner/v2/plugins/memory"
- "github.com/spiral/roadrunner/v2/plugins/redis"
- rpcPlugin "github.com/spiral/roadrunner/v2/plugins/rpc"
- "github.com/spiral/roadrunner/v2/plugins/server"
- "github.com/spiral/roadrunner/v2/plugins/websockets"
- websocketsv1 "github.com/spiral/roadrunner/v2/proto/websockets/v1beta"
- "github.com/spiral/roadrunner/v2/utils"
- "github.com/stretchr/testify/assert"
-)
-
-func TestWebsocketsInit(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-init.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- &broadcast.Plugin{},
- )
-
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("TestWSInit", wsInit)
- t.Run("RPCWsMemoryPubAsync", RPCWsPubAsync("11111"))
- t.Run("RPCWsMemory", RPCWsPub("11111"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestWSRedis(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-redis.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsRedisPubAsync", RPCWsPubAsync("13235"))
- t.Run("RPCWsRedisPub", RPCWsPub("13235"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestWSRedisNoSection(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-broker-no-section.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = cont.Serve()
- assert.Error(t, err)
-}
-
-func TestWSDeny(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-deny.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsMemoryDeny", RPCWsDeny("15587"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestWSDeny2(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-deny2.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &redis.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsRedisDeny", RPCWsDeny("15588"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestWSStop(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-stop.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsStop", RPCWsMemoryStop("11114"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func RPCWsMemoryStop(port string) func(t *testing.T) {
- return func(t *testing.T) {
- da := websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: time.Second * 20,
- }
-
- connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"}
-
- c, resp, err := da.Dial(connURL.String(), nil)
- assert.NotNil(t, resp)
- assert.Error(t, err)
- assert.Nil(t, c)
- assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) //nolint:staticcheck
- assert.Equal(t, resp.Header.Get("Stop"), "we-dont-like-you") //nolint:staticcheck
- if resp != nil && resp.Body != nil { //nolint:staticcheck
- _ = resp.Body.Close()
- }
- }
-}
-
-func TestWSAllow(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-allow.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsMemoryAllow", RPCWsPub("41278"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func TestWSAllow2(t *testing.T) {
- cont, err := endure.NewContainer(nil, endure.SetLogLevel(endure.ErrorLevel))
- assert.NoError(t, err)
-
- cfg := &config.Viper{
- Path: "configs/.rr-websockets-allow2.yaml",
- Prefix: "rr",
- }
-
- err = cont.RegisterAll(
- cfg,
- &rpcPlugin.Plugin{},
- &logger.ZapLogger{},
- &server.Plugin{},
- &redis.Plugin{},
- &websockets.Plugin{},
- &httpPlugin.Plugin{},
- &memory.Plugin{},
- &broadcast.Plugin{},
- )
- assert.NoError(t, err)
-
- err = cont.Init()
- if err != nil {
- t.Fatal(err)
- }
-
- ch, err := cont.Serve()
- if err != nil {
- t.Fatal(err)
- }
-
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
-
- wg := &sync.WaitGroup{}
- wg.Add(1)
-
- stopCh := make(chan struct{}, 1)
-
- go func() {
- defer wg.Done()
- for {
- select {
- case e := <-ch:
- assert.Fail(t, "error", e.Error.Error())
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- case <-sig:
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- case <-stopCh:
- // timeout
- err = cont.Stop()
- if err != nil {
- assert.FailNow(t, "error", err.Error())
- }
- return
- }
- }
- }()
-
- time.Sleep(time.Second * 1)
- t.Run("RPCWsMemoryAllow", RPCWsPub("41270"))
-
- stopCh <- struct{}{}
-
- wg.Wait()
-}
-
-func wsInit(t *testing.T) {
- da := websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: time.Second * 20,
- }
-
- connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:11111", Path: "/ws"}
-
- c, resp, err := da.Dial(connURL.String(), nil)
- assert.NoError(t, err)
-
- defer func() {
- _ = resp.Body.Close()
- }()
-
- d, err := json.Marshal(messageWS("join", []byte("hello websockets"), "foo", "foo2"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- _, msg, err := c.ReadMessage()
- retMsg := utils.AsString(msg)
- assert.NoError(t, err)
-
- // subscription done
- assert.Equal(t, `{"topic":"@join","payload":["foo","foo2"]}`, retMsg)
-
- err = c.WriteControl(websocket.CloseMessage, nil, time.Time{})
- assert.NoError(t, err)
-}
-
-func RPCWsPubAsync(port string) func(t *testing.T) {
- return func(t *testing.T) {
- da := websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: time.Second * 20,
- }
-
- connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"}
-
- c, resp, err := da.Dial(connURL.String(), nil)
- assert.NoError(t, err)
-
- defer func() {
- if resp != nil && resp.Body != nil {
- _ = resp.Body.Close()
- }
- }()
-
- go func() {
- messagesToVerify := make([]string, 0, 4)
- messagesToVerify = append(messagesToVerify, `{"topic":"@join","payload":["foo","foo2"]}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"foo","payload":"hello, PHP"}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"@leave","payload":["foo"]}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"foo2","payload":"hello, PHP2"}`)
- i := 0
- for {
- _, msg, err2 := c.ReadMessage()
- retMsg := utils.AsString(msg)
- assert.NoError(t, err2)
- assert.Equal(t, messagesToVerify[i], retMsg)
- i++
- if i == 3 {
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
-
- d, err := json.Marshal(messageWS("join", []byte("hello websockets"), "foo", "foo2"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- time.Sleep(time.Second)
-
- publishAsync(t, "foo")
-
- time.Sleep(time.Second)
-
- // //// LEAVE foo /////////
- d, err = json.Marshal(messageWS("leave", []byte("hello websockets"), "foo"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- time.Sleep(time.Second)
-
- // TRY TO PUBLISH TO UNSUBSCRIBED TOPIC
- publishAsync(t, "foo")
-
- go func() {
- time.Sleep(time.Second * 5)
- publishAsync(t, "foo2")
- }()
-
- err = c.WriteControl(websocket.CloseMessage, nil, time.Time{})
- assert.NoError(t, err)
- }
-}
-
-func RPCWsPub(port string) func(t *testing.T) {
- return func(t *testing.T) {
- da := websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: time.Second * 20,
- }
-
- connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"}
-
- c, resp, err := da.Dial(connURL.String(), nil)
- assert.NoError(t, err)
-
- defer func() {
- if resp != nil && resp.Body != nil {
- _ = resp.Body.Close()
- }
- }()
-
- go func() {
- messagesToVerify := make([]string, 0, 10)
- messagesToVerify = append(messagesToVerify, `{"topic":"@join","payload":["foo","foo2"]}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"foo","payload":"hello, PHP"}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"@leave","payload":["foo"]}`)
- messagesToVerify = append(messagesToVerify, `{"topic":"foo2","payload":"hello, PHP2"}`)
- i := 0
- for {
- _, msg, err2 := c.ReadMessage()
- retMsg := utils.AsString(msg)
- assert.NoError(t, err2)
- assert.Equal(t, messagesToVerify[i], retMsg)
- i++
- if i == 3 {
- return
- }
- }
- }()
-
- time.Sleep(time.Second)
-
- d, err := json.Marshal(messageWS("join", []byte("hello websockets"), "foo", "foo2"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- time.Sleep(time.Second)
-
- publish("", "foo")
-
- time.Sleep(time.Second)
-
- // //// LEAVE foo /////////
- d, err = json.Marshal(messageWS("leave", []byte("hello websockets"), "foo"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- time.Sleep(time.Second)
-
- // TRY TO PUBLISH TO UNSUBSCRIBED TOPIC
- publish("", "foo")
-
- go func() {
- time.Sleep(time.Second * 5)
- publish2(t, "", "foo2")
- }()
-
- err = c.WriteControl(websocket.CloseMessage, nil, time.Time{})
- assert.NoError(t, err)
- }
-}
-
-func RPCWsDeny(port string) func(t *testing.T) {
- return func(t *testing.T) {
- da := websocket.Dialer{
- Proxy: http.ProxyFromEnvironment,
- HandshakeTimeout: time.Second * 20,
- }
-
- connURL := url.URL{Scheme: "ws", Host: "127.0.0.1:" + port, Path: "/ws"}
-
- c, resp, err := da.Dial(connURL.String(), nil)
- assert.NoError(t, err)
- assert.NotNil(t, c)
- assert.Equal(t, http.StatusSwitchingProtocols, resp.StatusCode)
-
- defer func() {
- if resp != nil && resp.Body != nil {
- _ = resp.Body.Close()
- }
- }()
-
- d, err := json.Marshal(messageWS("join", []byte("hello websockets"), "foo", "foo2"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- _, msg, err := c.ReadMessage()
- retMsg := utils.AsString(msg)
- assert.NoError(t, err)
-
-		// join denied by the worker ("#join" topic)
- assert.Equal(t, `{"topic":"#join","payload":["foo","foo2"]}`, retMsg)
-
-		// //// LEAVE foo /////////
- d, err = json.Marshal(messageWS("leave", []byte("hello websockets"), "foo"))
- if err != nil {
- panic(err)
- }
-
- err = c.WriteMessage(websocket.BinaryMessage, d)
- assert.NoError(t, err)
-
- _, msg, err = c.ReadMessage()
- retMsg = utils.AsString(msg)
- assert.NoError(t, err)
-
- // subscription done
- assert.Equal(t, `{"topic":"@leave","payload":["foo"]}`, retMsg)
-
- err = c.WriteControl(websocket.CloseMessage, nil, time.Time{})
- assert.NoError(t, err)
- }
-}
-
-// ---------------------------------------------------------------------------------------------------
-
-func publish(topics ...string) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- if err != nil {
- panic(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.Publish", makeMessage([]byte("hello, PHP"), topics...), ret)
- if err != nil {
- panic(err)
- }
-}
-
-func publishAsync(t *testing.T, topics ...string) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- if err != nil {
- panic(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.PublishAsync", makeMessage([]byte("hello, PHP"), topics...), ret)
- assert.NoError(t, err)
- assert.True(t, ret.Ok)
-}
-
-func publish2(t *testing.T, topics ...string) {
- conn, err := net.Dial("tcp", "127.0.0.1:6001")
- if err != nil {
- panic(err)
- }
-
- client := rpc.NewClientWithCodec(goridgeRpc.NewClientCodec(conn))
-
- ret := &websocketsv1.Response{}
- err = client.Call("broadcast.Publish", makeMessage([]byte("hello, PHP2"), topics...), ret)
- assert.NoError(t, err)
- assert.True(t, ret.Ok)
-}
-
-func messageWS(command string, payload []byte, topics ...string) *websocketsv1.Message {
- return &websocketsv1.Message{
- Topics: topics,
- Command: command,
- Payload: payload,
- }
-}
-
-func makeMessage(payload []byte, topics ...string) *websocketsv1.Request {
- m := &websocketsv1.Request{
- Messages: []*websocketsv1.Message{
- {
- Topics: topics,
- Payload: payload,
- },
- },
- }
-
- return m
-}
diff --git a/pkg/transport/interface.go b/transport/interface.go
index 1b072378..e20f2b0b 100644
--- a/pkg/transport/interface.go
+++ b/transport/interface.go
@@ -4,8 +4,8 @@ import (
"context"
"os/exec"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/worker"
)
// Factory is responsible for wrapping given command into tasks WorkerProcess.
diff --git a/pkg/transport/pipe/pipe_factory.go b/transport/pipe/pipe_factory.go
index 9433a510..0d46f496 100755
--- a/pkg/transport/pipe/pipe_factory.go
+++ b/transport/pipe/pipe_factory.go
@@ -6,9 +6,9 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/pkg/pipe"
+ "github.com/spiral/roadrunner/v2/events"
"github.com/spiral/roadrunner/v2/internal"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/worker"
"go.uber.org/multierr"
)
diff --git a/pkg/transport/pipe/pipe_factory_spawn_test.go b/transport/pipe/pipe_factory_spawn_test.go
index f5e9669b..45b7aef8 100644
--- a/pkg/transport/pipe/pipe_factory_spawn_test.go
+++ b/transport/pipe/pipe_factory_spawn_test.go
@@ -8,14 +8,14 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
)
func Test_GetState2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
go func() {
@@ -31,7 +31,7 @@ func Test_GetState2(t *testing.T) {
}
func Test_Kill2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
wg := &sync.WaitGroup{}
@@ -54,7 +54,7 @@ func Test_Kill2(t *testing.T) {
}
func Test_Pipe_Start2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
assert.NoError(t, err)
@@ -68,7 +68,7 @@ func Test_Pipe_Start2(t *testing.T) {
}
func Test_Pipe_StartError2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
err := cmd.Start()
if err != nil {
t.Errorf("error running the command: error %v", err)
@@ -80,7 +80,7 @@ func Test_Pipe_StartError2(t *testing.T) {
}
func Test_Pipe_PipeError3(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
_, err := cmd.StdinPipe()
if err != nil {
t.Errorf("error creating the STDIN pipe: error %v", err)
@@ -92,7 +92,7 @@ func Test_Pipe_PipeError3(t *testing.T) {
}
func Test_Pipe_PipeError4(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
_, err := cmd.StdinPipe()
if err != nil {
t.Errorf("error creating the STDIN pipe: error %v", err)
@@ -104,7 +104,7 @@ func Test_Pipe_PipeError4(t *testing.T) {
}
func Test_Pipe_Failboot2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
if ev, ok := event.(events.WorkerEvent); ok {
@@ -123,14 +123,14 @@ func Test_Pipe_Failboot2(t *testing.T) {
}
func Test_Pipe_Invalid2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
w, err := NewPipeFactory().SpawnWorker(cmd)
assert.Error(t, err)
assert.Nil(t, w)
}
func Test_Pipe_Echo2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
if err != nil {
t.Fatal(err)
@@ -155,7 +155,7 @@ func Test_Pipe_Echo2(t *testing.T) {
}
func Test_Pipe_Broken2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
if err != nil {
t.Fatal(err)
@@ -177,7 +177,7 @@ func Test_Pipe_Broken2(t *testing.T) {
func Benchmark_Pipe_SpawnWorker_Stop2(b *testing.B) {
f := NewPipeFactory()
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := f.SpawnWorker(cmd)
go func() {
if w.Wait() != nil {
@@ -193,7 +193,7 @@ func Benchmark_Pipe_SpawnWorker_Stop2(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEcho2(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorker(cmd)
sw := worker.From(w)
@@ -221,7 +221,7 @@ func Benchmark_Pipe_Worker_ExecEcho2(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEcho4(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
if err != nil {
b.Fatal(err)
@@ -244,7 +244,7 @@ func Benchmark_Pipe_Worker_ExecEcho4(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEchoWithoutContext2(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
if err != nil {
b.Fatal(err)
@@ -267,7 +267,7 @@ func Benchmark_Pipe_Worker_ExecEchoWithoutContext2(b *testing.B) {
}
func Test_Echo2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorker(cmd)
if err != nil {
@@ -297,7 +297,7 @@ func Test_Echo2(t *testing.T) {
}
func Test_BadPayload2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorker(cmd)
@@ -322,7 +322,7 @@ func Test_BadPayload2(t *testing.T) {
}
func Test_String2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorker(cmd)
go func() {
@@ -335,13 +335,13 @@ func Test_String2(t *testing.T) {
}
}()
- assert.Contains(t, w.String(), "php ../../../tests/client.php echo pipes")
+ assert.Contains(t, w.String(), "php ../../tests/client.php echo pipes")
assert.Contains(t, w.String(), "ready")
assert.Contains(t, w.String(), "numExecs: 0")
}
func Test_Echo_Slow2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "pipes", "10", "10")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "pipes", "10", "10")
w, _ := NewPipeFactory().SpawnWorker(cmd)
go func() {
@@ -367,7 +367,7 @@ func Test_Echo_Slow2(t *testing.T) {
}
func Test_Broken2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
data := ""
mu := &sync.Mutex{}
listener := func(event interface{}) {
@@ -399,7 +399,7 @@ func Test_Broken2(t *testing.T) {
}
func Test_Error2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "error", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "error", "pipes")
w, _ := NewPipeFactory().SpawnWorker(cmd)
go func() {
@@ -426,7 +426,7 @@ func Test_Error2(t *testing.T) {
}
func Test_NumExecs2(t *testing.T) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorker(cmd)
go func() {
diff --git a/pkg/transport/pipe/pipe_factory_test.go b/transport/pipe/pipe_factory_test.go
index e396fe57..f8198610 100755
--- a/pkg/transport/pipe/pipe_factory_test.go
+++ b/transport/pipe/pipe_factory_test.go
@@ -9,16 +9,16 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
)
func Test_GetState(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
go func() {
@@ -39,7 +39,7 @@ func Test_GetState(t *testing.T) {
func Test_Kill(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
wg := &sync.WaitGroup{}
@@ -64,7 +64,7 @@ func Test_Kill(t *testing.T) {
func Test_Pipe_Start(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
@@ -79,7 +79,7 @@ func Test_Pipe_Start(t *testing.T) {
func Test_Pipe_StartError(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
err := cmd.Start()
if err != nil {
t.Errorf("error running the command: error %v", err)
@@ -93,7 +93,7 @@ func Test_Pipe_StartError(t *testing.T) {
func Test_Pipe_PipeError(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
_, err := cmd.StdinPipe()
if err != nil {
t.Errorf("error creating the STDIN pipe: error %v", err)
@@ -107,7 +107,7 @@ func Test_Pipe_PipeError(t *testing.T) {
func Test_Pipe_PipeError2(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
// error cause
_, err := cmd.StdinPipe()
if err != nil {
@@ -122,7 +122,7 @@ func Test_Pipe_PipeError2(t *testing.T) {
func Test_Pipe_Failboot(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
ctx := context.Background()
finish := make(chan struct{}, 10)
@@ -145,7 +145,7 @@ func Test_Pipe_Failboot(t *testing.T) {
func Test_Pipe_Invalid(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
ctx := context.Background()
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
@@ -154,7 +154,7 @@ func Test_Pipe_Invalid(t *testing.T) {
func Test_Pipe_Echo(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
ctx := context.Background()
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -181,7 +181,7 @@ func Test_Pipe_Echo(t *testing.T) {
func Test_Pipe_Broken(t *testing.T) {
t.Parallel()
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
ctx := context.Background()
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -204,7 +204,7 @@ func Test_Pipe_Broken(t *testing.T) {
func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) {
f := NewPipeFactory()
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := f.SpawnWorkerWithTimeout(context.Background(), cmd)
go func() {
if w.Wait() != nil {
@@ -220,7 +220,7 @@ func Benchmark_Pipe_SpawnWorker_Stop(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(context.Background(), cmd)
sw := worker.From(w)
@@ -248,7 +248,7 @@ func Benchmark_Pipe_Worker_ExecEcho(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
ctx := context.Background()
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -272,7 +272,7 @@ func Benchmark_Pipe_Worker_ExecEcho3(b *testing.B) {
}
func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
ctx := context.Background()
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -298,7 +298,7 @@ func Benchmark_Pipe_Worker_ExecEchoWithoutContext(b *testing.B) {
func Test_Echo(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, err := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -329,7 +329,7 @@ func Test_Echo(t *testing.T) {
func Test_BadPayload(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
@@ -356,7 +356,7 @@ func Test_BadPayload(t *testing.T) {
func Test_String(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
go func() {
@@ -369,7 +369,7 @@ func Test_String(t *testing.T) {
}
}()
- assert.Contains(t, w.String(), "php ../../../tests/client.php echo pipes")
+ assert.Contains(t, w.String(), "php ../../tests/client.php echo pipes")
assert.Contains(t, w.String(), "ready")
assert.Contains(t, w.String(), "numExecs: 0")
}
@@ -377,7 +377,7 @@ func Test_String(t *testing.T) {
func Test_Echo_Slow(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "pipes", "10", "10")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "pipes", "10", "10")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
go func() {
@@ -405,7 +405,7 @@ func Test_Echo_Slow(t *testing.T) {
func Test_Broken(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "pipes")
data := ""
mu := &sync.Mutex{}
listener := func(event interface{}) {
@@ -439,7 +439,7 @@ func Test_Broken(t *testing.T) {
func Test_Error(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "error", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "error", "pipes")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
go func() {
@@ -468,7 +468,7 @@ func Test_Error(t *testing.T) {
func Test_NumExecs(t *testing.T) {
t.Parallel()
ctx := context.Background()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
w, _ := NewPipeFactory().SpawnWorkerWithTimeout(ctx, cmd)
go func() {
diff --git a/pkg/transport/socket/socket_factory.go b/transport/socket/socket_factory.go
index dc2b75cf..d98ce607 100755
--- a/pkg/transport/socket/socket_factory.go
+++ b/transport/socket/socket_factory.go
@@ -12,9 +12,9 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/pkg/relay"
"github.com/spiral/goridge/v3/pkg/socket"
+ "github.com/spiral/roadrunner/v2/events"
"github.com/spiral/roadrunner/v2/internal"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/worker"
"go.uber.org/multierr"
"golang.org/x/sync/errgroup"
diff --git a/pkg/transport/socket/socket_factory_spawn_test.go b/transport/socket/socket_factory_spawn_test.go
index 905a3b6b..363a3510 100644
--- a/pkg/transport/socket/socket_factory_spawn_test.go
+++ b/transport/socket/socket_factory_spawn_test.go
@@ -9,9 +9,9 @@ import (
"testing"
"time"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
)
@@ -28,7 +28,7 @@ func Test_Tcp_Start2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
assert.NoError(t, err)
@@ -51,7 +51,7 @@ func Test_Tcp_StartCloseFactory2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
f := NewSocketServer(ls, time.Minute)
defer func() {
@@ -84,7 +84,7 @@ func Test_Tcp_StartError2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
err = cmd.Start()
if err != nil {
t.Errorf("error executing the command: error %v", err)
@@ -108,7 +108,7 @@ func Test_Tcp_Failboot2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -140,7 +140,7 @@ func Test_Tcp_Invalid2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
w, err := NewSocketServer(ls, time.Second*1).SpawnWorker(cmd)
assert.Error(t, err)
@@ -160,7 +160,7 @@ func Test_Tcp_Broken2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "tcp")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -214,7 +214,7 @@ func Test_Tcp_Echo2(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, _ := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
go func() {
@@ -247,7 +247,7 @@ func Test_Unix_Start2(t *testing.T) {
assert.NoError(t, err)
}()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
assert.NoError(t, err)
@@ -271,7 +271,7 @@ func Test_Unix_Failboot2(t *testing.T) {
assert.NoError(t, err)
}()
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -298,7 +298,7 @@ func Test_Unix_Timeout2(t *testing.T) {
assert.NoError(t, err)
}()
- cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "unix", "200", "0")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "unix", "200", "0")
w, err := NewSocketServer(ls, time.Millisecond*100).SpawnWorker(cmd)
assert.Nil(t, w)
@@ -314,7 +314,7 @@ func Test_Unix_Invalid2(t *testing.T) {
assert.NoError(t, err)
}()
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
w, err := NewSocketServer(ls, time.Second*10).SpawnWorker(cmd)
assert.Error(t, err)
@@ -329,7 +329,7 @@ func Test_Unix_Broken2(t *testing.T) {
assert.NoError(t, errC)
}()
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "unix")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -378,7 +378,7 @@ func Test_Unix_Echo2(t *testing.T) {
assert.NoError(t, err)
}()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
if err != nil {
@@ -416,7 +416,7 @@ func Benchmark_Tcp_SpawnWorker_Stop2(b *testing.B) {
f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := f.SpawnWorker(cmd)
if err != nil {
@@ -441,7 +441,7 @@ func Benchmark_Tcp_Worker_ExecEcho2(b *testing.B) {
assert.NoError(b, err)
}()
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
if err != nil {
@@ -481,7 +481,7 @@ func Benchmark_Unix_SpawnWorker_Stop2(b *testing.B) {
f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := f.SpawnWorker(cmd)
if err != nil {
@@ -510,7 +510,7 @@ func Benchmark_Unix_Worker_ExecEcho2(b *testing.B) {
b.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorker(cmd)
if err != nil {
diff --git a/pkg/transport/socket/socket_factory_test.go b/transport/socket/socket_factory_test.go
index 17437e2f..879dba8e 100755
--- a/pkg/transport/socket/socket_factory_test.go
+++ b/transport/socket/socket_factory_test.go
@@ -9,9 +9,9 @@ import (
"testing"
"time"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/payload"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/events"
+ "github.com/spiral/roadrunner/v2/payload"
+ "github.com/spiral/roadrunner/v2/worker"
"github.com/stretchr/testify/assert"
)
@@ -31,7 +31,7 @@ func Test_Tcp_Start(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
@@ -56,7 +56,7 @@ func Test_Tcp_StartCloseFactory(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
f := NewSocketServer(ls, time.Minute)
defer func() {
@@ -93,7 +93,7 @@ func Test_Tcp_StartError(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "pipes")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "pipes")
err = cmd.Start()
if err != nil {
t.Errorf("error executing the command: error %v", err)
@@ -122,7 +122,7 @@ func Test_Tcp_Failboot(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -156,7 +156,7 @@ func Test_Tcp_Timeout(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "tcp", "200", "0")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "tcp", "200", "0")
w, err := NewSocketServer(ls, time.Millisecond*1).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
@@ -179,7 +179,7 @@ func Test_Tcp_Invalid(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
w, err := NewSocketServer(ls, time.Second*1).SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
@@ -201,7 +201,7 @@ func Test_Tcp_Broken(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "tcp")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -257,7 +257,7 @@ func Test_Tcp_Echo(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, _ := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
go func() {
@@ -296,7 +296,7 @@ func Test_Unix_Start(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
assert.NoError(t, err)
@@ -326,7 +326,7 @@ func Test_Unix_Failboot(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/failboot.php")
+ cmd := exec.Command("php", "../../tests/failboot.php")
finish := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -359,7 +359,7 @@ func Test_Unix_Timeout(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/slow-client.php", "echo", "unix", "200", "0")
+ cmd := exec.Command("php", "../../tests/slow-client.php", "echo", "unix", "200", "0")
w, err := NewSocketServer(ls, time.Millisecond*100).SpawnWorkerWithTimeout(ctx, cmd)
assert.Nil(t, w)
@@ -381,7 +381,7 @@ func Test_Unix_Invalid(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/invalid.php")
+ cmd := exec.Command("php", "../../tests/invalid.php")
w, err := NewSocketServer(ls, time.Second*10).SpawnWorkerWithTimeout(ctx, cmd)
assert.Error(t, err)
@@ -402,7 +402,7 @@ func Test_Unix_Broken(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "broken", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "broken", "unix")
block := make(chan struct{}, 10)
listener := func(event interface{}) {
@@ -459,7 +459,7 @@ func Test_Unix_Echo(t *testing.T) {
t.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -503,7 +503,7 @@ func Benchmark_Tcp_SpawnWorker_Stop(b *testing.B) {
f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := f.SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -534,7 +534,7 @@ func Benchmark_Tcp_Worker_ExecEcho(b *testing.B) {
b.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "tcp")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "tcp")
w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -572,7 +572,7 @@ func Benchmark_Unix_SpawnWorker_Stop(b *testing.B) {
f := NewSocketServer(ls, time.Minute)
for n := 0; n < b.N; n++ {
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := f.SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
@@ -599,7 +599,7 @@ func Benchmark_Unix_Worker_ExecEcho(b *testing.B) {
b.Skip("socket is busy")
}
- cmd := exec.Command("php", "../../../tests/client.php", "echo", "unix")
+ cmd := exec.Command("php", "../../tests/client.php", "echo", "unix")
w, err := NewSocketServer(ls, time.Minute).SpawnWorkerWithTimeout(ctx, cmd)
if err != nil {
diff --git a/pkg/worker/interface.go b/worker/interface.go
index ed8704bb..25e98f0a 100644
--- a/pkg/worker/interface.go
+++ b/worker/interface.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/spiral/goridge/v3/pkg/relay"
- "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/payload"
)
// State represents WorkerProcess status and updated time.
diff --git a/pkg/worker/state.go b/worker/state.go
index bf152e8b..bf152e8b 100755
--- a/pkg/worker/state.go
+++ b/worker/state.go
diff --git a/pkg/worker/state_test.go b/worker/state_test.go
index c67182d6..c67182d6 100755
--- a/pkg/worker/state_test.go
+++ b/worker/state_test.go
diff --git a/pkg/worker/sync_worker.go b/worker/sync_worker.go
index 74e29b71..deea8cb1 100755
--- a/pkg/worker/sync_worker.go
+++ b/worker/sync_worker.go
@@ -9,7 +9,7 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/pkg/frame"
"github.com/spiral/goridge/v3/pkg/relay"
- "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/payload"
"go.uber.org/multierr"
)
diff --git a/pkg/worker/sync_worker_test.go b/worker/sync_worker_test.go
index 64580f9f..41c0c92b 100755
--- a/pkg/worker/sync_worker_test.go
+++ b/worker/sync_worker_test.go
@@ -4,7 +4,7 @@ import (
"os/exec"
"testing"
- "github.com/spiral/roadrunner/v2/pkg/payload"
+ "github.com/spiral/roadrunner/v2/payload"
"github.com/stretchr/testify/assert"
)
diff --git a/pkg/worker/worker.go b/worker/worker.go
index fa74e7b5..38a1e9ac 100755
--- a/pkg/worker/worker.go
+++ b/worker/worker.go
@@ -10,8 +10,8 @@ import (
"github.com/spiral/errors"
"github.com/spiral/goridge/v3/pkg/relay"
+ "github.com/spiral/roadrunner/v2/events"
"github.com/spiral/roadrunner/v2/internal"
- "github.com/spiral/roadrunner/v2/pkg/events"
"go.uber.org/multierr"
)
diff --git a/pkg/worker/worker_test.go b/worker/worker_test.go
index 805f66b5..805f66b5 100755
--- a/pkg/worker/worker_test.go
+++ b/worker/worker_test.go
diff --git a/pkg/worker_watcher/container/channel/vec.go b/worker_watcher/container/channel/vec.go
index 5605f1e0..fd50c8d1 100644
--- a/pkg/worker_watcher/container/channel/vec.go
+++ b/worker_watcher/container/channel/vec.go
@@ -6,7 +6,7 @@ import (
"sync/atomic"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/worker"
)
type Vec struct {
diff --git a/pkg/worker_watcher/container/queue/queue.go b/worker_watcher/container/queue/queue.go
index edf81d60..a274a836 100644
--- a/pkg/worker_watcher/container/queue/queue.go
+++ b/worker_watcher/container/queue/queue.go
@@ -5,7 +5,7 @@ import (
"sync"
"sync/atomic"
- "github.com/spiral/roadrunner/v2/pkg/worker"
+ "github.com/spiral/roadrunner/v2/worker"
)
const (
diff --git a/pkg/worker_watcher/worker_watcher.go b/worker_watcher/worker_watcher.go
index 83f8e627..78bae778 100755
--- a/pkg/worker_watcher/worker_watcher.go
+++ b/worker_watcher/worker_watcher.go
@@ -7,10 +7,10 @@ import (
"time"
"github.com/spiral/errors"
- "github.com/spiral/roadrunner/v2/pkg/events"
- "github.com/spiral/roadrunner/v2/pkg/worker"
- "github.com/spiral/roadrunner/v2/pkg/worker_watcher/container/channel"
+ "github.com/spiral/roadrunner/v2/events"
"github.com/spiral/roadrunner/v2/utils"
+ "github.com/spiral/roadrunner/v2/worker"
+ "github.com/spiral/roadrunner/v2/worker_watcher/container/channel"
)
// Vector interface represents vector container
@@ -300,8 +300,8 @@ func (ww *workerWatcher) wait(w worker.BaseProcess) {
err = ww.Allocate()
if err != nil {
ww.events.Push(events.PoolEvent{
- Event: events.EventPoolError,
- Payload: errors.E(op, err),
+ Event: events.EventWorkerProcessExit,
+ Error: errors.E(op, err),
})
// no workers at all, panic