summaryrefslogtreecommitdiff
path: root/vendor/tower
diff options
context:
space:
mode:
authormo khan <mo@mokhan.ca>2025-07-15 16:37:08 -0600
committermo khan <mo@mokhan.ca>2025-07-17 16:30:22 -0600
commit45df4d0d9b577fecee798d672695fe24ff57fb1b (patch)
tree1b99bf645035b58e0d6db08c7a83521f41f7a75b /vendor/tower
parentf94f79608393d4ab127db63cc41668445ef6b243 (diff)
feat: migrate from Cedar to SpiceDB authorization system
This is a major architectural change that replaces the Cedar policy-based authorization system with SpiceDB's relation-based authorization. Key changes: - Migrate from Rust to Go implementation - Replace Cedar policies with SpiceDB schema and relationships - Switch from envoy `ext_authz` with Cedar to SpiceDB permission checks - Update build system and dependencies for Go ecosystem - Maintain Envoy integration for external authorization This change enables more flexible permission modeling through SpiceDB's Google Zanzibar inspired relation-based system, supporting complex hierarchical permissions that were difficult to express in Cedar. Breaking change: Existing Cedar policies and Rust-based configuration will no longer work and need to be migrated to SpiceDB schema.
Diffstat (limited to 'vendor/tower')
-rw-r--r--vendor/tower/.cargo-checksum.json1
-rw-r--r--vendor/tower/CHANGELOG.md444
-rw-r--r--vendor/tower/Cargo.lock760
-rw-r--r--vendor/tower/Cargo.toml328
-rw-r--r--vendor/tower/LICENSE25
-rw-r--r--vendor/tower/README.md187
-rw-r--r--vendor/tower/examples/tower-balance.rs235
-rw-r--r--vendor/tower/src/balance/error.rs21
-rw-r--r--vendor/tower/src/balance/mod.rs50
-rw-r--r--vendor/tower/src/balance/p2c/layer.rs60
-rw-r--r--vendor/tower/src/balance/p2c/make.rs125
-rw-r--r--vendor/tower/src/balance/p2c/mod.rs41
-rw-r--r--vendor/tower/src/balance/p2c/service.rs259
-rw-r--r--vendor/tower/src/balance/p2c/test.rs125
-rw-r--r--vendor/tower/src/buffer/error.rs68
-rw-r--r--vendor/tower/src/buffer/future.rs79
-rw-r--r--vendor/tower/src/buffer/layer.rs75
-rw-r--r--vendor/tower/src/buffer/message.rs16
-rw-r--r--vendor/tower/src/buffer/mod.rs47
-rw-r--r--vendor/tower/src/buffer/service.rs144
-rw-r--r--vendor/tower/src/buffer/worker.rs227
-rw-r--r--vendor/tower/src/builder/mod.rs809
-rw-r--r--vendor/tower/src/discover/list.rs62
-rw-r--r--vendor/tower/src/discover/mod.rs106
-rw-r--r--vendor/tower/src/filter/future.rs98
-rw-r--r--vendor/tower/src/filter/layer.rs72
-rw-r--r--vendor/tower/src/filter/mod.rs191
-rw-r--r--vendor/tower/src/filter/predicate.rs65
-rw-r--r--vendor/tower/src/hedge/delay.rs126
-rw-r--r--vendor/tower/src/hedge/latency.rs89
-rw-r--r--vendor/tower/src/hedge/mod.rs266
-rw-r--r--vendor/tower/src/hedge/rotating_histogram.rs73
-rw-r--r--vendor/tower/src/hedge/select.rs105
-rw-r--r--vendor/tower/src/layer.rs14
-rw-r--r--vendor/tower/src/lib.rs228
-rw-r--r--vendor/tower/src/limit/concurrency/future.rs41
-rw-r--r--vendor/tower/src/limit/concurrency/layer.rs60
-rw-r--r--vendor/tower/src/limit/concurrency/mod.rs10
-rw-r--r--vendor/tower/src/limit/concurrency/service.rs118
-rw-r--r--vendor/tower/src/limit/mod.rs9
-rw-r--r--vendor/tower/src/limit/rate/layer.rs26
-rw-r--r--vendor/tower/src/limit/rate/mod.rs8
-rw-r--r--vendor/tower/src/limit/rate/rate.rs30
-rw-r--r--vendor/tower/src/limit/rate/service.rs130
-rw-r--r--vendor/tower/src/load/completion.rs95
-rw-r--r--vendor/tower/src/load/constant.rs79
-rw-r--r--vendor/tower/src/load/mod.rs89
-rw-r--r--vendor/tower/src/load/peak_ewma.rs406
-rw-r--r--vendor/tower/src/load/pending_requests.rs217
-rw-r--r--vendor/tower/src/load_shed/error.rs34
-rw-r--r--vendor/tower/src/load_shed/future.rs73
-rw-r--r--vendor/tower/src/load_shed/layer.rs33
-rw-r--r--vendor/tower/src/load_shed/mod.rs76
-rw-r--r--vendor/tower/src/macros.rs42
-rw-r--r--vendor/tower/src/make/make_connection.rs47
-rw-r--r--vendor/tower/src/make/make_service.rs251
-rw-r--r--vendor/tower/src/make/make_service/shared.rs146
-rw-r--r--vendor/tower/src/make/mod.rs14
-rw-r--r--vendor/tower/src/ready_cache/cache.rs503
-rw-r--r--vendor/tower/src/ready_cache/error.rs28
-rw-r--r--vendor/tower/src/ready_cache/mod.rs6
-rw-r--r--vendor/tower/src/reconnect/future.rs73
-rw-r--r--vendor/tower/src/reconnect/mod.rs171
-rw-r--r--vendor/tower/src/retry/backoff.rs263
-rw-r--r--vendor/tower/src/retry/budget/mod.rs91
-rw-r--r--vendor/tower/src/retry/budget/tps_budget.rs260
-rw-r--r--vendor/tower/src/retry/future.rs120
-rw-r--r--vendor/tower/src/retry/layer.rs27
-rw-r--r--vendor/tower/src/retry/mod.rs94
-rw-r--r--vendor/tower/src/retry/policy.rs94
-rw-r--r--vendor/tower/src/spawn_ready/future.rs8
-rw-r--r--vendor/tower/src/spawn_ready/layer.rs18
-rw-r--r--vendor/tower/src/spawn_ready/mod.rs9
-rw-r--r--vendor/tower/src/spawn_ready/service.rs88
-rw-r--r--vendor/tower/src/steer/mod.rs204
-rw-r--r--vendor/tower/src/timeout/error.rs22
-rw-r--r--vendor/tower/src/timeout/future.rs53
-rw-r--r--vendor/tower/src/timeout/layer.rs24
-rw-r--r--vendor/tower/src/timeout/mod.rs70
-rw-r--r--vendor/tower/src/util/and_then.rs130
-rw-r--r--vendor/tower/src/util/boxed/layer.rs97
-rw-r--r--vendor/tower/src/util/boxed/layer_clone.rs128
-rw-r--r--vendor/tower/src/util/boxed/layer_clone_sync.rs129
-rw-r--r--vendor/tower/src/util/boxed/mod.rs11
-rw-r--r--vendor/tower/src/util/boxed/sync.rs111
-rw-r--r--vendor/tower/src/util/boxed/unsync.rs86
-rw-r--r--vendor/tower/src/util/boxed_clone.rs136
-rw-r--r--vendor/tower/src/util/boxed_clone_sync.rs101
-rw-r--r--vendor/tower/src/util/call_all/common.rs141
-rw-r--r--vendor/tower/src/util/call_all/mod.rs11
-rw-r--r--vendor/tower/src/util/call_all/ordered.rs177
-rw-r--r--vendor/tower/src/util/call_all/unordered.rs98
-rw-r--r--vendor/tower/src/util/either.rs103
-rw-r--r--vendor/tower/src/util/future_service.rs215
-rw-r--r--vendor/tower/src/util/map_err.rs98
-rw-r--r--vendor/tower/src/util/map_future.rs113
-rw-r--r--vendor/tower/src/util/map_request.rs90
-rw-r--r--vendor/tower/src/util/map_response.rs98
-rw-r--r--vendor/tower/src/util/map_result.rs99
-rw-r--r--vendor/tower/src/util/mod.rs1073
-rw-r--r--vendor/tower/src/util/oneshot.rs105
-rw-r--r--vendor/tower/src/util/optional/error.rs21
-rw-r--r--vendor/tower/src/util/optional/future.rs40
-rw-r--r--vendor/tower/src/util/optional/mod.rs59
-rw-r--r--vendor/tower/src/util/ready.rs103
-rw-r--r--vendor/tower/src/util/rng.rs181
-rw-r--r--vendor/tower/src/util/service_fn.rs82
-rw-r--r--vendor/tower/src/util/then.rs103
-rw-r--r--vendor/tower/tests/balance/main.rs170
-rw-r--r--vendor/tower/tests/buffer/main.rs459
-rw-r--r--vendor/tower/tests/builder.rs55
-rw-r--r--vendor/tower/tests/filter/async_filter.rs63
-rw-r--r--vendor/tower/tests/hedge/main.rs184
-rw-r--r--vendor/tower/tests/limit/concurrency.rs217
-rw-r--r--vendor/tower/tests/limit/main.rs5
-rw-r--r--vendor/tower/tests/limit/rate.rs71
-rw-r--r--vendor/tower/tests/load_shed/main.rs39
-rw-r--r--vendor/tower/tests/ready_cache/main.rs223
-rw-r--r--vendor/tower/tests/retry/main.rs226
-rw-r--r--vendor/tower/tests/spawn_ready/main.rs86
-rw-r--r--vendor/tower/tests/steer/main.rs59
-rw-r--r--vendor/tower/tests/support.rs112
-rw-r--r--vendor/tower/tests/util/call_all.rs253
-rw-r--r--vendor/tower/tests/util/main.rs8
-rw-r--r--vendor/tower/tests/util/oneshot.rs40
-rw-r--r--vendor/tower/tests/util/service_fn.rs12
126 files changed, 0 insertions, 16102 deletions
diff --git a/vendor/tower/.cargo-checksum.json b/vendor/tower/.cargo-checksum.json
deleted file mode 100644
index 8a9113f4..00000000
--- a/vendor/tower/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{"CHANGELOG.md":"8a03595505c62fe9db4f7ab459ae5743c75ccf1bb62896b4e6dbb0dccff7d54a","Cargo.lock":"5e28168b3c1cc2f2d8c09b4b8c22b5fb6fc3171d46063e869a950ccc3789b74b","Cargo.toml":"2d36de2fc4c565be880c0b43c1c8eeb2d533ec18a34004f6b43df84cf9317b96","LICENSE":"4249c8e6c5ebb85f97c77e6457c6fafc1066406eb8f1ef61e796fbdc5ff18482","README.md":"f8cd5239719594b9157fce747eb09255f848017aabed2848d210aa377b39ca98","examples/tower-balance.rs":"f99cbf31de892c5f7f7a3aa10671a90d994c9e4de91ae06fbe74d7cb2e575471","src/balance/error.rs":"2ee79fc7ed868781bb4b3ef12c9464772a246be3841befa3fa90e030f60531b1","src/balance/mod.rs":"73b8f0d5377edfbfdecbe828450e1a04003869cf08e1d1c0106616125638b4e6","src/balance/p2c/layer.rs":"dad25ecf6797186c451e219592e98d1ecb5cac4c6e806823b5065e488211195e","src/balance/p2c/make.rs":"b0a601cef73c5c38df9d7e9a313bb632fe74cf25b586583f254e92507cb98086","src/balance/p2c/mod.rs":"5b9faa387586d742e74fa65679f42895ad4c1f42ad28e8bb8df9525dee0998d9","src/balance/p2c/service.rs":"bfecc9bdcda0c4ca614554aaaec3402690792edc78a4418a0af4f65b52c8f31b","src/balance/p2c/test.rs":"7351bd8528b713352d7195d41ad7840e655d106c72a863dc428ddee4f5ec5261","src/buffer/error.rs":"cb32f3412eaa5c6317d3b1a2cb7d23416ba74e6b08967229699a9c62fe201c36","src/buffer/future.rs":"86825db0749a56d8590d572271814fb9b3ee068809d54efb79a765879157fe5a","src/buffer/layer.rs":"550a80ec01eadd54391e673fa521dd2b52e63a2ea94bb71e2efddef1b882d76d","src/buffer/message.rs":"2f0b52f5757c57b667b8537ad7da77dda1fa84f4b305cb5ff8e7950bcf2adb2a","src/buffer/mod.rs":"7708b45c4e67800d61384f954596bb9ad71e5517c94d1227097c9ec2073977e2","src/buffer/service.rs":"2568d32f8bb1038a5c06558971e8e81ecafdc326eef490515f0b83f7cc4efd7d","src/buffer/worker.rs":"64656f1f18433dd80d99669352acec14fecd43cb8ffa26c8f9df21f42a632ebf","src/builder/mod.rs":"f252cb52fbc9bf930313f5b8e01be3f9330a9420671d8fbac537b9ec178b6208","src/discover/list.rs":"49285478e09e8549a51ef0649353689c7105a94bb6a2d01c8920f8f5e6011fa2","src/discover/mod.rs":"fc9e05ab1c1053dcc954bc
4a201f1e140c1840101108e511cf79f53d59f168e7","src/filter/future.rs":"4fc71c6e085ee4f8e2d52bf5628cdbbb6015f4bcb9e0a6aed3283ec165cd523b","src/filter/layer.rs":"a4939b4e25049fcf98d41a999750a01245e41fa9e04267354f5036604d8d8000","src/filter/mod.rs":"548708798210b61fd8167180533219dfd052678d0629350f55d3052e1c72eba1","src/filter/predicate.rs":"633d4374ad50c130d09fa549f11e4db8e1d72dbb1997c3e7cd9dd09dde232844","src/hedge/delay.rs":"cc501cf4070dbc3e6bb7d433acc3349aaa49b0bbb3a0eae3b469942db8bbb0fc","src/hedge/latency.rs":"11dca1117450c2e2859fe7d68f52b9d4502fc55ad15414a076260592cd3c49bb","src/hedge/mod.rs":"614c301e93b748031bc1836eccd44f16f4998710459f126d8bf6ee8b462e8034","src/hedge/rotating_histogram.rs":"f075966185592451d2f6784fdecf3109086b168f7a986c35fad52780be3e48a9","src/hedge/select.rs":"6fcbca8c90c999e12a87e7fdbd691b92c31b9c30f249d3db62bea4586ec0caa7","src/layer.rs":"0c422e476b83ea6adc9e6f96728aafcd52f6ac6a4457697e96bc228eec0e59b1","src/lib.rs":"787924ce11bf5ce940c75feb8b158ab74b9a949b2f1c9a2cc5acb115fbab5141","src/limit/concurrency/future.rs":"0c3be7829f7cf0ea57c6277193754e4f8b869f5a4e3e46b1a5c2ffe41ddb812f","src/limit/concurrency/layer.rs":"7b8862fd59dce5a0b9a16175b7d6516974a9dd2ac4e4543e633655a528045996","src/limit/concurrency/mod.rs":"99e5e2bf0273fae172f893d89a93bb3768dc0522b09ae94c5d0ab6492d460391","src/limit/concurrency/service.rs":"daf29dbe8f10399a5df6e2bd06357b861a7969d7f5817cf6495e9e9fee51e350","src/limit/mod.rs":"59565acc1d14a3500069ba2dc9667e7fc03162cc89db4051204d7d38b5b69ccb","src/limit/rate/layer.rs":"49b612e4d70c01e034f46cf21abf6d42a50ccd838b8a606c41294ae8e132316d","src/limit/rate/mod.rs":"6414465408a5d9c06789f586ddc2f40f8e59f3a1eb19cec5e37df82095012cb2","src/limit/rate/rate.rs":"af8c6efdf0dd1911e0bf5c4af0e4ae49dc5a45d30abfef9a606c2a1664081ab7","src/limit/rate/service.rs":"77785af51753f0d548e4367860b922482dbbf9cb33ad791489277ac59c96d90e","src/load/completion.rs":"7af27f4112046b0c7904751820187fb71dda9ccc1c2968e7ede5dc37152774e4","src/load/constant.rs":"87f8910
21018e55ab37389bbe5d4c18b3d9b97ff557fbec26639f1c80bfce4c7","src/load/mod.rs":"1e9b137c3b0de832a7439afe596ed66cd5023ee12f92bbdf41c79f81596ac462","src/load/peak_ewma.rs":"8c83d741be781a3c508a2d857b8166daab5a5c74b283f6e1c73d5fb5b86041b9","src/load/pending_requests.rs":"93c6595cd206e587f142119cef64427a8ae32eb3cdbe053dbe92b441d6cd5a10","src/load_shed/error.rs":"5cde276670858be38b84269a3833b140bd690680a30b52518c7218094d9c5184","src/load_shed/future.rs":"999c3080c947746dff8623cec30a34b8af9d46d73192dfe4be821b0e418a0e89","src/load_shed/layer.rs":"48c7a9973132740aa894bade8d9a53624fad60c40b83b523860f97ed83a4496e","src/load_shed/mod.rs":"919a6b369d2e32265e287994da6c99716c4c53263e25dd44c2e8ac7d68a36604","src/macros.rs":"5910f539cd05afa93bfeaa2c7b5b4c20714d4df06f052a699ce3d9dcc89131ac","src/make/make_connection.rs":"d30788e3406d2e7dbe218f9ea40aaec6e5763e635f3df998677115c2e768e21a","src/make/make_service.rs":"eba1e9615416d9b2f019d87dfc82c4c7a8245aa06722bed1391f62664c8db35a","src/make/make_service/shared.rs":"99f83a50621cf823c4beaec82bdd72af894f75dec973dcd2303556d58d9f3e9f","src/make/mod.rs":"defd871542f92bea3be28e83c766e31644c007a584b824bdb59944cf5ece3d0d","src/ready_cache/cache.rs":"be01ac0b970949015213d6d3a2a06f98c8326546319f81441a1e52b964312dfe","src/ready_cache/error.rs":"385425991f9801eecd4206e94b46f1ec5ed7eb47a44aadb715515faa960052b2","src/ready_cache/mod.rs":"987076ba544d8d321229e5faf65d1142663bf02c391eb1df73c7881b6f1ebdae","src/reconnect/future.rs":"8c5306a38e84513f95c7e6165acc0bfccf71532f1f4826543b21bb288ff567f9","src/reconnect/mod.rs":"b76c7358e3c6aeb59e132af4d43f0a860898ef60e0ccc3ce04ae25a4d21ed75f","src/retry/backoff.rs":"4e44dff77fe29ac662aac237966a53b23e959a9c740367ee38b628fa410ea2f4","src/retry/budget/mod.rs":"eead7c9201ff6de3c023ee43c6d979c189491c43d0e65bbc68bbd8a7b62b7137","src/retry/budget/tps_budget.rs":"86fc1d1e5b4f1f4e6ce4b3cea04f8ea09449a51057e6617755adaa0cd4fd786a","src/retry/future.rs":"c029d3898c55ef89a22b3ea79a5f34f3ec19be43eb9670349872c9af197bf7e4","src/
retry/layer.rs":"9b99d1f195cf26f92e1a1358aed5575d7b1419155be9dd9b4b36520455a5c3f3","src/retry/mod.rs":"b1870f11b0cd4a3c563f6f808ccffe631f97adc72f0f8a0565116548882ad015","src/retry/policy.rs":"cf1ac847e1500696b366ed31fdb47d5fcf7e721074967f6ebedb43b7e80d1170","src/spawn_ready/future.rs":"0fbcfdcb51faaed676f38bc26355c1c77e7da1fd99fd48718eb9682b3b085960","src/spawn_ready/layer.rs":"9984776ae6b8a56b60dd844b0fe3f0a74ec3a4644c28201b3d10e8a67ebcb8ed","src/spawn_ready/mod.rs":"6e7d781856d451e5195bda913378691858d966cd8724ade3e0b183c193268185","src/spawn_ready/service.rs":"654badbaf5315fb9eca3b919fd23fe26c37e2f299c28e59bf978083368e97642","src/steer/mod.rs":"9336d2503bf3353c7aab165c9ce04b1146936d982546e0ea168fab2d44891f4e","src/timeout/error.rs":"b51cedc818cd99394bcdd2aae857580c4157107ae9099ecd98e3a8ec757fc7b6","src/timeout/future.rs":"c95840418f9409ab807c9658248f043a637795417bf6fce3c34cd3c6ca2eb50c","src/timeout/layer.rs":"c3eb08eb639d586a4387347d66e2c614b21439e9910adfb7cdc8bb3accb7978d","src/timeout/mod.rs":"9759595d35c7545287642c5c90d0eb3460f6631d890df8c7b947c6a82e94a428","src/util/and_then.rs":"756e617a8bf1beead2c97e798cb3d867019f89cc7040cb66e7a267acb559e452","src/util/boxed/layer.rs":"b04a61d416697a33cebfa0613a1bbaab39f1e006c40318abc766a7c359ecbc7d","src/util/boxed/layer_clone.rs":"ce60dc639f37ef4982a1327832d073ee3244f9db786ca7f6293be318bbc1f791","src/util/boxed/layer_clone_sync.rs":"e62f05f2a44b55b56159233cb7cf69abac5b31585a6882c7cebf95285c1d03e0","src/util/boxed/mod.rs":"66e47f27533c090c2ef20eea42f3a84a02648c847fdaf147266bdfbe4f3789f0","src/util/boxed/sync.rs":"b57b658f36ec03a434e25a86dccb17ff3f9adc4ef4a7db15b963d81cbb623dd1","src/util/boxed/unsync.rs":"8a1880bd5d06544fe14ee55b88891061afba1eb5e95ee1c2c174a179928f1f3b","src/util/boxed_clone.rs":"fda370fe8d2f0dd5671534b3f4c66fbe0357cf11427a327eea0eff64bbe112ee","src/util/boxed_clone_sync.rs":"39a71eb2019ca468acef3116bbf1f9103297e1bc55c2267624452f861e16d9e7","src/util/call_all/common.rs":"5c7fee79aeb1d06e802e3441dcc323794fe
c8c13c0b15f4ffc082e2fc331f604","src/util/call_all/mod.rs":"30ad048636f4efb770a2d2dc3e5830827125fed132aebe741429fe766a6a38b1","src/util/call_all/ordered.rs":"b6f9c1d859ad97d2c0fc1bcb2bc7140a1c309b70827d78223ad5947041b3d077","src/util/call_all/unordered.rs":"4f41c1d16640182e20490dfbc99bfc2d84dfc6c01b70cf1fd077bd85387dc884","src/util/either.rs":"ec87f9dd219de3e29442b9f2276e65c80b6715a1ab8fdb7931d092f5df94c49e","src/util/future_service.rs":"24ffca71db28db861d57bd6d2482554492ea8db2bc795598c4d19829bc085ddc","src/util/map_err.rs":"605fd4f2c8ab2c4c40a526c29735ba6368d6a190ba07438c8725d959fbe4ed4c","src/util/map_future.rs":"ae6ca994de359ef5d268f4e3ad7a4637c9071e8b184a3b2d3dc7a45fe8589947","src/util/map_request.rs":"f6bcb735b1e9e79df1ef9605b7af647d11ba5c098445569bde97ba1cc935b929","src/util/map_response.rs":"06a3dbc522dbcfaf996155600d13309fb4d0a0ea7328d60bd77d845f19fa3267","src/util/map_result.rs":"5390b2177a21fec90cb8c0e0dcd93a2fa1fca5977292d65bdb9997312c7f265a","src/util/mod.rs":"7e921108e2e23063ebb5da1cc1da71156de9fad691c9a3f71e932112d9d6108b","src/util/oneshot.rs":"5e157ca0d1d22a92cfac18e632e2dfd5761319a55cd2cb0705d26406fc9eda50","src/util/optional/error.rs":"984180fcdac724c1dd8ce7bc7d35f1185555b5c407e025ea99a51bfa9ea506c2","src/util/optional/future.rs":"4ca99dbc2ab0eb39b922f54d6e159dce1922e4623a57762cd827a48fc400a77e","src/util/optional/mod.rs":"14446dff77502412d8de6654bcde0db1ce11ff95ba9ad7cd8a44128d52dd987d","src/util/ready.rs":"ed878448e46ddc9bfc4583ee0b14840325c99aadbe89abbc66122b550d481fbf","src/util/rng.rs":"585a8d731e3d2e0e2122ae70b8d2f0953890b0cfe1bb2586e183bba9c3f28326","src/util/service_fn.rs":"149dd5ef61af4a0ae8a55e28534f829a754dd0fa9604e01135143dcde7ed7c37","src/util/then.rs":"751f6e01f454ed894c935c46da3fcf77cc880b03d4c3d258f30aaa82c64dee83","tests/balance/main.rs":"fb527f3a6a0409c59d9c622d46d807adf1f70f6e85a574aa70d2b20e873f9f68","tests/buffer/main.rs":"2394802778c1f0f10b9e2a385864967fc268a3149f17eb620de1de9371d78f66","tests/builder.rs":"74fd26bea2162b2e3f3c9
9281ce09a0b5351a61e7c5563c0de455fa165feb231","tests/filter/async_filter.rs":"8748a06916f42859d9e07883ba09f1141362a8fd2f7c92380c1f6f0355fc6c98","tests/hedge/main.rs":"e3ca75823f8132ee2dbea21d68e5990c8523df06414bf93af16d78009d4a720f","tests/limit/concurrency.rs":"20b06a1c60f2b4aab764d90a91def14b094ec6d67deefc9529461cd999d343d5","tests/limit/main.rs":"039a2731c8fce846b71d98937da2a08cea71a6a0ed2ebe0f20d215f33cdec66e","tests/limit/rate.rs":"52cfca0f27b6081e3ad19ceace931628a4eaf668274f963e15d80e1ea7aec380","tests/load_shed/main.rs":"c1d82eb9b30a4b1f0517b48cce9e87c9fb75c8d055c5b537f125d89225f65e95","tests/ready_cache/main.rs":"7bc01d338c1888851303a07858acf9033d52b1c64f372a61201ac55122820640","tests/retry/main.rs":"25f3eee42700c852bd480557465a0a89d7f8340d005be31d6806eebed7bb736c","tests/spawn_ready/main.rs":"61230d0e5e412bfa7203142f4164cfb87402662e9cfd3de9ccf24f7edf911d16","tests/steer/main.rs":"979b3492545f4ecf71d65898a6c0c97d5f122ef6104e6a4e4820031fa10acff8","tests/support.rs":"8e6e32f9e0959b51096e38c98a848f10b2eedb9314335005b734f6a7e65ad11a","tests/util/call_all.rs":"15aaa16d2fe737ad1eb064660c25e2933adaf709fa3e959b5d3c458bf62bd193","tests/util/main.rs":"a8b7960492d409914041a358c8eda79c10462baaf6548757fe05285a43e7f4a8","tests/util/oneshot.rs":"e4f013e5e3d2f2dc25d43337a8dd50d45ba835af74883908b67cab3d05380795","tests/util/service_fn.rs":"302962b6723804b415bf079717809dffebc455595567d865578c8701f2c0941f"},"package":"d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"} \ No newline at end of file
diff --git a/vendor/tower/CHANGELOG.md b/vendor/tower/CHANGELOG.md
deleted file mode 100644
index fe82bfb5..00000000
--- a/vendor/tower/CHANGELOG.md
+++ /dev/null
@@ -1,444 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-# 0.5.2
-
-### Added
-
-- **util**: Add `BoxCloneSyncService` which is a `Clone + Send + Sync` boxed `Service` ([#777])
-- **util**: Add `BoxCloneSyncServiceLayer` which is a `Clone + Send + Sync` boxed `Layer` ([802])
-
-# 0.5.1
-
-### Fixed
-
-- Fix minimum version of `tower-layer` dependency ([#787])
-
-[#787]: https://github.com/tower-rs/tower/pull/787
-
-# 0.5.0
-
-### Fixed
-
-- **util**: `BoxService` is now `Sync` ([#702])
-
-### Changed
-
-- **util**: Removed deprecated `ServiceExt::ready_and` method and `ReadyAnd`
- future ([#652])
-- **retry**: **Breaking Change** `retry::Policy::retry` now accepts `&mut Req` and `&mut Res` instead of the previous mutable versions. This
- increases the flexibility of the retry policy. To update, update your method signature to include `mut` for both parameters. ([#584])
-- **retry**: **Breaking Change** Change Policy to accept &mut self ([#681])
-- **retry**: Add generic backoff utilities ([#685])
-- **retry**: Add Budget trait. This allows end-users to implement their own budget and bucket implementations. ([#703])
-- **reconnect**: **Breaking Change** Remove unused generic parameter from `Reconnect::new` ([#755])
-- **ready-cache**: Allow iteration over ready services ([#700])
-- **discover**: Implement `Clone` for Change ([#701])
-- **util**: Add a BoxCloneServiceLayer ([#708])
-- **rng**: use a simpler random 2-sampler ([#716])
-- **filter**: Derive `Clone` for `AsyncFilterLayer` ([#731])
-- **general**: Update IndexMap ([#741])
-- **MSRV**: Increase MSRV to 1.63.0 ([#741])
-
-[#702]: https://github.com/tower-rs/tower/pull/702
-[#652]: https://github.com/tower-rs/tower/pull/652
-[#584]: https://github.com/tower-rs/tower/pull/584
-[#681]: https://github.com/tower-rs/tower/pull/681
-[#685]: https://github.com/tower-rs/tower/pull/685
-[#703]: https://github.com/tower-rs/tower/pull/703
-[#755]: https://github.com/tower-rs/tower/pull/755
-[#700]: https://github.com/tower-rs/tower/pull/700
-[#701]: https://github.com/tower-rs/tower/pull/701
-[#708]: https://github.com/tower-rs/tower/pull/708
-[#716]: https://github.com/tower-rs/tower/pull/716
-[#731]: https://github.com/tower-rs/tower/pull/731
-[#741]: https://github.com/tower-rs/tower/pull/741
-
-# 0.4.12 (February 16, 2022)
-
-### Fixed
-
-- **hedge**, **load**, **retry**: Fix use of `Instant` operations that can panic
- on platforms where `Instant` is not monotonic ([#633])
-- Disable `attributes` feature on `tracing` dependency ([#623])
-- Remove unused dependencies and dependency features with some feature
- combinations ([#603], [#602])
-- **docs**: Fix a typo in the RustDoc for `Buffer` ([#622])
-
-### Changed
-
-- Updated minimum supported Rust version (MSRV) to 1.49.0.
-- **hedge**: Updated `hdrhistogram` dependency to v7.0 ([#602])
-- Updated `tokio-util` dependency to v0.7 ([#638])
-
-[#633]: https://github.com/tower-rs/tower/pull/633
-[#623]: https://github.com/tower-rs/tower/pull/623
-[#603]: https://github.com/tower-rs/tower/pull/603
-[#602]: https://github.com/tower-rs/tower/pull/602
-[#622]: https://github.com/tower-rs/tower/pull/622
-[#638]: https://github.com/tower-rs/tower/pull/638
-
-# 0.4.11 (November 18, 2021)
-
-### Added
-
-- **util**: Add `BoxCloneService` which is a `Clone + Send` boxed `Service` ([#615])
-- **util**: Add `ServiceExt::boxed` and `ServiceExt::boxed_clone` for applying the
- `BoxService` and `BoxCloneService` middleware ([#616])
-- **builder**: Add `ServiceBuilder::boxed` and `ServiceBuilder::boxed_clone` for
- applying `BoxService` and `BoxCloneService` layers ([#616])
-
-### Fixed
-
-- **util**: Remove redundant `F: Clone` bound from `ServiceExt::map_request` ([#607])
-- **util**: Remove unnecessary `Debug` bounds from `impl Debug for BoxService` ([#617])
-- **util**: Remove unnecessary `Debug` bounds from `impl Debug for UnsyncBoxService` ([#617])
-- **balance**: Remove redundant `Req: Clone` bound from `Clone` impls
- for `MakeBalance`, and `MakeBalanceLayer` ([#607])
-- **balance**: Remove redundant `Req: Debug` bound from `Debug` impls
- for `MakeBalance`, `MakeFuture`, `Balance`, and `Pool` ([#607])
-- **ready-cache**: Remove redundant `Req: Debug` bound from `Debug` impl
- for `ReadyCache` ([#607])
-- **steer**: Remove redundant `Req: Debug` bound from `Debug` impl
- for `Steer` ([#607])
-- **docs**: Fix `doc(cfg(...))` attributes
- of `PeakEwmaDiscover`, and `PendingRequestsDiscover` ([#610])
-
-[#607]: https://github.com/tower-rs/tower/pull/607
-[#610]: https://github.com/tower-rs/tower/pull/610
-[#615]: https://github.com/tower-rs/tower/pull/615
-[#616]: https://github.com/tower-rs/tower/pull/616
-[#617]: https://github.com/tower-rs/tower/pull/617
-
-# 0.4.10 (October 19, 2021)
-
-- Fix accidental breaking change when using the
- `rustdoc::broken_intra_doc_links` lint ([#605])
-- Clarify that tower's minimum supported rust version is 1.46 ([#605])
-
-[#605]: https://github.com/tower-rs/tower/pull/605
-
-# 0.4.9 (October 13, 2021)
-
-- Migrate to [pin-project-lite] ([#595])
-- **builder**: Implement `Layer` for `ServiceBuilder` ([#600])
-- **builder**: Add `ServiceBuilder::and_then` analogous to
- `ServiceExt::and_then` ([#601])
-
-[#600]: https://github.com/tower-rs/tower/pull/600
-[#601]: https://github.com/tower-rs/tower/pull/601
-[#595]: https://github.com/tower-rs/tower/pull/595
-[pin-project-lite]: https://crates.io/crates/pin-project-lite
-
-# 0.4.8 (May 28, 2021)
-
-- **builder**: Add `ServiceBuilder::map_result` analogous to
- `ServiceExt::map_result` ([#583])
-- **limit**: Add `GlobalConcurrencyLimitLayer` to allow reusing a concurrency
- limit across multiple services ([#574])
-
-[#574]: https://github.com/tower-rs/tower/pull/574
-[#583]: https://github.com/tower-rs/tower/pull/583
-
-# 0.4.7 (April 27, 2021)
-
-### Added
-
-- **builder**: Add `ServiceBuilder::check_service` to check the request,
- response, and error types of the output service. ([#576])
-- **builder**: Add `ServiceBuilder::check_service_clone` to check the output
- service can be cloned. ([#576])
-
-### Fixed
-
-- **spawn_ready**: Abort spawned background tasks when the `SpawnReady` service
- is dropped, fixing a potential task/resource leak (#[581])
-- Fixed broken documentation links ([#578])
-
-[#576]: https://github.com/tower-rs/tower/pull/576
-[#578]: https://github.com/tower-rs/tower/pull/578
-[#581]: https://github.com/tower-rs/tower/pull/581
-
-# 0.4.6 (February 26, 2021)
-
-### Deprecated
-
-- **util**: Deprecated `ServiceExt::ready_and` (renamed to `ServiceExt::ready`).
- ([#567])
-- **util**: Deprecated `ReadyAnd` future (renamed to `Ready`). ([#567])
-### Added
-
-- **builder**: Add `ServiceBuilder::layer_fn` to add a layer built from a
- function. ([#560])
-- **builder**: Add `ServiceBuilder::map_future` for transforming the futures
- produced by a service. ([#559])
-- **builder**: Add `ServiceBuilder::service_fn` for applying `Layer`s to an
- async function using `util::service_fn`. ([#564])
-- **util**: Add example for `service_fn`. ([#563])
-- **util**: Add `BoxLayer` for creating boxed `Layer` trait objects. ([#569])
-
-[#567]: https://github.com/tower-rs/tower/pull/567
-[#560]: https://github.com/tower-rs/tower/pull/560
-[#559]: https://github.com/tower-rs/tower/pull/559
-[#564]: https://github.com/tower-rs/tower/pull/564
-[#563]: https://github.com/tower-rs/tower/pull/563
-[#569]: https://github.com/tower-rs/tower/pull/569
-
-# 0.4.5 (February 10, 2021)
-
-### Added
-
-- **util**: Add `ServiceExt::map_future`. ([#542])
-- **builder**: Add `ServiceBuilder::option_layer` to optionally add a layer. ([#555])
-- **make**: Add `Shared` which lets you implement `MakeService` by cloning a
- service. ([#533])
-
-### Fixed
-
-- **util**: Make combinators that contain closures implement `Debug`. They
- previously wouldn't since closures never implement `Debug`. ([#552])
-- **steer**: Implement `Clone` for `Steer`. ([#554])
-- **spawn-ready**: SpawnReady now propagates the current `tracing` span to
- spawned tasks ([#557])
-- Only pull in `tracing` for the features that need it. ([#551])
-
-[#542]: https://github.com/tower-rs/tower/pull/542
-[#555]: https://github.com/tower-rs/tower/pull/555
-[#557]: https://github.com/tower-rs/tower/pull/557
-[#533]: https://github.com/tower-rs/tower/pull/533
-[#551]: https://github.com/tower-rs/tower/pull/551
-[#554]: https://github.com/tower-rs/tower/pull/554
-[#552]: https://github.com/tower-rs/tower/pull/552
-
-# 0.4.4 (January 20, 2021)
-
-### Added
-
-- **util**: Implement `Layer` for `Either<A, B>`. ([#531])
-- **util**: Implement `Clone` for `FilterLayer`. ([#535])
-- **timeout**: Implement `Clone` for `TimeoutLayer`. ([#535])
-- **limit**: Implement `Clone` for `RateLimitLayer`. ([#535])
-
-### Fixed
-
-- Added "full" feature which turns on all other features. ([#532])
-- **spawn-ready**: Avoid oneshot allocations. ([#538])
-
-[#531]: https://github.com/tower-rs/tower/pull/531
-[#532]: https://github.com/tower-rs/tower/pull/532
-[#535]: https://github.com/tower-rs/tower/pull/535
-[#538]: https://github.com/tower-rs/tower/pull/538
-
-# 0.4.3 (January 13, 2021)
-
-### Added
-
-- **filter**: `Filter::check` and `AsyncFilter::check` methods which check a
- request against the filter's `Predicate` ([#521])
-- **filter**: Added `get_ref`, `get_mut`, and `into_inner` methods to `Filter`
- and `AsyncFilter`, allowing access to the wrapped service ([#522])
-- **util**: Added `layer` associated function to `AndThen`, `Then`,
- `MapRequest`, `MapResponse`, and `MapResult` types. These return a `Layer`
- that produces middleware of that type, as a convenience to avoid having to
- import the `Layer` type separately. ([#524])
-- **util**: Added missing `Clone` impls to `AndThenLayer`, `MapRequestLayer`,
- and `MapErrLayer`, when the mapped function implements `Clone` ([#525])
-- **util**: Added `FutureService::new` constructor, with less restrictive bounds
- than the `future_service` free function ([#523])
-
-[#521]: https://github.com/tower-rs/tower/pull/521
-[#522]: https://github.com/tower-rs/tower/pull/522
-[#523]: https://github.com/tower-rs/tower/pull/523
-[#524]: https://github.com/tower-rs/tower/pull/524
-[#525]: https://github.com/tower-rs/tower/pull/525
-
-# 0.4.2 (January 11, 2021)
-
-### Added
-
-- Export `layer_fn` and `LayerFn` from the `tower::layer` module. ([#516])
-
-### Fixed
-
-- Fix missing `Sync` implementation for `Buffer` and `ConcurrencyLimit` ([#518])
-
-[#518]: https://github.com/tower-rs/tower/pull/518
-[#516]: https://github.com/tower-rs/tower/pull/516
-
-# 0.4.1 (January 7, 2021)
-
-### Fixed
-
-- Updated `tower-layer` to 0.3.1 to fix broken re-exports.
-
-# 0.4.0 (January 7, 2021)
-
-This is a major breaking release including a large number of changes. In
-particular, this release updates `tower` to depend on Tokio 1.0, and moves all
-middleware into the `tower` crate. In addition, Tower 0.4 reworks several
-middleware APIs, as well as introducing new ones.
-
-This release does *not* change the core `Service` or `Layer` traits, so `tower`
-0.4 still depends on `tower-service` 0.3 and `tower-layer` 0.3. This means that
-`tower` 0.4 is still compatible with libraries that depend on those crates.
-
-### Added
-
-- **make**: Added `MakeService::into_service` and `MakeService::as_service` for
- converting `MakeService`s into `Service`s ([#492])
-- **steer**: Added `steer` middleware for routing requests to one of a set of
- services ([#426])
-- **util**: Added `MapRequest` middleware and `ServiceExt::map_request`, for
- applying a function to a request before passing it to the inner service
- ([#435])
-- **util**: Added `MapResponse` middleware and `ServiceExt::map_response`, for
- applying a function to the `Response` type of an inner service after its
- future completes ([#435])
-- **util**: Added `MapErr` middleware and `ServiceExt::map_err`, for
- applying a function to the `Error` returned by an inner service if it fails
- ([#396])
-- **util**: Added `MapResult` middleware and `ServiceExt::map_result`, for
- applying a function to the `Result` returned by an inner service's future
- regardless of whether it succeeds or fails ([#499])
-- **util**: Added `Then` middleware and `ServiceExt::then`, for chaining another
- future after an inner service's future completes (with a `Response` or an
- `Error`) ([#500])
-- **util**: Added `AndThen` middleware and `ServiceExt::and_then`, for
- chaining another future after an inner service's future completes successfully
- ([#485])
-- **util**: Added `layer_fn`, for constructing a `Layer` from a function taking
- a `Service` and returning a different `Service` ([#491])
-- **util**: Added `FutureService`, which implements `Service` for a
- `Future` whose `Output` type is a `Service` ([#496])
-- **util**: Added `BoxService::layer` and `UnsyncBoxService::layer`, to make
- constructing layers more ergonomic ([#503])
-- **layer**: Added `Layer` impl for `&Layer` ([#446])
-- **retry**: Added `Retry::get_ref`, `Retry::get_mut`, and `Retry::into_inner`
- to access the inner service ([#463])
-- **timeout**: Added `Timeout::get_ref`, `Timeout::get_mut`, and
- `Timeout::into_inner` to access the inner service ([#463])
-- **buffer**: Added `Clone` and `Copy` impls for `BufferLayer` ([#493])
-- Several documentation improvements ([#442], [#444], [#445], [#449], [#487],
-  [#490], [#506])
-
-### Changed
-
-- All middleware `tower-*` crates were merged into `tower` and placed
- behind feature flags ([#432])
-- Updated Tokio dependency to 1.0 ([#489])
-- **builder**: Make `ServiceBuilder::service` take `self` by reference rather
- than by value ([#504])
-- **reconnect**: Return errors from `MakeService` in the response future, rather than
- in `poll_ready`, allowing the reconnect service to be reused when a reconnect
- fails ([#386], [#437])
-- **discover**: Changed `Discover` to be a sealed trait alias for a
- `TryStream<Item = Change>`. `Discover` implementations are now written by
- implementing `Stream`. ([#443])
-- **load**: Renamed the `Instrument` trait to `TrackCompletion` ([#445])
-- **load**: Renamed `NoInstrument` to `CompleteOnResponse` ([#445])
-- **balance**: Renamed `BalanceLayer` to `MakeBalanceLayer` ([#449])
-- **balance**: Renamed `BalanceMake` to `MakeBalance` ([#449])
-- **ready-cache**: Changed `ready_cache::error::Failed`'s `fmt::Debug` impl to
- require the key type to also implement `fmt::Debug` ([#467])
-- **filter**: Changed `Filter` and `Predicate` to use a synchronous function as
- a predicate ([#508])
-- **filter**: Renamed the previous `Filter` and `Predicate` (where `Predicate`s
- returned a `Future`) to `AsyncFilter` and `AsyncPredicate` ([#508])
-- **filter**: `Predicate`s now take a `Request` type by value and may return a
- new request, potentially of a different type ([#508])
-- **filter**: `Predicate`s may now return an error of any type ([#508])
-
-### Fixed
-
-- **limit**: Fixed an issue where `RateLimit` services do not reset the remaining
- count when rate limiting ([#438], [#439])
-- **util**: Fixed a bug where `oneshot` futures panic if the service does not
- immediately become ready ([#447])
-- **ready-cache**: Fixed `ready_cache::error::Failed` not returning inner error types
- via `Error::source` ([#467])
-- **hedge**: Fixed an interaction with `buffer` where `buffer` slots were
- eagerly reserved for hedge requests even if they were not sent ([#472])
-- **hedge**: Fixed the use of a fixed 10 second bound on the hedge latency
- histogram resulting on errors with longer-lived requests. The latency
- histogram now automatically resizes ([#484])
-- **buffer**: Fixed an issue where tasks waiting for buffer capacity were not
- woken when a buffer is dropped, potentially resulting in a task leak ([#480])
-
-### Removed
-
-- Remove `ServiceExt::ready`.
-- **discover**: Removed `discover::stream` module, since `Discover` is now an
- alias for `Stream` ([#443])
-- **buffer**: Removed `MakeBalance::from_rng`, which caused all balancers to use
- the same RNG ([#497])
-
-[#432]: https://github.com/tower-rs/tower/pull/432
-[#426]: https://github.com/tower-rs/tower/pull/426
-[#435]: https://github.com/tower-rs/tower/pull/435
-[#499]: https://github.com/tower-rs/tower/pull/499
-[#386]: https://github.com/tower-rs/tower/pull/386
-[#437]: https://github.com/tower-rs/tower/pull/437
-[#438]: https://github.com/tower-rs/tower/pull/438
-[#439]: https://github.com/tower-rs/tower/pull/439
-[#443]: https://github.com/tower-rs/tower/pull/443
-[#442]: https://github.com/tower-rs/tower/pull/442
-[#444]: https://github.com/tower-rs/tower/pull/444
-[#445]: https://github.com/tower-rs/tower/pull/445
-[#446]: https://github.com/tower-rs/tower/pull/446
-[#447]: https://github.com/tower-rs/tower/pull/447
-[#449]: https://github.com/tower-rs/tower/pull/449
-[#463]: https://github.com/tower-rs/tower/pull/463
-[#396]: https://github.com/tower-rs/tower/pull/396
-[#467]: https://github.com/tower-rs/tower/pull/467
-[#472]: https://github.com/tower-rs/tower/pull/472
-[#480]: https://github.com/tower-rs/tower/pull/480
-[#484]: https://github.com/tower-rs/tower/pull/484
-[#489]: https://github.com/tower-rs/tower/pull/489
-[#497]: https://github.com/tower-rs/tower/pull/497
-[#487]: https://github.com/tower-rs/tower/pull/487
-[#493]: https://github.com/tower-rs/tower/pull/493
-[#491]: https://github.com/tower-rs/tower/pull/491
-[#495]: https://github.com/tower-rs/tower/pull/495
-[#503]: https://github.com/tower-rs/tower/pull/503
-[#504]: https://github.com/tower-rs/tower/pull/504
-[#492]: https://github.com/tower-rs/tower/pull/492
-[#500]: https://github.com/tower-rs/tower/pull/500
-[#490]: https://github.com/tower-rs/tower/pull/490
-[#506]: https://github.com/tower-rs/tower/pull/506
-[#508]: https://github.com/tower-rs/tower/pull/508
-[#485]: https://github.com/tower-rs/tower/pull/485
-
-# 0.3.1 (January 17, 2020)
-
-- Allow opting out of tracing/log (#410).
-
-# 0.3.0 (December 19, 2019)
-
-- Update all tower based crates to `0.3`.
-- Update to `tokio 0.2`
-- Update to `futures 0.3`
-
-# 0.3.0-alpha.2 (September 30, 2019)
-
-- Move to `futures-*-preview 0.3.0-alpha.19`
-- Move to `pin-project 0.4`
-
-# 0.3.0-alpha.1a (September 13, 2019)
-
-- Update `tower-buffer` to `0.3.0-alpha.1b`
-
-# 0.3.0-alpha.1 (September 11, 2019)
-
-- Move to `std::future`
-
-# 0.1.1 (July 19, 2019)
-
-- Add `ServiceBuilder::into_inner`
-
-# 0.1.0 (April 26, 2019)
-
-- Initial release
diff --git a/vendor/tower/Cargo.lock b/vendor/tower/Cargo.lock
deleted file mode 100644
index 833ac97f..00000000
--- a/vendor/tower/Cargo.lock
+++ /dev/null
@@ -1,760 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "addr2line"
-version = "0.15.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a"
-dependencies = [
- "gimli",
-]
-
-[[package]]
-name = "adler"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
-
-[[package]]
-name = "aho-corasick"
-version = "0.7.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
-dependencies = [
- "memchr",
-]
-
-[[package]]
-name = "async-stream"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
-dependencies = [
- "async-stream-impl",
- "futures-core",
- "pin-project-lite",
-]
-
-[[package]]
-name = "async-stream-impl"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
-
-[[package]]
-name = "backtrace"
-version = "0.3.59"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4717cfcbfaa661a0fd48f8453951837ae7e8f81e481fbb136e3202d72805a744"
-dependencies = [
- "addr2line",
- "cc",
- "cfg-if 1.0.0",
- "libc",
- "miniz_oxide",
- "object",
- "rustc-demangle",
-]
-
-[[package]]
-name = "byteorder"
-version = "1.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
-
-[[package]]
-name = "bytes"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b"
-
-[[package]]
-name = "cc"
-version = "1.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d"
-dependencies = [
- "shlex",
-]
-
-[[package]]
-name = "cfg-if"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33"
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "env_logger"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
-dependencies = [
- "log",
- "regex",
-]
-
-[[package]]
-name = "equivalent"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
-
-[[package]]
-name = "fnv"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
-
-[[package]]
-name = "futures"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-executor",
- "futures-io",
- "futures-sink",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-channel"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
-dependencies = [
- "futures-core",
- "futures-sink",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
-
-[[package]]
-name = "futures-executor"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
-dependencies = [
- "futures-core",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-io"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
-
-[[package]]
-name = "futures-macro"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
-
-[[package]]
-name = "futures-sink"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
-
-[[package]]
-name = "futures-task"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
-
-[[package]]
-name = "futures-util"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-macro",
- "futures-sink",
- "futures-task",
- "memchr",
- "pin-project-lite",
- "pin-utils",
- "slab",
-]
-
-[[package]]
-name = "getrandom"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7"
-dependencies = [
- "cfg-if 1.0.0",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "gimli"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
-
-[[package]]
-name = "hashbrown"
-version = "0.15.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
-
-[[package]]
-name = "hdrhistogram"
-version = "7.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
-dependencies = [
- "byteorder",
- "num-traits",
-]
-
-[[package]]
-name = "http"
-version = "1.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
-[[package]]
-name = "indexmap"
-version = "2.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f"
-dependencies = [
- "equivalent",
- "hashbrown",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.168"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d"
-
-[[package]]
-name = "log"
-version = "0.4.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
-
-[[package]]
-name = "memchr"
-version = "2.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
-
-[[package]]
-name = "miniz_oxide"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
-dependencies = [
- "adler",
- "autocfg",
-]
-
-[[package]]
-name = "nu-ansi-term"
-version = "0.46.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
-dependencies = [
- "overload",
- "winapi",
-]
-
-[[package]]
-name = "num-traits"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
-
-[[package]]
-name = "object"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170"
-
-[[package]]
-name = "once_cell"
-version = "1.20.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
-
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
-[[package]]
-name = "pin-project"
-version = "1.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95"
-dependencies = [
- "pin-project-internal",
-]
-
-[[package]]
-name = "pin-project-internal"
-version = "1.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04"
-dependencies = [
- "zerocopy",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.92"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quickcheck"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
-dependencies = [
- "env_logger",
- "log",
- "rand",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.37"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "regex"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-syntax",
- "thread_local 0.3.6",
-]
-
-[[package]]
-name = "regex-syntax"
-version = "0.6.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716"
-
-[[package]]
-name = "rustc-demangle"
-version = "0.1.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
-
-[[package]]
-name = "sharded-slab"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
-dependencies = [
- "lazy_static",
-]
-
-[[package]]
-name = "shlex"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
-
-[[package]]
-name = "slab"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "spin"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
-
-[[package]]
-name = "syn"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66850e97125af79138385e9b88339cbcd037e3f28ceab8c5ad98e64f0f1f80bf"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-xid",
-]
-
-[[package]]
-name = "syn"
-version = "2.0.90"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "sync_wrapper"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263"
-
-[[package]]
-name = "thread_local"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
-dependencies = [
- "lazy_static",
-]
-
-[[package]]
-name = "thread_local"
-version = "1.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c"
-dependencies = [
- "cfg-if 1.0.0",
- "once_cell",
-]
-
-[[package]]
-name = "tokio"
-version = "1.42.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551"
-dependencies = [
- "backtrace",
- "pin-project-lite",
- "tokio-macros",
-]
-
-[[package]]
-name = "tokio-macros"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
-
-[[package]]
-name = "tokio-stream"
-version = "0.1.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
-dependencies = [
- "futures-core",
- "pin-project-lite",
- "tokio",
-]
-
-[[package]]
-name = "tokio-test"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
-dependencies = [
- "async-stream",
- "bytes",
- "futures-core",
- "tokio",
- "tokio-stream",
-]
-
-[[package]]
-name = "tokio-util"
-version = "0.7.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
-dependencies = [
- "bytes",
- "futures-core",
- "futures-sink",
- "pin-project-lite",
- "tokio",
-]
-
-[[package]]
-name = "tower"
-version = "0.5.2"
-dependencies = [
- "futures",
- "futures-core",
- "futures-util",
- "hdrhistogram",
- "http",
- "indexmap",
- "lazy_static",
- "pin-project-lite",
- "quickcheck",
- "rand",
- "slab",
- "sync_wrapper",
- "tokio",
- "tokio-stream",
- "tokio-test",
- "tokio-util",
- "tower-layer",
- "tower-service",
- "tower-test",
- "tracing",
- "tracing-subscriber",
-]
-
-[[package]]
-name = "tower-layer"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
-
-[[package]]
-name = "tower-service"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
-
-[[package]]
-name = "tower-test"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4546773ffeab9e4ea02b8872faa49bb616a80a7da66afc2f32688943f97efa7"
-dependencies = [
- "futures-util",
- "pin-project",
- "tokio",
- "tokio-test",
- "tower-layer",
- "tower-service",
-]
-
-[[package]]
-name = "tracing"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c21ff9457accc293386c20e8f754d0b059e67e325edf2284f04230d125d7e5ff"
-dependencies = [
- "cfg-if 0.1.9",
- "log",
- "spin",
- "tracing-attributes",
- "tracing-core",
-]
-
-[[package]]
-name = "tracing-attributes"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e27d1065a1de5d8ad2637e41fe14d3cd14363d4a20cb99090b9012004955637"
-dependencies = [
- "quote",
- "syn 1.0.5",
-]
-
-[[package]]
-name = "tracing-core"
-version = "0.1.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
-dependencies = [
- "once_cell",
-]
-
-[[package]]
-name = "tracing-subscriber"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
-dependencies = [
- "nu-ansi-term",
- "sharded-slab",
- "thread_local 1.1.8",
- "tracing-core",
-]
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
-
-[[package]]
-name = "unicode-xid"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
-
-[[package]]
-name = "wasi"
-version = "0.11.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
-
-[[package]]
-name = "winapi"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92c1eb33641e276cfa214a0522acad57be5c56b10cb348b3c5117db75f3ac4b0"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "zerocopy"
-version = "0.7.35"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0"
-dependencies = [
- "byteorder",
- "zerocopy-derive",
-]
-
-[[package]]
-name = "zerocopy-derive"
-version = "0.7.35"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
diff --git a/vendor/tower/Cargo.toml b/vendor/tower/Cargo.toml
deleted file mode 100644
index dfbce090..00000000
--- a/vendor/tower/Cargo.toml
+++ /dev/null
@@ -1,328 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2018"
-rust-version = "1.64.0"
-name = "tower"
-version = "0.5.2"
-authors = ["Tower Maintainers <team@tower-rs.com>"]
-build = false
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = """
-Tower is a library of modular and reusable components for building robust
-clients and servers.
-"""
-homepage = "https://github.com/tower-rs/tower"
-readme = "README.md"
-keywords = [
- "io",
- "async",
- "non-blocking",
- "futures",
- "service",
-]
-categories = [
- "asynchronous",
- "network-programming",
-]
-license = "MIT"
-repository = "https://github.com/tower-rs/tower"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = [
- "--cfg",
- "docsrs",
-]
-
-[package.metadata.playground]
-features = ["full"]
-
-[lib]
-name = "tower"
-path = "src/lib.rs"
-
-[[example]]
-name = "tower-balance"
-path = "examples/tower-balance.rs"
-required-features = ["full"]
-
-[[test]]
-name = "balance"
-path = "tests/balance/main.rs"
-
-[[test]]
-name = "buffer"
-path = "tests/buffer/main.rs"
-
-[[test]]
-name = "builder"
-path = "tests/builder.rs"
-
-[[test]]
-name = "hedge"
-path = "tests/hedge/main.rs"
-
-[[test]]
-name = "limit"
-path = "tests/limit/main.rs"
-
-[[test]]
-name = "load_shed"
-path = "tests/load_shed/main.rs"
-
-[[test]]
-name = "ready_cache"
-path = "tests/ready_cache/main.rs"
-
-[[test]]
-name = "retry"
-path = "tests/retry/main.rs"
-
-[[test]]
-name = "spawn_ready"
-path = "tests/spawn_ready/main.rs"
-
-[[test]]
-name = "steer"
-path = "tests/steer/main.rs"
-
-[[test]]
-name = "support"
-path = "tests/support.rs"
-
-[[test]]
-name = "util"
-path = "tests/util/main.rs"
-
-[dependencies.futures-core]
-version = "0.3.22"
-optional = true
-
-[dependencies.futures-util]
-version = "0.3.22"
-features = ["alloc"]
-optional = true
-default-features = false
-
-[dependencies.hdrhistogram]
-version = "7.0"
-optional = true
-default-features = false
-
-[dependencies.indexmap]
-version = "2.0.2"
-optional = true
-
-[dependencies.pin-project-lite]
-version = "0.2.7"
-optional = true
-
-[dependencies.slab]
-version = "0.4"
-optional = true
-
-[dependencies.sync_wrapper]
-version = "1"
-optional = true
-
-[dependencies.tokio]
-version = "1.6.2"
-features = ["sync"]
-optional = true
-
-[dependencies.tokio-stream]
-version = "0.1.0"
-optional = true
-
-[dependencies.tokio-util]
-version = "0.7.0"
-optional = true
-default-features = false
-
-[dependencies.tower-layer]
-version = "0.3.3"
-
-[dependencies.tower-service]
-version = "0.3.3"
-
-[dependencies.tracing]
-version = "0.1.2"
-features = ["std"]
-optional = true
-default-features = false
-
-[dev-dependencies.futures]
-version = "0.3.22"
-
-[dev-dependencies.hdrhistogram]
-version = "7.0"
-default-features = false
-
-[dev-dependencies.http]
-version = "1"
-
-[dev-dependencies.lazy_static]
-version = "1.4.0"
-
-[dev-dependencies.pin-project-lite]
-version = "0.2.7"
-
-[dev-dependencies.quickcheck]
-version = "1"
-
-[dev-dependencies.rand]
-version = "0.8"
-features = ["small_rng"]
-
-[dev-dependencies.tokio]
-version = "1.6.2"
-features = [
- "macros",
- "sync",
- "test-util",
- "rt-multi-thread",
-]
-
-[dev-dependencies.tokio-stream]
-version = "0.1.0"
-
-[dev-dependencies.tokio-test]
-version = "0.4"
-
-[dev-dependencies.tower-test]
-version = "0.4"
-
-[dev-dependencies.tracing]
-version = "0.1.2"
-features = ["std"]
-default-features = false
-
-[dev-dependencies.tracing-subscriber]
-version = "0.3"
-features = [
- "fmt",
- "ansi",
-]
-default-features = false
-
-[features]
-__common = [
- "futures-core",
- "pin-project-lite",
-]
-balance = [
- "discover",
- "load",
- "ready-cache",
- "make",
- "slab",
- "util",
-]
-buffer = [
- "__common",
- "tokio/sync",
- "tokio/rt",
- "tokio-util",
- "tracing",
-]
-discover = ["__common"]
-filter = [
- "__common",
- "futures-util",
-]
-full = [
- "balance",
- "buffer",
- "discover",
- "filter",
- "hedge",
- "limit",
- "load",
- "load-shed",
- "make",
- "ready-cache",
- "reconnect",
- "retry",
- "spawn-ready",
- "steer",
- "timeout",
- "util",
-]
-hedge = [
- "util",
- "filter",
- "futures-util",
- "hdrhistogram",
- "tokio/time",
- "tracing",
-]
-limit = [
- "__common",
- "tokio/time",
- "tokio/sync",
- "tokio-util",
- "tracing",
-]
-load = [
- "__common",
- "tokio/time",
- "tracing",
-]
-load-shed = ["__common"]
-log = ["tracing/log"]
-make = [
- "futures-util",
- "pin-project-lite",
- "tokio/io-std",
-]
-ready-cache = [
- "futures-core",
- "futures-util",
- "indexmap",
- "tokio/sync",
- "tracing",
- "pin-project-lite",
-]
-reconnect = [
- "make",
- "tokio/io-std",
- "tracing",
-]
-retry = [
- "__common",
- "tokio/time",
- "util",
-]
-spawn-ready = [
- "__common",
- "futures-util",
- "tokio/sync",
- "tokio/rt",
- "util",
- "tracing",
-]
-steer = []
-timeout = [
- "pin-project-lite",
- "tokio/time",
-]
-util = [
- "__common",
- "futures-util",
- "pin-project-lite",
- "sync_wrapper",
-]
diff --git a/vendor/tower/LICENSE b/vendor/tower/LICENSE
deleted file mode 100644
index b980cacc..00000000
--- a/vendor/tower/LICENSE
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2019 Tower Contributors
-
-Permission is hereby granted, free of charge, to any
-person obtaining a copy of this software and associated
-documentation files (the "Software"), to deal in the
-Software without restriction, including without
-limitation the rights to use, copy, modify, merge,
-publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software
-is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice
-shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
-TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
-PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
-SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
-IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-DEALINGS IN THE SOFTWARE.
diff --git a/vendor/tower/README.md b/vendor/tower/README.md
deleted file mode 100644
index fa9e208d..00000000
--- a/vendor/tower/README.md
+++ /dev/null
@@ -1,187 +0,0 @@
-# Tower
-
-Tower is a library of modular and reusable components for building robust
-networking clients and servers.
-
-[![Crates.io][crates-badge]][crates-url]
-[![Documentation][docs-badge]][docs-url]
-[![Documentation (master)][docs-master-badge]][docs-master-url]
-[![MIT licensed][mit-badge]][mit-url]
-[![Build Status][actions-badge]][actions-url]
-[![Discord chat][discord-badge]][discord-url]
-
-[crates-badge]: https://img.shields.io/crates/v/tower.svg
-[crates-url]: https://crates.io/crates/tower
-[docs-badge]: https://docs.rs/tower/badge.svg
-[docs-url]: https://docs.rs/tower
-[docs-master-badge]: https://img.shields.io/badge/docs-master-blue
-[docs-master-url]: https://tower-rs.github.io/tower/tower
-[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
-[mit-url]: LICENSE
-[actions-badge]: https://github.com/tower-rs/tower/workflows/CI/badge.svg
-[actions-url]:https://github.com/tower-rs/tower/actions?query=workflow%3ACI
-[discord-badge]: https://img.shields.io/discord/500028886025895936?logo=discord&label=discord&logoColor=white
-[discord-url]: https://discord.gg/EeF3cQw
-## Overview
-
-Tower aims to make it as easy as possible to build robust networking clients and
-servers. It is protocol agnostic, but is designed around a request / response
-pattern. If your protocol is entirely stream based, Tower may not be a good fit.
-
-Tower provides a simple core abstraction, the [`Service`] trait, which
-represents an asynchronous function taking a request and returning either a
-response or an error. This abstraction can be used to model both clients and
-servers.
-
-Generic components, like [timeouts], [rate limiting], and [load balancing],
-can be modeled as [`Service`]s that wrap some inner service and apply
-additional behavior before or after the inner service is called. This allows
-implementing these components in a protocol-agnostic, composable way. Typically,
-such services are referred to as _middleware_.
-
-An additional abstraction, the [`Layer`] trait, is used to compose
-middleware with [`Service`]s. If a [`Service`] can be thought of as an
-asynchronous function from a request type to a response type, a [`Layer`] is
-a function taking a [`Service`] of one type and returning a [`Service`] of a
-different type. The [`ServiceBuilder`] type is used to add middleware to a
-service by composing it with multiple [`Layer`]s.
-
-### The Tower Ecosystem
-
-Tower is made up of the following crates:
-
-* [`tower`] (this crate)
-* [`tower-service`]
-* [`tower-layer`]
-* [`tower-test`]
-
-Since the [`Service`] and [`Layer`] traits are important integration points
-for all libraries using Tower, they are kept as stable as possible, and
-breaking changes are made rarely. Therefore, they are defined in separate
-crates, [`tower-service`] and [`tower-layer`]. This crate contains
-re-exports of those core traits, implementations of commonly-used
-middleware, and [utilities] for working with [`Service`]s and [`Layer`]s.
-Finally, the [`tower-test`] crate provides tools for testing programs using
-Tower.
-
-## Usage
-
-Tower provides an abstraction layer, and generic implementations of various
-middleware. This means that the `tower` crate on its own does *not* provide
-a working implementation of a network client or server. Instead, Tower's
-[`Service` trait][`Service`] provides an integration point between
-application code, libraries providing middleware implementations, and
-libraries that implement servers and/or clients for various network
-protocols.
-
-Depending on your particular use case, you might use Tower in several ways:
-
-* **Implementing application logic** for a networked program. You might
- use the [`Service`] trait to model your application's behavior, and use
- the middleware [provided by this crate][all_layers] and by other libraries
- to add functionality to clients and servers provided by one or more
- protocol implementations.
-* **Implementing middleware** to add custom behavior to network clients and
- servers in a reusable manner. This might be general-purpose middleware
- (and if it is, please consider releasing your middleware as a library for
- other Tower users!) or application-specific behavior that needs to be
- shared between multiple clients or servers.
-* **Implementing a network protocol**. Libraries that implement network
- protocols (such as HTTP) can depend on `tower-service` to use the
- [`Service`] trait as an integration point between the protocol and user
- code. For example, a client for some protocol might implement [`Service`],
- allowing users to add arbitrary Tower middleware to those clients.
- Similarly, a server might be created from a user-provided [`Service`].
-
- Additionally, when a network protocol requires functionality already
- provided by existing Tower middleware, a protocol implementation might use
- Tower middleware internally, as well as an integration point.
-
-### Library Support
-
-A number of third-party libraries support Tower and the [`Service`] trait.
-The following is an incomplete list of such libraries:
-
-* [`hyper`]: A fast and correct low-level HTTP implementation.
-* [`tonic`]: A [gRPC-over-HTTP/2][grpc] implementation built on top of
- [`hyper`]. See [here][tonic-examples] for examples of using [`tonic`] with
- Tower.
-* [`axum`]: Ergonomic and modular web framework built with Tokio, Tower, and Hyper.
-* [`tower-lsp`]: implementations of the [Language
- Server Protocol][lsp] based on Tower.
-* [`kube`]: Kubernetes client and futures controller runtime. [`kube::Client`]
- makes use of the Tower ecosystem: [`tower`], [`tower-http`], and
- [`tower-test`]. See [here][kube-example-minimal] and
- [here][kube-example-trace] for examples of using [`kube`] with Tower.
-
-[`hyper`]: https://crates.io/crates/hyper
-[`tonic`]: https://crates.io/crates/tonic
-[tonic-examples]: https://github.com/hyperium/tonic/tree/master/examples/src/tower
-[grpc]: https://grpc.io
-[`axum`]: https://crates.io/crates/axum
-[`tower-lsp`]: https://crates.io/crates/tower-lsp
-[lsp]: https://microsoft.github.io/language-server-protocol/
-[`kube`]: https://crates.io/crates/kube
-[`kube::Client`]: https://docs.rs/kube/latest/kube/struct.Client.html
-[kube-example-minimal]: https://github.com/clux/kube-rs/blob/master/examples/custom_client.rs
-[kube-example-trace]: https://github.com/clux/kube-rs/blob/master/examples/custom_client_trace.rs
-[`tower-http`]: https://crates.io/crates/tower-http
-
-If you're the maintainer of a crate that supports Tower, we'd love to add
-your crate to this list! Please [open a PR] adding a brief description of
-your library!
-
-### Getting Started
-
-The various middleware implementations provided by this crate are feature
-flagged, so that users can only compile the parts of Tower they need. By
-default, all the optional middleware are disabled.
-
-To get started using all of Tower's optional middleware, add this to your
-`Cargo.toml`:
-
-```toml
-tower = { version = "0.5.1", features = ["full"] }
-```
-
-Alternatively, you can only enable some features. For example, to enable
-only the [`retry`] and [`timeout`][timeouts] middleware, write:
-
-```toml
-tower = { version = "0.5.1", features = ["retry", "timeout"] }
-```
-
-See [here][all_layers] for a complete list of all middleware provided by
-Tower.
-
-[`Service`]: https://docs.rs/tower/latest/tower/trait.Service.html
-[`Layer`]: https://docs.rs/tower/latest/tower/trait.Layer.html
-[all_layers]: https://docs.rs/tower/latest/tower/#modules
-[timeouts]: https://docs.rs/tower/latest/tower/timeout/
-[rate limiting]: https://docs.rs/tower/latest/tower/limit/rate
-[load balancing]: https://docs.rs/tower/latest/tower/balance/
-[`ServiceBuilder`]: https://docs.rs/tower/latest/tower/struct.ServiceBuilder.html
-[utilities]: https://docs.rs/tower/latest/tower/trait.ServiceExt.html
-[`tower`]: https://crates.io/crates/tower
-[`tower-service`]: https://crates.io/crates/tower-service
-[`tower-layer`]: https://crates.io/crates/tower-layer
-[`tower-test`]: https://crates.io/crates/tower-test
-[`retry`]: https://docs.rs/tower/latest/tower/retry
-[open a PR]: https://github.com/tower-rs/tower/compare
-
-
-## Supported Rust Versions
-
-Tower will keep a rolling MSRV (minimum supported Rust version) policy of **at
-least** 6 months. When increasing the MSRV, the new Rust version must have been
-released at least six months ago. The current MSRV is 1.64.0.
-
-## License
-
-This project is licensed under the [MIT license](LICENSE).
-
-### Contribution
-
-Unless you explicitly state otherwise, any contribution intentionally submitted
-for inclusion in Tower by you, shall be licensed as MIT, without any additional
-terms or conditions.
diff --git a/vendor/tower/examples/tower-balance.rs b/vendor/tower/examples/tower-balance.rs
deleted file mode 100644
index 998bb7c5..00000000
--- a/vendor/tower/examples/tower-balance.rs
+++ /dev/null
@@ -1,235 +0,0 @@
-//! Exercises load balancers with mocked services.
-
-use futures_core::{Stream, TryStream};
-use futures_util::{stream, stream::StreamExt, stream::TryStreamExt};
-use hdrhistogram::Histogram;
-use pin_project_lite::pin_project;
-use rand::{self, Rng};
-use std::hash::Hash;
-use std::time::Duration;
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::time::{self, Instant};
-use tower::balance as lb;
-use tower::discover::{Change, Discover};
-use tower::limit::concurrency::ConcurrencyLimit;
-use tower::load;
-use tower::util::ServiceExt;
-use tower_service::Service;
-
-const REQUESTS: usize = 100_000;
-const CONCURRENCY: usize = 500;
-const DEFAULT_RTT: Duration = Duration::from_millis(30);
-static ENDPOINT_CAPACITY: usize = CONCURRENCY;
-static MAX_ENDPOINT_LATENCIES: [Duration; 10] = [
- Duration::from_millis(1),
- Duration::from_millis(5),
- Duration::from_millis(10),
- Duration::from_millis(10),
- Duration::from_millis(10),
- Duration::from_millis(100),
- Duration::from_millis(100),
- Duration::from_millis(100),
- Duration::from_millis(500),
- Duration::from_millis(1000),
-];
-
-struct Summary {
- latencies: Histogram<u64>,
- start: Instant,
- count_by_instance: [usize; 10],
-}
-
-#[tokio::main]
-async fn main() {
- tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::default()).unwrap();
-
- println!("REQUESTS={}", REQUESTS);
- println!("CONCURRENCY={}", CONCURRENCY);
- println!("ENDPOINT_CAPACITY={}", ENDPOINT_CAPACITY);
- print!("MAX_ENDPOINT_LATENCIES=[");
- for max in &MAX_ENDPOINT_LATENCIES {
- let l = max.as_secs() * 1_000 + u64::from(max.subsec_millis());
- print!("{}ms, ", l);
- }
- println!("]");
-
- let decay = Duration::from_secs(10);
- let d = gen_disco();
- let pe = lb::p2c::Balance::new(load::PeakEwmaDiscover::new(
- d,
- DEFAULT_RTT,
- decay,
- load::CompleteOnResponse::default(),
- ));
- run("P2C+PeakEWMA...", pe).await;
-
- let d = gen_disco();
- let ll = lb::p2c::Balance::new(load::PendingRequestsDiscover::new(
- d,
- load::CompleteOnResponse::default(),
- ));
- run("P2C+LeastLoaded...", ll).await;
-}
-
-type Error = Box<dyn std::error::Error + Send + Sync>;
-
-type Key = usize;
-
-pin_project! {
- struct Disco<S> {
- services: Vec<(Key, S)>
- }
-}
-
-impl<S> Disco<S> {
- fn new(services: Vec<(Key, S)>) -> Self {
- Self { services }
- }
-}
-
-impl<S> Stream for Disco<S>
-where
- S: Service<Req, Response = Rsp, Error = Error>,
-{
- type Item = Result<Change<Key, S>, Error>;
-
- fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- match self.project().services.pop() {
- Some((k, service)) => Poll::Ready(Some(Ok(Change::Insert(k, service)))),
- None => {
- // there may be more later
- Poll::Pending
- }
- }
- }
-}
-
-fn gen_disco() -> impl Discover<
- Key = Key,
- Error = Error,
- Service = ConcurrencyLimit<
- impl Service<Req, Response = Rsp, Error = Error, Future = impl Send> + Send,
- >,
-> + Send {
- Disco::new(
- MAX_ENDPOINT_LATENCIES
- .iter()
- .enumerate()
- .map(|(instance, latency)| {
- let svc = tower::service_fn(move |_| {
- let start = Instant::now();
-
- let maxms = u64::from(latency.subsec_millis())
- .saturating_add(latency.as_secs().saturating_mul(1_000));
- let latency = Duration::from_millis(rand::thread_rng().gen_range(0..maxms));
-
- async move {
- time::sleep_until(start + latency).await;
- let latency = start.elapsed();
- Ok(Rsp { latency, instance })
- }
- });
-
- (instance, ConcurrencyLimit::new(svc, ENDPOINT_CAPACITY))
- })
- .collect(),
- )
-}
-
-async fn run<D>(name: &'static str, lb: lb::p2c::Balance<D, Req>)
-where
- D: Discover + Unpin + Send + 'static,
- D::Error: Into<Error>,
- D::Key: Clone + Send + Hash,
- D::Service: Service<Req, Response = Rsp> + load::Load + Send,
- <D::Service as Service<Req>>::Error: Into<Error>,
- <D::Service as Service<Req>>::Future: Send,
- <D::Service as load::Load>::Metric: std::fmt::Debug,
-{
- println!("{}", name);
-
- let requests = stream::repeat(Req).take(REQUESTS);
- let service = ConcurrencyLimit::new(lb, CONCURRENCY);
- let responses = service.call_all(requests).unordered();
-
- compute_histo(responses).await.unwrap().report();
-}
-
-async fn compute_histo<S>(mut times: S) -> Result<Summary, Error>
-where
- S: TryStream<Ok = Rsp, Error = Error> + 'static + Unpin,
-{
- let mut summary = Summary::new();
- while let Some(rsp) = times.try_next().await? {
- summary.count(rsp);
- }
- Ok(summary)
-}
-
-impl Summary {
- fn new() -> Self {
- Self {
- // The max delay is 2000ms. At 3 significant figures.
- latencies: Histogram::<u64>::new_with_max(3_000, 3).unwrap(),
- start: Instant::now(),
- count_by_instance: [0; 10],
- }
- }
-
- fn count(&mut self, rsp: Rsp) {
- let ms = rsp.latency.as_secs() * 1_000;
- let ms = ms + u64::from(rsp.latency.subsec_nanos()) / 1_000 / 1_000;
- self.latencies += ms;
- self.count_by_instance[rsp.instance] += 1;
- }
-
- fn report(&self) {
- let mut total = 0;
- for c in &self.count_by_instance {
- total += c;
- }
- for (i, c) in self.count_by_instance.iter().enumerate() {
- let p = *c as f64 / total as f64 * 100.0;
- println!(" [{:02}] {:>5.01}%", i, p);
- }
-
- println!(" wall {:4}s", self.start.elapsed().as_secs());
-
- if self.latencies.len() < 2 {
- return;
- }
- println!(" p50 {:4}ms", self.latencies.value_at_quantile(0.5));
-
- if self.latencies.len() < 10 {
- return;
- }
- println!(" p90 {:4}ms", self.latencies.value_at_quantile(0.9));
-
- if self.latencies.len() < 50 {
- return;
- }
- println!(" p95 {:4}ms", self.latencies.value_at_quantile(0.95));
-
- if self.latencies.len() < 100 {
- return;
- }
- println!(" p99 {:4}ms", self.latencies.value_at_quantile(0.99));
-
- if self.latencies.len() < 1000 {
- return;
- }
- println!(" p999 {:4}ms", self.latencies.value_at_quantile(0.999));
- }
-}
-
-#[derive(Debug, Clone)]
-struct Req;
-
-#[derive(Debug)]
-struct Rsp {
- latency: Duration,
- instance: usize,
-}
diff --git a/vendor/tower/src/balance/error.rs b/vendor/tower/src/balance/error.rs
deleted file mode 100644
index 4d47630c..00000000
--- a/vendor/tower/src/balance/error.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-//! Error types for the [`tower::balance`] middleware.
-//!
-//! [`tower::balance`]: crate::balance
-
-use std::fmt;
-
-/// The balancer's endpoint discovery stream failed.
-#[derive(Debug)]
-pub struct Discover(pub(crate) crate::BoxError);
-
-impl fmt::Display for Discover {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "load balancer discovery error: {}", self.0)
- }
-}
-
-impl std::error::Error for Discover {
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- Some(&*self.0)
- }
-}
diff --git a/vendor/tower/src/balance/mod.rs b/vendor/tower/src/balance/mod.rs
deleted file mode 100644
index 7b4fc9c0..00000000
--- a/vendor/tower/src/balance/mod.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-//! Middleware that allows balancing load among multiple services.
-//!
-//! In larger systems, multiple endpoints are often available for a given service. As load
-//! increases, you want to ensure that that load is spread evenly across the available services.
-//! Otherwise, clients could see spikes in latency if their request goes to a particularly loaded
-//! service, even when spare capacity is available to handle that request elsewhere.
-//!
-//! This module provides the [`p2c`] middleware, which implements the "[Power of
-//! Two Random Choices]" algorithm. This is a simple but robust technique for
-//! spreading load across services with only inexact load measurements. Use this
-//! if the set of available services is not within your control, and you simply
-//! want to spread load among that set of services.
-//!
-//! [Power of Two Random Choices]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
-//!
-//! # Examples
-//!
-//! ```rust
-//! # #[cfg(feature = "util")]
-//! # #[cfg(feature = "load")]
-//! # fn warnings_are_errors() {
-//! use tower::balance::p2c::Balance;
-//! use tower::load::Load;
-//! use tower::{Service, ServiceExt};
-//! use futures_util::pin_mut;
-//! # use futures_core::Stream;
-//! # use futures_util::StreamExt;
-//!
-//! async fn spread<Req, S: Service<Req> + Load>(svc1: S, svc2: S, reqs: impl Stream<Item = Req>)
-//! where
-//! S::Error: Into<tower::BoxError>,
-//! # // this bound is pretty unfortunate, and the compiler does _not_ help
-//! S::Metric: std::fmt::Debug,
-//! {
-//! // Spread load evenly across the two services
-//! let p2c = Balance::new(tower::discover::ServiceList::new(vec![svc1, svc2]));
-//!
-//! // Issue all the requests that come in.
-//! // Some will go to svc1, some will go to svc2.
-//! pin_mut!(reqs);
-//! let mut responses = p2c.call_all(reqs);
-//! while let Some(rsp) = responses.next().await {
-//! // ...
-//! }
-//! }
-//! # }
-//! ```
-
-pub mod error;
-pub mod p2c;
diff --git a/vendor/tower/src/balance/p2c/layer.rs b/vendor/tower/src/balance/p2c/layer.rs
deleted file mode 100644
index 6a2032c0..00000000
--- a/vendor/tower/src/balance/p2c/layer.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use super::MakeBalance;
-use std::{fmt, marker::PhantomData};
-use tower_layer::Layer;
-
-/// Construct load balancers ([`Balance`]) over dynamic service sets ([`Discover`]) produced by the
-/// "inner" service in response to requests coming from the "outer" service.
-///
-/// This construction may seem a little odd at first glance. This is not a layer that takes
-/// requests and produces responses in the traditional sense. Instead, it is more like
-/// [`MakeService`] in that it takes service _descriptors_ (see `Target` on [`MakeService`])
-/// and produces _services_. Since [`Balance`] spreads requests across a _set_ of services,
-/// the inner service should produce a [`Discover`], not just a single
-/// [`Service`], given a service descriptor.
-///
-/// See the [module-level documentation](crate::balance) for details on load balancing.
-///
-/// [`Balance`]: crate::balance::p2c::Balance
-/// [`Discover`]: crate::discover::Discover
-/// [`MakeService`]: crate::MakeService
-/// [`Service`]: crate::Service
-pub struct MakeBalanceLayer<D, Req> {
- _marker: PhantomData<fn(D, Req)>,
-}
-
-impl<D, Req> MakeBalanceLayer<D, Req> {
- /// Build balancers using operating system entropy.
- pub const fn new() -> Self {
- Self {
- _marker: PhantomData,
- }
- }
-}
-
-impl<D, Req> Default for MakeBalanceLayer<D, Req> {
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl<D, Req> Clone for MakeBalanceLayer<D, Req> {
- fn clone(&self) -> Self {
- Self {
- _marker: PhantomData,
- }
- }
-}
-
-impl<S, Req> Layer<S> for MakeBalanceLayer<S, Req> {
- type Service = MakeBalance<S, Req>;
-
- fn layer(&self, make_discover: S) -> Self::Service {
- MakeBalance::new(make_discover)
- }
-}
-
-impl<D, Req> fmt::Debug for MakeBalanceLayer<D, Req> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("MakeBalanceLayer").finish()
- }
-}
diff --git a/vendor/tower/src/balance/p2c/make.rs b/vendor/tower/src/balance/p2c/make.rs
deleted file mode 100644
index 45a35d1b..00000000
--- a/vendor/tower/src/balance/p2c/make.rs
+++ /dev/null
@@ -1,125 +0,0 @@
-use super::Balance;
-use crate::discover::Discover;
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::hash::Hash;
-use std::marker::PhantomData;
-use std::{
- fmt,
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-/// Constructs load balancers over dynamic service sets produced by a wrapped "inner" service.
-///
-/// This is effectively an implementation of [`MakeService`] except that it forwards the service
-/// descriptors (`Target`) to an inner service (`S`), and expects that service to produce a
-/// service set in the form of a [`Discover`]. It then wraps the service set in a [`Balance`]
-/// before returning it as the "made" service.
-///
-/// See the [module-level documentation](crate::balance) for details on load balancing.
-///
-/// [`MakeService`]: crate::MakeService
-/// [`Discover`]: crate::discover::Discover
-/// [`Balance`]: crate::balance::p2c::Balance
-pub struct MakeBalance<S, Req> {
- inner: S,
- _marker: PhantomData<fn(Req)>,
-}
-
-pin_project! {
- /// A [`Balance`] in the making.
- ///
- /// [`Balance`]: crate::balance::p2c::Balance
- pub struct MakeFuture<F, Req> {
- #[pin]
- inner: F,
- _marker: PhantomData<fn(Req)>,
- }
-}
-
-impl<S, Req> MakeBalance<S, Req> {
- /// Build balancers using operating system entropy.
- pub const fn new(make_discover: S) -> Self {
- Self {
- inner: make_discover,
- _marker: PhantomData,
- }
- }
-}
-
-impl<S, Req> Clone for MakeBalance<S, Req>
-where
- S: Clone,
-{
- fn clone(&self) -> Self {
- Self {
- inner: self.inner.clone(),
- _marker: PhantomData,
- }
- }
-}
-
-impl<S, Target, Req> Service<Target> for MakeBalance<S, Req>
-where
- S: Service<Target>,
- S::Response: Discover,
- <S::Response as Discover>::Key: Hash,
- <S::Response as Discover>::Service: Service<Req>,
- <<S::Response as Discover>::Service as Service<Req>>::Error: Into<crate::BoxError>,
-{
- type Response = Balance<S::Response, Req>;
- type Error = S::Error;
- type Future = MakeFuture<S::Future, Req>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx)
- }
-
- fn call(&mut self, target: Target) -> Self::Future {
- MakeFuture {
- inner: self.inner.call(target),
- _marker: PhantomData,
- }
- }
-}
-
-impl<S, Req> fmt::Debug for MakeBalance<S, Req>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let Self { inner, _marker } = self;
- f.debug_struct("MakeBalance").field("inner", inner).finish()
- }
-}
-
-impl<F, T, E, Req> Future for MakeFuture<F, Req>
-where
- F: Future<Output = Result<T, E>>,
- T: Discover,
- <T as Discover>::Key: Hash,
- <T as Discover>::Service: Service<Req>,
- <<T as Discover>::Service as Service<Req>>::Error: Into<crate::BoxError>,
-{
- type Output = Result<Balance<T, Req>, E>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
- let inner = ready!(this.inner.poll(cx))?;
- let svc = Balance::new(inner);
- Poll::Ready(Ok(svc))
- }
-}
-
-impl<F, Req> fmt::Debug for MakeFuture<F, Req>
-where
- F: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let Self { inner, _marker } = self;
- f.debug_struct("MakeFuture").field("inner", inner).finish()
- }
-}
diff --git a/vendor/tower/src/balance/p2c/mod.rs b/vendor/tower/src/balance/p2c/mod.rs
deleted file mode 100644
index 8925e41b..00000000
--- a/vendor/tower/src/balance/p2c/mod.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-//! This module implements the "[Power of Two Random Choices]" load balancing algorithm.
-//!
-//! It is a simple but robust technique for spreading load across services with only inexact load
-//! measurements. As its name implies, whenever a request comes in, it samples two ready services
-//! at random, and issues the request to whichever service is less loaded. How loaded a service is
-//! is determined by the return value of [`Load`](crate::load::Load).
-//!
-//! As described in the [Finagle Guide][finagle]:
-//!
-//! > The algorithm randomly picks two services from the set of ready endpoints and
-//! > selects the least loaded of the two. By repeatedly using this strategy, we can
-//! > expect a manageable upper bound on the maximum load of any server.
-//! >
-//! > The maximum load variance between any two servers is bound by `ln(ln(n))` where
-//! > `n` is the number of servers in the cluster.
-//!
-//! The balance service and layer implementations rely on _service discovery_ to provide the
-//! underlying set of services to balance requests across. This happens through the
-//! [`Discover`](crate::discover::Discover) trait, which is essentially a [`Stream`] that indicates
-//! when services become available or go away. If you have a fixed set of services, consider using
-//! [`ServiceList`](crate::discover::ServiceList).
-//!
-//! Since the load balancer needs to perform _random_ choices, the constructors in this module
-//! usually come in two forms: one that uses randomness provided by the operating system, and one
-//! that lets you specify the random seed to use. Usually the former is what you'll want, though
-//! the latter may come in handy for reproducibility or to reduce reliance on the operating system.
-//!
-//! [Power of Two Random Choices]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
-//! [finagle]: https://twitter.github.io/finagle/guide/Clients.html#power-of-two-choices-p2c-least-loaded
-//! [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html
-
-mod layer;
-mod make;
-mod service;
-
-#[cfg(test)]
-mod test;
-
-pub use layer::MakeBalanceLayer;
-pub use make::{MakeBalance, MakeFuture};
-pub use service::Balance;
diff --git a/vendor/tower/src/balance/p2c/service.rs b/vendor/tower/src/balance/p2c/service.rs
deleted file mode 100644
index 30572fc8..00000000
--- a/vendor/tower/src/balance/p2c/service.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-use super::super::error;
-use crate::discover::{Change, Discover};
-use crate::load::Load;
-use crate::ready_cache::{error::Failed, ReadyCache};
-use crate::util::rng::{sample_floyd2, HasherRng, Rng};
-use futures_core::ready;
-use futures_util::future::{self, TryFutureExt};
-use std::hash::Hash;
-use std::marker::PhantomData;
-use std::{
- fmt,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-use tracing::{debug, trace};
-
-/// Efficiently distributes requests across an arbitrary number of services.
-///
-/// See the [module-level documentation](..) for details.
-///
-/// Note that [`Balance`] requires that the [`Discover`] you use is [`Unpin`] in order to implement
-/// [`Service`]. This is because it needs to be accessed from [`Service::poll_ready`], which takes
-/// `&mut self`. You can achieve this easily by wrapping your [`Discover`] in [`Box::pin`] before you
-/// construct the [`Balance`] instance. For more details, see [#319].
-///
-/// [`Box::pin`]: std::boxed::Box::pin()
-/// [#319]: https://github.com/tower-rs/tower/issues/319
-pub struct Balance<D, Req>
-where
- D: Discover,
- D::Key: Hash,
-{
- discover: D,
-
- services: ReadyCache<D::Key, D::Service, Req>,
- ready_index: Option<usize>,
-
- rng: Box<dyn Rng + Send + Sync>,
-
- _req: PhantomData<Req>,
-}
-
-impl<D: Discover, Req> fmt::Debug for Balance<D, Req>
-where
- D: fmt::Debug,
- D::Key: Hash + fmt::Debug,
- D::Service: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Balance")
- .field("discover", &self.discover)
- .field("services", &self.services)
- .finish()
- }
-}
-
-impl<D, Req> Balance<D, Req>
-where
- D: Discover,
- D::Key: Hash,
- D::Service: Service<Req>,
- <D::Service as Service<Req>>::Error: Into<crate::BoxError>,
-{
- /// Constructs a load balancer that uses operating system entropy.
- pub fn new(discover: D) -> Self {
- Self::from_rng(discover, HasherRng::default())
- }
-
- /// Constructs a load balancer seeded with the provided random number generator.
- pub fn from_rng<R: Rng + Send + Sync + 'static>(discover: D, rng: R) -> Self {
- let rng = Box::new(rng);
- Self {
- rng,
- discover,
- services: ReadyCache::default(),
- ready_index: None,
-
- _req: PhantomData,
- }
- }
-
- /// Returns the number of endpoints currently tracked by the balancer.
- pub fn len(&self) -> usize {
- self.services.len()
- }
-
- /// Returns whether or not the balancer is empty.
- pub fn is_empty(&self) -> bool {
- self.services.is_empty()
- }
-}
-
-impl<D, Req> Balance<D, Req>
-where
- D: Discover + Unpin,
- D::Key: Hash + Clone,
- D::Error: Into<crate::BoxError>,
- D::Service: Service<Req> + Load,
- <D::Service as Load>::Metric: std::fmt::Debug,
- <D::Service as Service<Req>>::Error: Into<crate::BoxError>,
-{
- /// Polls `discover` for updates, adding new items to `not_ready`.
- ///
- /// Removals may alter the order of either `ready` or `not_ready`.
- fn update_pending_from_discover(
- &mut self,
- cx: &mut Context<'_>,
- ) -> Poll<Option<Result<(), error::Discover>>> {
- debug!("updating from discover");
- loop {
- match ready!(Pin::new(&mut self.discover).poll_discover(cx))
- .transpose()
- .map_err(|e| error::Discover(e.into()))?
- {
- None => return Poll::Ready(None),
- Some(Change::Remove(key)) => {
- trace!("remove");
- self.services.evict(&key);
- }
- Some(Change::Insert(key, svc)) => {
- trace!("insert");
- // If this service already existed in the set, it will be
- // replaced as the new one becomes ready.
- self.services.push(key, svc);
- }
- }
- }
- }
-
- fn promote_pending_to_ready(&mut self, cx: &mut Context<'_>) {
- loop {
- match self.services.poll_pending(cx) {
- Poll::Ready(Ok(())) => {
- // There are no remaining pending services.
- debug_assert_eq!(self.services.pending_len(), 0);
- break;
- }
- Poll::Pending => {
- // None of the pending services are ready.
- debug_assert!(self.services.pending_len() > 0);
- break;
- }
- Poll::Ready(Err(error)) => {
- // An individual service was lost; continue processing
- // pending services.
- debug!(%error, "dropping failed endpoint");
- }
- }
- }
- trace!(
- ready = %self.services.ready_len(),
- pending = %self.services.pending_len(),
- "poll_unready"
- );
- }
-
- /// Performs P2C on inner services to find a suitable endpoint.
- fn p2c_ready_index(&mut self) -> Option<usize> {
- match self.services.ready_len() {
- 0 => None,
- 1 => Some(0),
- len => {
- // Get two distinct random indexes (in a random order) and
- // compare the loads of the service at each index.
- let [aidx, bidx] = sample_floyd2(&mut self.rng, len as u64);
- debug_assert_ne!(aidx, bidx, "random indices must be distinct");
-
- let aload = self.ready_index_load(aidx as usize);
- let bload = self.ready_index_load(bidx as usize);
- let chosen = if aload <= bload { aidx } else { bidx };
-
- trace!(
- a.index = aidx,
- a.load = ?aload,
- b.index = bidx,
- b.load = ?bload,
- chosen = if chosen == aidx { "a" } else { "b" },
- "p2c",
- );
- Some(chosen as usize)
- }
- }
- }
-
- /// Accesses a ready endpoint by index and returns its current load.
- fn ready_index_load(&self, index: usize) -> <D::Service as Load>::Metric {
- let (_, svc) = self.services.get_ready_index(index).expect("invalid index");
- svc.load()
- }
-}
-
-impl<D, Req> Service<Req> for Balance<D, Req>
-where
- D: Discover + Unpin,
- D::Key: Hash + Clone,
- D::Error: Into<crate::BoxError>,
- D::Service: Service<Req> + Load,
- <D::Service as Load>::Metric: std::fmt::Debug,
- <D::Service as Service<Req>>::Error: Into<crate::BoxError>,
-{
- type Response = <D::Service as Service<Req>>::Response;
- type Error = crate::BoxError;
- type Future = future::MapErr<
- <D::Service as Service<Req>>::Future,
- fn(<D::Service as Service<Req>>::Error) -> crate::BoxError,
- >;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // `ready_index` may have already been set by a prior invocation. These
- // updates cannot disturb the order of existing ready services.
- let _ = self.update_pending_from_discover(cx)?;
- self.promote_pending_to_ready(cx);
-
- loop {
- // If a service has already been selected, ensure that it is ready.
- // This ensures that the underlying service is ready immediately
- // before a request is dispatched to it (i.e. in the same task
- // invocation). If, e.g., a failure detector has changed the state
- // of the service, it may be evicted from the ready set so that
- // another service can be selected.
- if let Some(index) = self.ready_index.take() {
- match self.services.check_ready_index(cx, index) {
- Ok(true) => {
- // The service remains ready.
- self.ready_index = Some(index);
- return Poll::Ready(Ok(()));
- }
- Ok(false) => {
- // The service is no longer ready. Try to find a new one.
- trace!("ready service became unavailable");
- }
- Err(Failed(_, error)) => {
- // The ready endpoint failed, so log the error and try
- // to find a new one.
- debug!(%error, "endpoint failed");
- }
- }
- }
-
- // Select a new service by comparing two at random and using the
- // lesser-loaded service.
- self.ready_index = self.p2c_ready_index();
- if self.ready_index.is_none() {
- debug_assert_eq!(self.services.ready_len(), 0);
- // We have previously registered interest in updates from
- // discover and pending services.
- return Poll::Pending;
- }
- }
- }
-
- fn call(&mut self, request: Req) -> Self::Future {
- let index = self.ready_index.take().expect("called before ready");
- self.services
- .call_ready_index(index, request)
- .map_err(Into::into)
- }
-}
diff --git a/vendor/tower/src/balance/p2c/test.rs b/vendor/tower/src/balance/p2c/test.rs
deleted file mode 100644
index 2370860a..00000000
--- a/vendor/tower/src/balance/p2c/test.rs
+++ /dev/null
@@ -1,125 +0,0 @@
-use crate::discover::ServiceList;
-use crate::load;
-use futures_util::pin_mut;
-use std::task::Poll;
-use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
-use tower_test::{assert_request_eq, mock};
-
-use super::*;
-
-#[tokio::test]
-async fn empty() {
- let empty: Vec<load::Constant<mock::Mock<(), &'static str>, usize>> = vec![];
- let disco = ServiceList::new(empty);
- let mut svc = mock::Spawn::new(Balance::new(disco));
- assert_pending!(svc.poll_ready());
-}
-
-#[tokio::test]
-async fn single_endpoint() {
- let (mut svc, mut handle) = mock::spawn_with(|s| {
- let mock = load::Constant::new(s, 0);
- let disco = ServiceList::new(vec![mock].into_iter());
- Balance::new(disco)
- });
-
- handle.allow(0);
- assert_pending!(svc.poll_ready());
- assert_eq!(
- svc.get_ref().len(),
- 1,
- "balancer must have discovered endpoint"
- );
-
- handle.allow(1);
- assert_ready_ok!(svc.poll_ready());
-
- let mut fut = task::spawn(svc.call(()));
-
- assert_request_eq!(handle, ()).send_response(1);
-
- assert_eq!(assert_ready_ok!(fut.poll()), 1);
- handle.allow(1);
- assert_ready_ok!(svc.poll_ready());
-
- handle.send_error("endpoint lost");
- assert_pending!(svc.poll_ready());
- assert!(
- svc.get_ref().is_empty(),
- "balancer must drop failed endpoints"
- );
-}
-
-#[tokio::test]
-async fn two_endpoints_with_equal_load() {
- let (mock_a, handle_a) = mock::pair();
- let (mock_b, handle_b) = mock::pair();
- let mock_a = load::Constant::new(mock_a, 1);
- let mock_b = load::Constant::new(mock_b, 1);
-
- pin_mut!(handle_a);
- pin_mut!(handle_b);
-
- let disco = ServiceList::new(vec![mock_a, mock_b].into_iter());
- let mut svc = mock::Spawn::new(Balance::new(disco));
-
- handle_a.allow(0);
- handle_b.allow(0);
- assert_pending!(svc.poll_ready());
- assert_eq!(
- svc.get_ref().len(),
- 2,
- "balancer must have discovered both endpoints"
- );
-
- handle_a.allow(1);
- handle_b.allow(0);
- assert_ready_ok!(
- svc.poll_ready(),
- "must be ready when one of two services is ready"
- );
- {
- let mut fut = task::spawn(svc.call(()));
- assert_request_eq!(handle_a, ()).send_response("a");
- assert_eq!(assert_ready_ok!(fut.poll()), "a");
- }
-
- handle_a.allow(0);
- handle_b.allow(1);
- assert_ready_ok!(
- svc.poll_ready(),
- "must be ready when both endpoints are ready"
- );
- {
- let mut fut = task::spawn(svc.call(()));
- assert_request_eq!(handle_b, ()).send_response("b");
- assert_eq!(assert_ready_ok!(fut.poll()), "b");
- }
-
- handle_a.allow(1);
- handle_b.allow(1);
- for _ in 0..2 {
- assert_ready_ok!(
- svc.poll_ready(),
- "must be ready when both endpoints are ready"
- );
- let mut fut = task::spawn(svc.call(()));
-
- for (ref mut h, c) in &mut [(&mut handle_a, "a"), (&mut handle_b, "b")] {
- if let Poll::Ready(Some((_, tx))) = h.as_mut().poll_request() {
- tracing::info!("using {}", c);
- tx.send_response(c);
- h.allow(0);
- }
- }
- assert_ready_ok!(fut.poll());
- }
-
- handle_a.send_error("endpoint lost");
- assert_pending!(svc.poll_ready());
- assert_eq!(
- svc.get_ref().len(),
- 1,
- "balancer must drop failed endpoints",
- );
-}
diff --git a/vendor/tower/src/buffer/error.rs b/vendor/tower/src/buffer/error.rs
deleted file mode 100644
index f046cbca..00000000
--- a/vendor/tower/src/buffer/error.rs
+++ /dev/null
@@ -1,68 +0,0 @@
-//! Error types for the `Buffer` middleware.
-
-use crate::BoxError;
-use std::{fmt, sync::Arc};
-
-/// An error produced by a [`Service`] wrapped by a [`Buffer`]
-///
-/// [`Service`]: crate::Service
-/// [`Buffer`]: crate::buffer::Buffer
-#[derive(Debug)]
-pub struct ServiceError {
- inner: Arc<BoxError>,
-}
-
-/// An error produced when the a buffer's worker closes unexpectedly.
-pub struct Closed {
- _p: (),
-}
-
-// ===== impl ServiceError =====
-
-impl ServiceError {
- pub(crate) fn new(inner: BoxError) -> ServiceError {
- let inner = Arc::new(inner);
- ServiceError { inner }
- }
-
- // Private to avoid exposing `Clone` trait as part of the public API
- pub(crate) fn clone(&self) -> ServiceError {
- ServiceError {
- inner: self.inner.clone(),
- }
- }
-}
-
-impl fmt::Display for ServiceError {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- write!(fmt, "buffered service failed: {}", self.inner)
- }
-}
-
-impl std::error::Error for ServiceError {
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- Some(&**self.inner)
- }
-}
-
-// ===== impl Closed =====
-
-impl Closed {
- pub(crate) fn new() -> Self {
- Closed { _p: () }
- }
-}
-
-impl fmt::Debug for Closed {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_tuple("Closed").finish()
- }
-}
-
-impl fmt::Display for Closed {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.write_str("buffer's worker closed unexpectedly")
- }
-}
-
-impl std::error::Error for Closed {}
diff --git a/vendor/tower/src/buffer/future.rs b/vendor/tower/src/buffer/future.rs
deleted file mode 100644
index 41178900..00000000
--- a/vendor/tower/src/buffer/future.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-//! Future types for the [`Buffer`] middleware.
-//!
-//! [`Buffer`]: crate::buffer::Buffer
-
-use super::{error::Closed, message};
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-pin_project! {
- /// Future that completes when the buffered service eventually services the submitted request.
- #[derive(Debug)]
- pub struct ResponseFuture<T> {
- #[pin]
- state: ResponseState<T>,
- }
-}
-
-pin_project! {
- #[project = ResponseStateProj]
- #[derive(Debug)]
- enum ResponseState<T> {
- Failed {
- error: Option<crate::BoxError>,
- },
- Rx {
- #[pin]
- rx: message::Rx<T>,
- },
- Poll {
- #[pin]
- fut: T,
- },
- }
-}
-
-impl<T> ResponseFuture<T> {
- pub(crate) fn new(rx: message::Rx<T>) -> Self {
- ResponseFuture {
- state: ResponseState::Rx { rx },
- }
- }
-
- pub(crate) fn failed(err: crate::BoxError) -> Self {
- ResponseFuture {
- state: ResponseState::Failed { error: Some(err) },
- }
- }
-}
-
-impl<F, T, E> Future for ResponseFuture<F>
-where
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- loop {
- match this.state.as_mut().project() {
- ResponseStateProj::Failed { error } => {
- return Poll::Ready(Err(error.take().expect("polled after error")));
- }
- ResponseStateProj::Rx { rx } => match ready!(rx.poll(cx)) {
- Ok(Ok(fut)) => this.state.set(ResponseState::Poll { fut }),
- Ok(Err(e)) => return Poll::Ready(Err(e.into())),
- Err(_) => return Poll::Ready(Err(Closed::new().into())),
- },
- ResponseStateProj::Poll { fut } => return fut.poll(cx).map_err(Into::into),
- }
- }
- }
-}
diff --git a/vendor/tower/src/buffer/layer.rs b/vendor/tower/src/buffer/layer.rs
deleted file mode 100644
index 3fc26ab5..00000000
--- a/vendor/tower/src/buffer/layer.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-use super::service::Buffer;
-use std::{fmt, marker::PhantomData};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Adds an mpsc buffer in front of an inner service.
-///
-/// The default Tokio executor is used to run the given service,
-/// which means that this layer can only be used on the Tokio runtime.
-///
-/// See the module documentation for more details.
-pub struct BufferLayer<Request> {
- bound: usize,
- _p: PhantomData<fn(Request)>,
-}
-
-impl<Request> BufferLayer<Request> {
- /// Creates a new [`BufferLayer`] with the provided `bound`.
- ///
- /// `bound` gives the maximal number of requests that can be queued for the service before
- /// backpressure is applied to callers.
- ///
- /// # A note on choosing a `bound`
- ///
- /// When [`Buffer`]'s implementation of [`poll_ready`] returns [`Poll::Ready`], it reserves a
- /// slot in the channel for the forthcoming [`call`]. However, if this call doesn't arrive,
- /// this reserved slot may be held up for a long time. As a result, it's advisable to set
- /// `bound` to be at least the maximum number of concurrent requests the [`Buffer`] will see.
- /// If you do not, all the slots in the buffer may be held up by futures that have just called
- /// [`poll_ready`] but will not issue a [`call`], which prevents other senders from issuing new
- /// requests.
- ///
- /// [`Poll::Ready`]: std::task::Poll::Ready
- /// [`call`]: crate::Service::call
- /// [`poll_ready`]: crate::Service::poll_ready
- pub const fn new(bound: usize) -> Self {
- BufferLayer {
- bound,
- _p: PhantomData,
- }
- }
-}
-
-impl<S, Request> Layer<S> for BufferLayer<Request>
-where
- S: Service<Request> + Send + 'static,
- S::Future: Send,
- S::Error: Into<crate::BoxError> + Send + Sync,
- Request: Send + 'static,
-{
- type Service = Buffer<Request, S::Future>;
-
- fn layer(&self, service: S) -> Self::Service {
- Buffer::new(service, self.bound)
- }
-}
-
-impl<Request> fmt::Debug for BufferLayer<Request> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("BufferLayer")
- .field("bound", &self.bound)
- .finish()
- }
-}
-
-impl<Request> Clone for BufferLayer<Request> {
- fn clone(&self) -> Self {
- Self {
- bound: self.bound,
- _p: PhantomData,
- }
- }
-}
-
-impl<Request> Copy for BufferLayer<Request> {}
diff --git a/vendor/tower/src/buffer/message.rs b/vendor/tower/src/buffer/message.rs
deleted file mode 100644
index 6d13aa12..00000000
--- a/vendor/tower/src/buffer/message.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-use super::error::ServiceError;
-use tokio::sync::oneshot;
-
-/// Message sent over buffer
-#[derive(Debug)]
-pub(crate) struct Message<Request, Fut> {
- pub(crate) request: Request,
- pub(crate) tx: Tx<Fut>,
- pub(crate) span: tracing::Span,
-}
-
-/// Response sender
-pub(crate) type Tx<Fut> = oneshot::Sender<Result<Fut, ServiceError>>;
-
-/// Response receiver
-pub(crate) type Rx<Fut> = oneshot::Receiver<Result<Fut, ServiceError>>;
diff --git a/vendor/tower/src/buffer/mod.rs b/vendor/tower/src/buffer/mod.rs
deleted file mode 100644
index 923b4420..00000000
--- a/vendor/tower/src/buffer/mod.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-//! Middleware that provides a buffered mpsc channel to a service.
-//!
-//! Sometimes you want to give out multiple handles to a single service, and allow each handle to
-//! enqueue requests. That is, you want a [`Service`] to be [`Clone`]. This module allows you to do
-//! that by placing the service behind a multi-producer, single-consumer buffering channel. Clients
-//! enqueue requests by sending on the channel from any of the handles ([`Buffer`]), and the single
-//! service running elsewhere (usually spawned) receives and services the requests one by one. Each
-//! request is enqueued alongside a response channel that allows the service to report the result
-//! of the request back to the caller.
-//!
-//! # Examples
-//!
-//! ```rust
-//! # #[cfg(feature = "util")]
-//! use tower::buffer::Buffer;
-//! # #[cfg(feature = "util")]
-//! use tower::{Service, ServiceExt};
-//! # #[cfg(feature = "util")]
-//! async fn mass_produce<S: Service<usize>>(svc: S)
-//! where
-//! S: 'static + Send,
-//! S::Error: Send + Sync + std::error::Error,
-//! S::Future: Send
-//! {
-//! let svc = Buffer::new(svc, 10 /* buffer length */);
-//! for _ in 0..10 {
-//! let mut svc = svc.clone();
-//! tokio::spawn(async move {
-//! for i in 0usize.. {
-//! svc.ready().await.expect("service crashed").call(i).await;
-//! }
-//! });
-//! }
-//! }
-//! ```
-//!
-//! [`Service`]: crate::Service
-
-pub mod error;
-pub mod future;
-mod layer;
-mod message;
-mod service;
-mod worker;
-
-pub use self::layer::BufferLayer;
-pub use self::service::Buffer;
diff --git a/vendor/tower/src/buffer/service.rs b/vendor/tower/src/buffer/service.rs
deleted file mode 100644
index 9493f107..00000000
--- a/vendor/tower/src/buffer/service.rs
+++ /dev/null
@@ -1,144 +0,0 @@
-use super::{
- future::ResponseFuture,
- message::Message,
- worker::{Handle, Worker},
-};
-
-use std::{
- future::Future,
- task::{Context, Poll},
-};
-use tokio::sync::{mpsc, oneshot};
-use tokio_util::sync::PollSender;
-use tower_service::Service;
-
-/// Adds an mpsc buffer in front of an inner service.
-///
-/// See the module documentation for more details.
-#[derive(Debug)]
-pub struct Buffer<Req, F> {
- tx: PollSender<Message<Req, F>>,
- handle: Handle,
-}
-
-impl<Req, F> Buffer<Req, F>
-where
- F: 'static,
-{
- /// Creates a new [`Buffer`] wrapping `service`.
- ///
- /// `bound` gives the maximal number of requests that can be queued for the service before
- /// backpressure is applied to callers.
- ///
- /// The default Tokio executor is used to run the given service, which means that this method
- /// must be called while on the Tokio runtime.
- ///
- /// # A note on choosing a `bound`
- ///
- /// When [`Buffer`]'s implementation of [`poll_ready`] returns [`Poll::Ready`], it reserves a
- /// slot in the channel for the forthcoming [`call`]. However, if this call doesn't arrive,
- /// this reserved slot may be held up for a long time. As a result, it's advisable to set
- /// `bound` to be at least the maximum number of concurrent requests the [`Buffer`] will see.
- /// If you do not, all the slots in the buffer may be held up by futures that have just called
- /// [`poll_ready`] but will not issue a [`call`], which prevents other senders from issuing new
- /// requests.
- ///
- /// [`Poll::Ready`]: std::task::Poll::Ready
- /// [`call`]: crate::Service::call
- /// [`poll_ready`]: crate::Service::poll_ready
- pub fn new<S>(service: S, bound: usize) -> Self
- where
- S: Service<Req, Future = F> + Send + 'static,
- F: Send,
- S::Error: Into<crate::BoxError> + Send + Sync,
- Req: Send + 'static,
- {
- let (service, worker) = Self::pair(service, bound);
- tokio::spawn(worker);
- service
- }
-
- /// Creates a new [`Buffer`] wrapping `service`, but returns the background worker.
- ///
- /// This is useful if you do not want to spawn directly onto the tokio runtime
- /// but instead want to use your own executor. This will return the [`Buffer`] and
- /// the background `Worker` that you can then spawn.
- pub fn pair<S>(service: S, bound: usize) -> (Self, Worker<S, Req>)
- where
- S: Service<Req, Future = F> + Send + 'static,
- F: Send,
- S::Error: Into<crate::BoxError> + Send + Sync,
- Req: Send + 'static,
- {
- let (tx, rx) = mpsc::channel(bound);
- let (handle, worker) = Worker::new(service, rx);
- let buffer = Self {
- tx: PollSender::new(tx),
- handle,
- };
- (buffer, worker)
- }
-
- fn get_worker_error(&self) -> crate::BoxError {
- self.handle.get_error_on_closed()
- }
-}
-
-impl<Req, Rsp, F, E> Service<Req> for Buffer<Req, F>
-where
- F: Future<Output = Result<Rsp, E>> + Send + 'static,
- E: Into<crate::BoxError>,
- Req: Send + 'static,
-{
- type Response = Rsp;
- type Error = crate::BoxError;
- type Future = ResponseFuture<F>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // First, check if the worker is still alive.
- if self.tx.is_closed() {
- // If the inner service has errored, then we error here.
- return Poll::Ready(Err(self.get_worker_error()));
- }
-
- // Poll the sender to acquire a permit.
- self.tx
- .poll_reserve(cx)
- .map_err(|_| self.get_worker_error())
- }
-
- fn call(&mut self, request: Req) -> Self::Future {
- tracing::trace!("sending request to buffer worker");
-
- // get the current Span so that we can explicitly propagate it to the worker
- // if we didn't do this, events on the worker related to this span wouldn't be counted
- // towards that span since the worker would have no way of entering it.
- let span = tracing::Span::current();
-
- // If we've made it here, then a channel permit has already been
- // acquired, so we can freely allocate a oneshot.
- let (tx, rx) = oneshot::channel();
-
- match self.tx.send_item(Message { request, span, tx }) {
- Ok(_) => ResponseFuture::new(rx),
- // If the channel is closed, propagate the error from the worker.
- Err(_) => {
- tracing::trace!("buffer channel closed");
- ResponseFuture::failed(self.get_worker_error())
- }
- }
- }
-}
-
-impl<Req, F> Clone for Buffer<Req, F>
-where
- Req: Send + 'static,
- F: Send + 'static,
-{
- fn clone(&self) -> Self {
- Self {
- handle: self.handle.clone(),
- tx: self.tx.clone(),
- }
- }
-}
diff --git a/vendor/tower/src/buffer/worker.rs b/vendor/tower/src/buffer/worker.rs
deleted file mode 100644
index 7f4416d6..00000000
--- a/vendor/tower/src/buffer/worker.rs
+++ /dev/null
@@ -1,227 +0,0 @@
-use super::{
- error::{Closed, ServiceError},
- message::Message,
-};
-use futures_core::ready;
-use std::sync::{Arc, Mutex};
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::sync::mpsc;
-use tower_service::Service;
-
-pin_project_lite::pin_project! {
- /// Task that handles processing the buffer. This type should not be used
- /// directly, instead `Buffer` requires an `Executor` that can accept this task.
- ///
- /// The struct is `pub` in the private module and the type is *not* re-exported
- /// as part of the public API. This is the "sealed" pattern to include "private"
- /// types in public traits that are not meant for consumers of the library to
- /// implement (only call).
- #[derive(Debug)]
- pub struct Worker<T, Request>
- where
- T: Service<Request>,
- {
- current_message: Option<Message<Request, T::Future>>,
- rx: mpsc::Receiver<Message<Request, T::Future>>,
- service: T,
- finish: bool,
- failed: Option<ServiceError>,
- handle: Handle,
- }
-}
-
-/// Get the error out
-#[derive(Debug)]
-pub(crate) struct Handle {
- inner: Arc<Mutex<Option<ServiceError>>>,
-}
-
-impl<T, Request> Worker<T, Request>
-where
- T: Service<Request>,
- T::Error: Into<crate::BoxError>,
-{
- pub(crate) fn new(
- service: T,
- rx: mpsc::Receiver<Message<Request, T::Future>>,
- ) -> (Handle, Worker<T, Request>) {
- let handle = Handle {
- inner: Arc::new(Mutex::new(None)),
- };
-
- let worker = Worker {
- current_message: None,
- finish: false,
- failed: None,
- rx,
- service,
- handle: handle.clone(),
- };
-
- (handle, worker)
- }
-
- /// Return the next queued Message that hasn't been canceled.
- ///
- /// If a `Message` is returned, the `bool` is true if this is the first time we received this
- /// message, and false otherwise (i.e., we tried to forward it to the backing service before).
- fn poll_next_msg(
- &mut self,
- cx: &mut Context<'_>,
- ) -> Poll<Option<(Message<Request, T::Future>, bool)>> {
- if self.finish {
- // We've already received None and are shutting down
- return Poll::Ready(None);
- }
-
- tracing::trace!("worker polling for next message");
- if let Some(msg) = self.current_message.take() {
- // If the oneshot sender is closed, then the receiver is dropped,
- // and nobody cares about the response. If this is the case, we
- // should continue to the next request.
- if !msg.tx.is_closed() {
- tracing::trace!("resuming buffered request");
- return Poll::Ready(Some((msg, false)));
- }
-
- tracing::trace!("dropping cancelled buffered request");
- }
-
- // Get the next request
- while let Some(msg) = ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
- if !msg.tx.is_closed() {
- tracing::trace!("processing new request");
- return Poll::Ready(Some((msg, true)));
- }
- // Otherwise, request is canceled, so pop the next one.
- tracing::trace!("dropping cancelled request");
- }
-
- Poll::Ready(None)
- }
-
- fn failed(&mut self, error: crate::BoxError) {
- // The underlying service failed when we called `poll_ready` on it with the given `error`. We
- // need to communicate this to all the `Buffer` handles. To do so, we wrap up the error in
- // an `Arc`, send that `Arc<E>` to all pending requests, and store it so that subsequent
- // requests will also fail with the same error.
-
- // Note that we need to handle the case where some handle is concurrently trying to send us
- // a request. We need to make sure that *either* the send of the request fails *or* it
- // receives an error on the `oneshot` it constructed. Specifically, we want to avoid the
- // case where we send errors to all outstanding requests, and *then* the caller sends its
- // request. We do this by *first* exposing the error, *then* closing the channel used to
- // send more requests (so the client will see the error when the send fails), and *then*
- // sending the error to all outstanding requests.
- let error = ServiceError::new(error);
-
- let mut inner = self.handle.inner.lock().unwrap();
-
- if inner.is_some() {
- // Future::poll was called after we've already errored out!
- return;
- }
-
- *inner = Some(error.clone());
- drop(inner);
-
- self.rx.close();
-
- // By closing the mpsc::Receiver, we know that poll_next_msg will soon return Ready(None),
- // which will trigger the `self.finish == true` phase. We just need to make sure that any
- // requests that we receive before we've exhausted the receiver receive the error:
- self.failed = Some(error);
- }
-}
-
-impl<T, Request> Future for Worker<T, Request>
-where
- T: Service<Request>,
- T::Error: Into<crate::BoxError>,
-{
- type Output = ();
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- if self.finish {
- return Poll::Ready(());
- }
-
- loop {
- match ready!(self.poll_next_msg(cx)) {
- Some((msg, first)) => {
- let _guard = msg.span.enter();
- if let Some(ref failed) = self.failed {
- tracing::trace!("notifying caller about worker failure");
- let _ = msg.tx.send(Err(failed.clone()));
- continue;
- }
-
- // Wait for the service to be ready
- tracing::trace!(
- resumed = !first,
- message = "worker received request; waiting for service readiness"
- );
- match self.service.poll_ready(cx) {
- Poll::Ready(Ok(())) => {
- tracing::debug!(service.ready = true, message = "processing request");
- let response = self.service.call(msg.request);
-
- // Send the response future back to the sender.
- //
- // An error means the request had been canceled in-between
- // our calls, the response future will just be dropped.
- tracing::trace!("returning response future");
- let _ = msg.tx.send(Ok(response));
- }
- Poll::Pending => {
- tracing::trace!(service.ready = false, message = "delay");
- // Put out current message back in its slot.
- drop(_guard);
- self.current_message = Some(msg);
- return Poll::Pending;
- }
- Poll::Ready(Err(e)) => {
- let error = e.into();
- tracing::debug!({ %error }, "service failed");
- drop(_guard);
- self.failed(error);
- let _ = msg.tx.send(Err(self
- .failed
- .as_ref()
- .expect("Worker::failed did not set self.failed?")
- .clone()));
- }
- }
- }
- None => {
- // No more more requests _ever_.
- self.finish = true;
- return Poll::Ready(());
- }
- }
- }
- }
-}
-
-impl Handle {
- pub(crate) fn get_error_on_closed(&self) -> crate::BoxError {
- self.inner
- .lock()
- .unwrap()
- .as_ref()
- .map(|svc_err| svc_err.clone().into())
- .unwrap_or_else(|| Closed::new().into())
- }
-}
-
-impl Clone for Handle {
- fn clone(&self) -> Handle {
- Handle {
- inner: self.inner.clone(),
- }
- }
-}
diff --git a/vendor/tower/src/builder/mod.rs b/vendor/tower/src/builder/mod.rs
deleted file mode 100644
index 8f081d23..00000000
--- a/vendor/tower/src/builder/mod.rs
+++ /dev/null
@@ -1,809 +0,0 @@
-//! Builder types to compose layers and services
-
-use tower_layer::{Identity, Layer, Stack};
-use tower_service::Service;
-
-use std::fmt;
-
-/// Declaratively construct [`Service`] values.
-///
-/// [`ServiceBuilder`] provides a [builder-like interface][builder] for composing
-/// layers to be applied to a [`Service`].
-///
-/// # Service
-///
-/// A [`Service`] is a trait representing an asynchronous function of a request
-/// to a response. It is similar to `async fn(Request) -> Result<Response, Error>`.
-///
-/// A [`Service`] is typically bound to a single transport, such as a TCP
-/// connection. It defines how _all_ inbound or outbound requests are handled
-/// by that connection.
-///
-/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
-///
-/// # Order
-///
-/// The order in which layers are added impacts how requests are handled. Layers
-/// that are added first will be called with the request first. The argument to
-/// `service` will be last to see the request.
-///
-/// ```
-/// # // this (and other) doctest is ignored because we don't have a way
-/// # // to say that it should only be run with cfg(feature = "...")
-/// # use tower::Service;
-/// # use tower::builder::ServiceBuilder;
-/// # #[cfg(all(feature = "buffer", feature = "limit"))]
-/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
-/// ServiceBuilder::new()
-/// .buffer(100)
-/// .concurrency_limit(10)
-/// .service(svc)
-/// # ;
-/// # }
-/// ```
-///
-/// In the above example, the buffer layer receives the request first followed
-/// by `concurrency_limit`. `buffer` enables up to 100 request to be in-flight
-/// **on top of** the requests that have already been forwarded to the next
-/// layer. Combined with `concurrency_limit`, this allows up to 110 requests to be
-/// in-flight.
-///
-/// ```
-/// # use tower::Service;
-/// # use tower::builder::ServiceBuilder;
-/// # #[cfg(all(feature = "buffer", feature = "limit"))]
-/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
-/// ServiceBuilder::new()
-/// .concurrency_limit(10)
-/// .buffer(100)
-/// .service(svc)
-/// # ;
-/// # }
-/// ```
-///
-/// The above example is similar, but the order of layers is reversed. Now,
-/// `concurrency_limit` applies first and only allows 10 requests to be in-flight
-/// total.
-///
-/// # Examples
-///
-/// A [`Service`] stack with a single layer:
-///
-/// ```
-/// # use tower::Service;
-/// # use tower::builder::ServiceBuilder;
-/// # #[cfg(feature = "limit")]
-/// # use tower::limit::concurrency::ConcurrencyLimitLayer;
-/// # #[cfg(feature = "limit")]
-/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
-/// ServiceBuilder::new()
-/// .concurrency_limit(5)
-/// .service(svc);
-/// # ;
-/// # }
-/// ```
-///
-/// A [`Service`] stack with _multiple_ layers that contain rate limiting,
-/// in-flight request limits, and a channel-backed, clonable [`Service`]:
-///
-/// ```
-/// # use tower::Service;
-/// # use tower::builder::ServiceBuilder;
-/// # use std::time::Duration;
-/// # #[cfg(all(feature = "buffer", feature = "limit"))]
-/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
-/// ServiceBuilder::new()
-/// .buffer(5)
-/// .concurrency_limit(5)
-/// .rate_limit(5, Duration::from_secs(1))
-/// .service(svc);
-/// # ;
-/// # }
-/// ```
-///
-/// [`Service`]: crate::Service
-#[derive(Clone)]
-pub struct ServiceBuilder<L> {
- layer: L,
-}
-
-impl Default for ServiceBuilder<Identity> {
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl ServiceBuilder<Identity> {
- /// Create a new [`ServiceBuilder`].
- pub const fn new() -> Self {
- ServiceBuilder {
- layer: Identity::new(),
- }
- }
-}
-
-impl<L> ServiceBuilder<L> {
- /// Add a new layer `T` into the [`ServiceBuilder`].
- ///
- /// This wraps the inner service with the service provided by a user-defined
- /// [`Layer`]. The provided layer must implement the [`Layer`] trait.
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer<T>(self, layer: T) -> ServiceBuilder<Stack<T, L>> {
- ServiceBuilder {
- layer: Stack::new(layer, self.layer),
- }
- }
-
- /// Optionally add a new layer `T` into the [`ServiceBuilder`].
- ///
- /// ```
- /// # use std::time::Duration;
- /// # use tower::Service;
- /// # use tower::builder::ServiceBuilder;
- /// # use tower::timeout::TimeoutLayer;
- /// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
- /// # let timeout = Some(Duration::new(10, 0));
- /// // Apply a timeout if configured
- /// ServiceBuilder::new()
- /// .option_layer(timeout.map(TimeoutLayer::new))
- /// .service(svc)
- /// # ;
- /// # }
- /// ```
- #[cfg(feature = "util")]
- pub fn option_layer<T>(
- self,
- layer: Option<T>,
- ) -> ServiceBuilder<Stack<crate::util::Either<T, Identity>, L>> {
- self.layer(crate::util::option_layer(layer))
- }
-
- /// Add a [`Layer`] built from a function that accepts a service and returns another service.
- ///
- /// See the documentation for [`layer_fn`] for more details.
- ///
- /// [`layer_fn`]: crate::layer::layer_fn
- pub fn layer_fn<F>(self, f: F) -> ServiceBuilder<Stack<crate::layer::LayerFn<F>, L>> {
- self.layer(crate::layer::layer_fn(f))
- }
-
- /// Buffer requests when the next layer is not ready.
- ///
- /// This wraps the inner service with an instance of the [`Buffer`]
- /// middleware.
- ///
- /// [`Buffer`]: crate::buffer
- #[cfg(feature = "buffer")]
- pub fn buffer<Request>(
- self,
- bound: usize,
- ) -> ServiceBuilder<Stack<crate::buffer::BufferLayer<Request>, L>> {
- self.layer(crate::buffer::BufferLayer::new(bound))
- }
-
- /// Limit the max number of in-flight requests.
- ///
- /// A request is in-flight from the time the request is received until the
- /// response future completes. This includes the time spent in the next
- /// layers.
- ///
- /// This wraps the inner service with an instance of the
- /// [`ConcurrencyLimit`] middleware.
- ///
- /// [`ConcurrencyLimit`]: crate::limit::concurrency
- #[cfg(feature = "limit")]
- pub fn concurrency_limit(
- self,
- max: usize,
- ) -> ServiceBuilder<Stack<crate::limit::ConcurrencyLimitLayer, L>> {
- self.layer(crate::limit::ConcurrencyLimitLayer::new(max))
- }
-
- /// Drop requests when the next layer is unable to respond to requests.
- ///
- /// Usually, when a service or middleware does not have capacity to process a
- /// request (i.e., [`poll_ready`] returns [`Pending`]), the caller waits until
- /// capacity becomes available.
- ///
- /// [`LoadShed`] immediately responds with an error when the next layer is
- /// out of capacity.
- ///
- /// This wraps the inner service with an instance of the [`LoadShed`]
- /// middleware.
- ///
- /// [`LoadShed`]: crate::load_shed
- /// [`poll_ready`]: crate::Service::poll_ready
- /// [`Pending`]: std::task::Poll::Pending
- #[cfg(feature = "load-shed")]
- pub fn load_shed(self) -> ServiceBuilder<Stack<crate::load_shed::LoadShedLayer, L>> {
- self.layer(crate::load_shed::LoadShedLayer::new())
- }
-
- /// Limit requests to at most `num` per the given duration.
- ///
- /// This wraps the inner service with an instance of the [`RateLimit`]
- /// middleware.
- ///
- /// [`RateLimit`]: crate::limit::rate
- #[cfg(feature = "limit")]
- pub fn rate_limit(
- self,
- num: u64,
- per: std::time::Duration,
- ) -> ServiceBuilder<Stack<crate::limit::RateLimitLayer, L>> {
- self.layer(crate::limit::RateLimitLayer::new(num, per))
- }
-
- /// Retry failed requests according to the given [retry policy][policy].
- ///
- /// `policy` determines which failed requests will be retried. It must
- /// implement the [`retry::Policy`][policy] trait.
- ///
- /// This wraps the inner service with an instance of the [`Retry`]
- /// middleware.
- ///
- /// [`Retry`]: crate::retry
- /// [policy]: crate::retry::Policy
- #[cfg(feature = "retry")]
- pub fn retry<P>(self, policy: P) -> ServiceBuilder<Stack<crate::retry::RetryLayer<P>, L>> {
- self.layer(crate::retry::RetryLayer::new(policy))
- }
-
- /// Fail requests that take longer than `timeout`.
- ///
- /// If the next layer takes more than `timeout` to respond to a request,
- /// processing is terminated and an error is returned.
- ///
- /// This wraps the inner service with an instance of the [`timeout`]
- /// middleware.
- ///
- /// [`timeout`]: crate::timeout
- #[cfg(feature = "timeout")]
- pub fn timeout(
- self,
- timeout: std::time::Duration,
- ) -> ServiceBuilder<Stack<crate::timeout::TimeoutLayer, L>> {
- self.layer(crate::timeout::TimeoutLayer::new(timeout))
- }
-
- /// Conditionally reject requests based on `predicate`.
- ///
- /// `predicate` must implement the [`Predicate`] trait.
- ///
- /// This wraps the inner service with an instance of the [`Filter`]
- /// middleware.
- ///
- /// [`Filter`]: crate::filter
- /// [`Predicate`]: crate::filter::Predicate
- #[cfg(feature = "filter")]
- pub fn filter<P>(
- self,
- predicate: P,
- ) -> ServiceBuilder<Stack<crate::filter::FilterLayer<P>, L>> {
- self.layer(crate::filter::FilterLayer::new(predicate))
- }
-
- /// Conditionally reject requests based on an asynchronous `predicate`.
- ///
- /// `predicate` must implement the [`AsyncPredicate`] trait.
- ///
- /// This wraps the inner service with an instance of the [`AsyncFilter`]
- /// middleware.
- ///
- /// [`AsyncFilter`]: crate::filter::AsyncFilter
- /// [`AsyncPredicate`]: crate::filter::AsyncPredicate
- #[cfg(feature = "filter")]
- pub fn filter_async<P>(
- self,
- predicate: P,
- ) -> ServiceBuilder<Stack<crate::filter::AsyncFilterLayer<P>, L>> {
- self.layer(crate::filter::AsyncFilterLayer::new(predicate))
- }
-
- /// Map one request type to another.
- ///
- /// This wraps the inner service with an instance of the [`MapRequest`]
- /// middleware.
- ///
- /// # Examples
- ///
- /// Changing the type of a request:
- ///
- /// ```rust
- /// use tower::ServiceBuilder;
- /// use tower::ServiceExt;
- ///
- /// # #[tokio::main]
- /// # async fn main() -> Result<(), ()> {
- /// // Suppose we have some `Service` whose request type is `String`:
- /// let string_svc = tower::service_fn(|request: String| async move {
- /// println!("request: {}", request);
- /// Ok(())
- /// });
- ///
- /// // ...but we want to call that service with a `usize`. What do we do?
- ///
- /// let usize_svc = ServiceBuilder::new()
- /// // Add a middleware that converts the request type to a `String`:
- /// .map_request(|request: usize| format!("{}", request))
- /// // ...and wrap the string service with that middleware:
- /// .service(string_svc);
- ///
- /// // Now, we can call that service with a `usize`:
- /// usize_svc.oneshot(42).await?;
- /// # Ok(())
- /// # }
- /// ```
- ///
- /// Modifying the request value:
- ///
- /// ```rust
- /// use tower::ServiceBuilder;
- /// use tower::ServiceExt;
- ///
- /// # #[tokio::main]
- /// # async fn main() -> Result<(), ()> {
- /// // A service that takes a number and returns it:
- /// let svc = tower::service_fn(|request: usize| async move {
- /// Ok(request)
- /// });
- ///
- /// let svc = ServiceBuilder::new()
- /// // Add a middleware that adds 1 to each request
- /// .map_request(|request: usize| request + 1)
- /// .service(svc);
- ///
- /// let response = svc.oneshot(1).await?;
- /// assert_eq!(response, 2);
- /// # Ok(())
- /// # }
- /// ```
- ///
- /// [`MapRequest`]: crate::util::MapRequest
- #[cfg(feature = "util")]
- pub fn map_request<F, R1, R2>(
- self,
- f: F,
- ) -> ServiceBuilder<Stack<crate::util::MapRequestLayer<F>, L>>
- where
- F: FnMut(R1) -> R2 + Clone,
- {
- self.layer(crate::util::MapRequestLayer::new(f))
- }
-
- /// Map one response type to another.
- ///
- /// This wraps the inner service with an instance of the [`MapResponse`]
- /// middleware.
- ///
- /// See the documentation for the [`map_response` combinator] for details.
- ///
- /// [`MapResponse`]: crate::util::MapResponse
- /// [`map_response` combinator]: crate::util::ServiceExt::map_response
- #[cfg(feature = "util")]
- pub fn map_response<F>(
- self,
- f: F,
- ) -> ServiceBuilder<Stack<crate::util::MapResponseLayer<F>, L>> {
- self.layer(crate::util::MapResponseLayer::new(f))
- }
-
- /// Map one error type to another.
- ///
- /// This wraps the inner service with an instance of the [`MapErr`]
- /// middleware.
- ///
- /// See the documentation for the [`map_err` combinator] for details.
- ///
- /// [`MapErr`]: crate::util::MapErr
- /// [`map_err` combinator]: crate::util::ServiceExt::map_err
- #[cfg(feature = "util")]
- pub fn map_err<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapErrLayer<F>, L>> {
- self.layer(crate::util::MapErrLayer::new(f))
- }
-
- /// Composes a function that transforms futures produced by the service.
- ///
- /// This wraps the inner service with an instance of the [`MapFutureLayer`] middleware.
- ///
- /// See the documentation for the [`map_future`] combinator for details.
- ///
- /// [`MapFutureLayer`]: crate::util::MapFutureLayer
- /// [`map_future`]: crate::util::ServiceExt::map_future
- #[cfg(feature = "util")]
- pub fn map_future<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapFutureLayer<F>, L>> {
- self.layer(crate::util::MapFutureLayer::new(f))
- }
-
- /// Apply an asynchronous function after the service, regardless of whether the future
- /// succeeds or fails.
- ///
- /// This wraps the inner service with an instance of the [`Then`]
- /// middleware.
- ///
- /// This is similar to the [`map_response`] and [`map_err`] functions,
- /// except that the *same* function is invoked when the service's future
- /// completes, whether it completes successfully or fails. This function
- /// takes the [`Result`] returned by the service's future, and returns a
- /// [`Result`].
- ///
- /// See the documentation for the [`then` combinator] for details.
- ///
- /// [`Then`]: crate::util::Then
- /// [`then` combinator]: crate::util::ServiceExt::then
- /// [`map_response`]: ServiceBuilder::map_response
- /// [`map_err`]: ServiceBuilder::map_err
- #[cfg(feature = "util")]
- pub fn then<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::ThenLayer<F>, L>> {
- self.layer(crate::util::ThenLayer::new(f))
- }
-
- /// Executes a new future after this service's future resolves. This does
- /// not alter the behaviour of the [`poll_ready`] method.
- ///
- /// This method can be used to change the [`Response`] type of the service
- /// into a different type. You can use this method to chain along a computation once the
- /// service's response has been resolved.
- ///
- /// This wraps the inner service with an instance of the [`AndThen`]
- /// middleware.
- ///
- /// See the documentation for the [`and_then` combinator] for details.
- ///
- /// [`Response`]: crate::Service::Response
- /// [`poll_ready`]: crate::Service::poll_ready
- /// [`and_then` combinator]: crate::util::ServiceExt::and_then
- /// [`AndThen`]: crate::util::AndThen
- #[cfg(feature = "util")]
- pub fn and_then<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::AndThenLayer<F>, L>> {
- self.layer(crate::util::AndThenLayer::new(f))
- }
-
- /// Maps this service's result type (`Result<Self::Response, Self::Error>`)
- /// to a different value, regardless of whether the future succeeds or
- /// fails.
- ///
- /// This wraps the inner service with an instance of the [`MapResult`]
- /// middleware.
- ///
- /// See the documentation for the [`map_result` combinator] for details.
- ///
- /// [`map_result` combinator]: crate::util::ServiceExt::map_result
- /// [`MapResult`]: crate::util::MapResult
- #[cfg(feature = "util")]
- pub fn map_result<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapResultLayer<F>, L>> {
- self.layer(crate::util::MapResultLayer::new(f))
- }
-
- /// Returns the underlying `Layer` implementation.
- pub fn into_inner(self) -> L {
- self.layer
- }
-
- /// Wrap the service `S` with the middleware provided by this
- /// [`ServiceBuilder`]'s [`Layer`]'s, returning a new [`Service`].
- ///
- /// [`Layer`]: crate::Layer
- /// [`Service`]: crate::Service
- pub fn service<S>(&self, service: S) -> L::Service
- where
- L: Layer<S>,
- {
- self.layer.layer(service)
- }
-
- /// Wrap the async function `F` with the middleware provided by this [`ServiceBuilder`]'s
- /// [`Layer`]s, returning a new [`Service`].
- ///
- /// This is a convenience method which is equivalent to calling
- /// [`ServiceBuilder::service`] with a [`service_fn`], like this:
- ///
- /// ```rust
- /// # use tower::{ServiceBuilder, service_fn};
- /// # async fn handler_fn(_: ()) -> Result<(), ()> { Ok(()) }
- /// # let _ = {
- /// ServiceBuilder::new()
- /// // ...
- /// .service(service_fn(handler_fn))
- /// # };
- /// ```
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::time::Duration;
- /// use tower::{ServiceBuilder, ServiceExt, BoxError, service_fn};
- ///
- /// # #[tokio::main]
- /// # async fn main() -> Result<(), BoxError> {
- /// async fn handle(request: &'static str) -> Result<&'static str, BoxError> {
- /// Ok(request)
- /// }
- ///
- /// let svc = ServiceBuilder::new()
- /// .buffer(1024)
- /// .timeout(Duration::from_secs(10))
- /// .service_fn(handle);
- ///
- /// let response = svc.oneshot("foo").await?;
- ///
- /// assert_eq!(response, "foo");
- /// # Ok(())
- /// # }
- /// ```
- ///
- /// [`Layer`]: crate::Layer
- /// [`Service`]: crate::Service
- /// [`service_fn`]: crate::service_fn
- #[cfg(feature = "util")]
- pub fn service_fn<F>(self, f: F) -> L::Service
- where
- L: Layer<crate::util::ServiceFn<F>>,
- {
- self.service(crate::util::service_fn(f))
- }
-
- /// Check that the builder implements `Clone`.
- ///
- /// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
- ///
- /// Doesn't actually change the builder but serves as a type check.
- ///
- /// # Example
- ///
- /// ```rust
- /// use tower::ServiceBuilder;
- ///
- /// let builder = ServiceBuilder::new()
- /// // Do something before processing the request
- /// .map_request(|request: String| {
- /// println!("got request!");
- /// request
- /// })
- /// // Ensure our `ServiceBuilder` can be cloned
- /// .check_clone()
- /// // Do something after processing the request
- /// .map_response(|response: String| {
- /// println!("got response!");
- /// response
- /// });
- /// ```
- #[inline]
- pub fn check_clone(self) -> Self
- where
- Self: Clone,
- {
- self
- }
-
- /// Check that the builder when given a service of type `S` produces a service that implements
- /// `Clone`.
- ///
- /// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
- ///
- /// Doesn't actually change the builder but serves as a type check.
- ///
- /// # Example
- ///
- /// ```rust
- /// use tower::ServiceBuilder;
- ///
- /// # #[derive(Clone)]
- /// # struct MyService;
- /// #
- /// let builder = ServiceBuilder::new()
- /// // Do something before processing the request
- /// .map_request(|request: String| {
- /// println!("got request!");
- /// request
- /// })
- /// // Ensure that the service produced when given a `MyService` implements
- /// .check_service_clone::<MyService>()
- /// // Do something after processing the request
- /// .map_response(|response: String| {
- /// println!("got response!");
- /// response
- /// });
- /// ```
- #[inline]
- pub fn check_service_clone<S>(self) -> Self
- where
- L: Layer<S>,
- L::Service: Clone,
- {
- self
- }
-
- /// Check that the builder when given a service of type `S` produces a service with the given
- /// request, response, and error types.
- ///
- /// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
- ///
- /// Doesn't actually change the builder but serves as a type check.
- ///
- /// # Example
- ///
- /// ```rust
- /// use tower::ServiceBuilder;
- /// use std::task::{Poll, Context};
- /// use tower::{Service, ServiceExt};
- ///
- /// // An example service
- /// struct MyService;
- ///
- /// impl Service<Request> for MyService {
- /// type Response = Response;
- /// type Error = Error;
- /// type Future = futures_util::future::Ready<Result<Response, Error>>;
- ///
- /// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// // ...
- /// # todo!()
- /// }
- ///
- /// fn call(&mut self, request: Request) -> Self::Future {
- /// // ...
- /// # todo!()
- /// }
- /// }
- ///
- /// struct Request;
- /// struct Response;
- /// struct Error;
- ///
- /// struct WrappedResponse(Response);
- ///
- /// let builder = ServiceBuilder::new()
- /// // At this point in the builder if given a `MyService` it produces a service that
- /// // accepts `Request`s, produces `Response`s, and fails with `Error`s
- /// .check_service::<MyService, Request, Response, Error>()
- /// // Wrap responses in `WrappedResponse`
- /// .map_response(|response: Response| WrappedResponse(response))
- /// // Now the response type will be `WrappedResponse`
- /// .check_service::<MyService, _, WrappedResponse, _>();
- /// ```
- #[inline]
- pub fn check_service<S, T, U, E>(self) -> Self
- where
- L: Layer<S>,
- L::Service: Service<T, Response = U, Error = E>,
- {
- self
- }
-
- /// This wraps the inner service with the [`Layer`] returned by [`BoxService::layer()`].
- ///
- /// See that method for more details.
- ///
- /// # Example
- ///
- /// ```
- /// use tower::{Service, ServiceBuilder, BoxError, util::BoxService};
- /// use std::time::Duration;
- /// #
- /// # struct Request;
- /// # struct Response;
- /// # impl Response {
- /// # fn new() -> Self { Self }
- /// # }
- ///
- /// let service: BoxService<Request, Response, BoxError> = ServiceBuilder::new()
- /// .boxed()
- /// .load_shed()
- /// .concurrency_limit(64)
- /// .timeout(Duration::from_secs(10))
- /// .service_fn(|req: Request| async {
- /// Ok::<_, BoxError>(Response::new())
- /// });
- /// # let service = assert_service(service);
- /// # fn assert_service<S, R>(svc: S) -> S
- /// # where S: Service<R> { svc }
- /// ```
- ///
- /// [`BoxService::layer()`]: crate::util::BoxService::layer()
- #[cfg(feature = "util")]
- pub fn boxed<S, R>(
- self,
- ) -> ServiceBuilder<
- Stack<
- tower_layer::LayerFn<
- fn(
- L::Service,
- ) -> crate::util::BoxService<
- R,
- <L::Service as Service<R>>::Response,
- <L::Service as Service<R>>::Error,
- >,
- >,
- L,
- >,
- >
- where
- L: Layer<S>,
- L::Service: Service<R> + Send + 'static,
- <L::Service as Service<R>>::Future: Send + 'static,
- {
- self.layer(crate::util::BoxService::layer())
- }
-
- /// This wraps the inner service with the [`Layer`] returned by [`BoxCloneService::layer()`].
- ///
- /// This is similar to the [`boxed`] method, but it requires that `Self` implement
- /// [`Clone`], and the returned boxed service implements [`Clone`].
- ///
- /// See [`BoxCloneService`] for more details.
- ///
- /// # Example
- ///
- /// ```
- /// use tower::{Service, ServiceBuilder, BoxError, util::BoxCloneService};
- /// use std::time::Duration;
- /// #
- /// # struct Request;
- /// # struct Response;
- /// # impl Response {
- /// # fn new() -> Self { Self }
- /// # }
- ///
- /// let service: BoxCloneService<Request, Response, BoxError> = ServiceBuilder::new()
- /// .boxed_clone()
- /// .load_shed()
- /// .concurrency_limit(64)
- /// .timeout(Duration::from_secs(10))
- /// .service_fn(|req: Request| async {
- /// Ok::<_, BoxError>(Response::new())
- /// });
- /// # let service = assert_service(service);
- ///
- /// // The boxed service can still be cloned.
- /// service.clone();
- /// # fn assert_service<S, R>(svc: S) -> S
- /// # where S: Service<R> { svc }
- /// ```
- ///
- /// [`BoxCloneService::layer()`]: crate::util::BoxCloneService::layer()
- /// [`BoxCloneService`]: crate::util::BoxCloneService
- /// [`boxed`]: Self::boxed
- #[cfg(feature = "util")]
- pub fn boxed_clone<S, R>(
- self,
- ) -> ServiceBuilder<
- Stack<
- tower_layer::LayerFn<
- fn(
- L::Service,
- ) -> crate::util::BoxCloneService<
- R,
- <L::Service as Service<R>>::Response,
- <L::Service as Service<R>>::Error,
- >,
- >,
- L,
- >,
- >
- where
- L: Layer<S>,
- L::Service: Service<R> + Clone + Send + 'static,
- <L::Service as Service<R>>::Future: Send + 'static,
- {
- self.layer(crate::util::BoxCloneService::layer())
- }
-}
-
-impl<L: fmt::Debug> fmt::Debug for ServiceBuilder<L> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("ServiceBuilder").field(&self.layer).finish()
- }
-}
-
-impl<S, L> Layer<S> for ServiceBuilder<L>
-where
- L: Layer<S>,
-{
- type Service = L::Service;
-
- fn layer(&self, inner: S) -> Self::Service {
- self.layer.layer(inner)
- }
-}
diff --git a/vendor/tower/src/discover/list.rs b/vendor/tower/src/discover/list.rs
deleted file mode 100644
index 363bdd57..00000000
--- a/vendor/tower/src/discover/list.rs
+++ /dev/null
@@ -1,62 +0,0 @@
-use super::Change;
-use futures_core::Stream;
-use pin_project_lite::pin_project;
-use std::convert::Infallible;
-use std::iter::{Enumerate, IntoIterator};
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// Static service discovery based on a predetermined list of services.
- ///
- /// [`ServiceList`] is created with an initial list of services. The discovery
- /// process will yield this list once and do nothing after.
- #[derive(Debug)]
- pub struct ServiceList<T>
- where
- T: IntoIterator,
- {
- inner: Enumerate<T::IntoIter>,
- }
-}
-
-impl<T, U> ServiceList<T>
-where
- T: IntoIterator<Item = U>,
-{
- #[allow(missing_docs)]
- pub fn new<Request>(services: T) -> ServiceList<T>
- where
- U: Service<Request>,
- {
- ServiceList {
- inner: services.into_iter().enumerate(),
- }
- }
-}
-
-impl<T, U> Stream for ServiceList<T>
-where
- T: IntoIterator<Item = U>,
-{
- type Item = Result<Change<usize, U>, Infallible>;
-
- fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- match self.project().inner.next() {
- Some((i, service)) => Poll::Ready(Some(Ok(Change::Insert(i, service)))),
- None => Poll::Ready(None),
- }
- }
-}
-
-// check that List can be directly over collections
-#[cfg(test)]
-#[allow(dead_code)]
-type ListVecTest<T> = ServiceList<Vec<T>>;
-
-#[cfg(test)]
-#[allow(dead_code)]
-type ListVecIterTest<T> = ServiceList<::std::vec::IntoIter<T>>;
diff --git a/vendor/tower/src/discover/mod.rs b/vendor/tower/src/discover/mod.rs
deleted file mode 100644
index fc137dae..00000000
--- a/vendor/tower/src/discover/mod.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-//! Service discovery
-//!
-//! This module provides the [`Change`] enum, which indicates the arrival or departure of a service
-//! from a collection of similar services. Most implementations should use the [`Discover`] trait
-//! in their bounds to indicate that they can handle services coming and going. [`Discover`] itself
-//! is primarily a convenience wrapper around [`TryStream<Ok = Change>`][`TryStream`].
-//!
-//! Every discovered service is assigned an identifier that is distinct among the currently active
-//! services. If that service later goes away, a [`Change::Remove`] is yielded with that service's
-//! identifier. From that point forward, the identifier may be re-used.
-//!
-//! # Examples
-//!
-//! ```rust
-//! use futures_util::{future::poll_fn, pin_mut};
-//! use tower::discover::{Change, Discover};
-//! async fn services_monitor<D: Discover>(services: D) {
-//! pin_mut!(services);
-//! while let Some(Ok(change)) = poll_fn(|cx| services.as_mut().poll_discover(cx)).await {
-//! match change {
-//! Change::Insert(key, svc) => {
-//! // a new service with identifier `key` was discovered
-//! # let _ = (key, svc);
-//! }
-//! Change::Remove(key) => {
-//! // the service with identifier `key` has gone away
-//! # let _ = (key);
-//! }
-//! }
-//! }
-//! }
-//! ```
-//!
-//! [`TryStream`]: https://docs.rs/futures/latest/futures/stream/trait.TryStream.html
-
-mod list;
-
-pub use self::list::ServiceList;
-
-use crate::sealed::Sealed;
-use futures_core::TryStream;
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-
-/// A dynamically changing set of related services.
-///
-/// As new services arrive and old services are retired,
-/// [`Change`]s are returned which provide unique identifiers
-/// for the services.
-///
-/// See the module documentation for more details.
-pub trait Discover: Sealed<Change<(), ()>> {
- /// A unique identifier for each active service.
- ///
- /// An identifier can be re-used once a [`Change::Remove`] has been yielded for its service.
- type Key: Eq;
-
- /// The type of [`Service`] yielded by this [`Discover`].
- ///
- /// [`Service`]: crate::Service
- type Service;
-
- /// Error produced during discovery
- type Error;
-
- /// Yields the next discovery change set.
- fn poll_discover(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Option<Result<Change<Self::Key, Self::Service>, Self::Error>>>;
-}
-
-impl<K, S, E, D: ?Sized> Sealed<Change<(), ()>> for D
-where
- D: TryStream<Ok = Change<K, S>, Error = E>,
- K: Eq,
-{
-}
-
-impl<K, S, E, D: ?Sized> Discover for D
-where
- D: TryStream<Ok = Change<K, S>, Error = E>,
- K: Eq,
-{
- type Key = K;
- type Service = S;
- type Error = E;
-
- fn poll_discover(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Option<Result<D::Ok, D::Error>>> {
- TryStream::try_poll_next(self, cx)
- }
-}
-
-/// A change in the service set.
-#[derive(Debug, Clone)]
-pub enum Change<K, V> {
- /// A new service identified by key `K` was identified.
- Insert(K, V),
- /// The service identified by key `K` disappeared.
- Remove(K),
-}
diff --git a/vendor/tower/src/filter/future.rs b/vendor/tower/src/filter/future.rs
deleted file mode 100644
index 67772bbe..00000000
--- a/vendor/tower/src/filter/future.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-//! Future types
-
-use super::AsyncPredicate;
-use crate::BoxError;
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// Filtered response future from [`AsyncFilter`] services.
- ///
- /// [`AsyncFilter`]: crate::filter::AsyncFilter
- #[derive(Debug)]
- pub struct AsyncResponseFuture<P, S, Request>
- where
- P: AsyncPredicate<Request>,
- S: Service<P::Request>,
- {
- #[pin]
- state: State<P::Future, S::Future>,
-
- // Inner service
- service: S,
- }
-}
-
-opaque_future! {
- /// Filtered response future from [`Filter`] services.
- ///
- /// [`Filter`]: crate::filter::Filter
- pub type ResponseFuture<R, F> =
- futures_util::future::Either<
- futures_util::future::Ready<Result<R, crate::BoxError>>,
- futures_util::future::ErrInto<F, crate::BoxError>
- >;
-}
-
-pin_project! {
- #[project = StateProj]
- #[derive(Debug)]
- enum State<F, G> {
- /// Waiting for the predicate future
- Check {
- #[pin]
- check: F
- },
- /// Waiting for the response future
- WaitResponse {
- #[pin]
- response: G
- },
- }
-}
-
-impl<P, S, Request> AsyncResponseFuture<P, S, Request>
-where
- P: AsyncPredicate<Request>,
- S: Service<P::Request>,
- S::Error: Into<BoxError>,
-{
- pub(crate) fn new(check: P::Future, service: S) -> Self {
- Self {
- state: State::Check { check },
- service,
- }
- }
-}
-
-impl<P, S, Request> Future for AsyncResponseFuture<P, S, Request>
-where
- P: AsyncPredicate<Request>,
- S: Service<P::Request>,
- S::Error: Into<crate::BoxError>,
-{
- type Output = Result<S::Response, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- loop {
- match this.state.as_mut().project() {
- StateProj::Check { mut check } => {
- let request = ready!(check.as_mut().poll(cx))?;
- let response = this.service.call(request);
- this.state.set(State::WaitResponse { response });
- }
- StateProj::WaitResponse { response } => {
- return response.poll(cx).map_err(Into::into);
- }
- }
- }
- }
-}
diff --git a/vendor/tower/src/filter/layer.rs b/vendor/tower/src/filter/layer.rs
deleted file mode 100644
index 30cbcdec..00000000
--- a/vendor/tower/src/filter/layer.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use super::{AsyncFilter, Filter};
-use tower_layer::Layer;
-
-/// Conditionally dispatch requests to the inner service based on a synchronous
-/// [predicate].
-///
-/// This [`Layer`] produces instances of the [`Filter`] service.
-///
-/// [predicate]: crate::filter::Predicate
-/// [`Layer`]: crate::Layer
-/// [`Filter`]: crate::filter::Filter
-#[derive(Debug, Clone)]
-pub struct FilterLayer<U> {
- predicate: U,
-}
-
-/// Conditionally dispatch requests to the inner service based on an asynchronous
-/// [predicate].
-///
-/// This [`Layer`] produces instances of the [`AsyncFilter`] service.
-///
-/// [predicate]: crate::filter::AsyncPredicate
-/// [`Layer`]: crate::Layer
-/// [`Filter`]: crate::filter::AsyncFilter
-#[derive(Debug, Clone)]
-pub struct AsyncFilterLayer<U> {
- predicate: U,
-}
-
-// === impl FilterLayer ===
-
-impl<U> FilterLayer<U> {
- /// Returns a new layer that produces [`Filter`] services with the given
- /// [`Predicate`].
- ///
- /// [`Predicate`]: crate::filter::Predicate
- /// [`Filter`]: crate::filter::Filter
- pub const fn new(predicate: U) -> Self {
- Self { predicate }
- }
-}
-
-impl<U: Clone, S> Layer<S> for FilterLayer<U> {
- type Service = Filter<S, U>;
-
- fn layer(&self, service: S) -> Self::Service {
- let predicate = self.predicate.clone();
- Filter::new(service, predicate)
- }
-}
-
-// === impl AsyncFilterLayer ===
-
-impl<U> AsyncFilterLayer<U> {
- /// Returns a new layer that produces [`AsyncFilter`] services with the given
- /// [`AsyncPredicate`].
- ///
- /// [`AsyncPredicate`]: crate::filter::AsyncPredicate
- /// [`Filter`]: crate::filter::Filter
- pub const fn new(predicate: U) -> Self {
- Self { predicate }
- }
-}
-
-impl<U: Clone, S> Layer<S> for AsyncFilterLayer<U> {
- type Service = AsyncFilter<S, U>;
-
- fn layer(&self, service: S) -> Self::Service {
- let predicate = self.predicate.clone();
- AsyncFilter::new(service, predicate)
- }
-}
diff --git a/vendor/tower/src/filter/mod.rs b/vendor/tower/src/filter/mod.rs
deleted file mode 100644
index c53ba404..00000000
--- a/vendor/tower/src/filter/mod.rs
+++ /dev/null
@@ -1,191 +0,0 @@
-//! Conditionally dispatch requests to the inner service based on the result of
-//! a predicate.
-//!
-//! A predicate takes some request type and returns a `Result<Request, Error>`.
-//! If the predicate returns [`Ok`], the inner service is called with the request
-//! returned by the predicate &mdash; which may be the original request or a
-//! modified one. If the predicate returns [`Err`], the request is rejected and
-//! the inner service is not called.
-//!
-//! Predicates may either be synchronous (simple functions from a `Request` to
-//! a [`Result`]) or asynchronous (functions returning [`Future`]s). Separate
-//! traits, [`Predicate`] and [`AsyncPredicate`], represent these two types of
-//! predicate. Note that when it is not necessary to await some other
-//! asynchronous operation in the predicate, the synchronous predicate should be
-//! preferred, as it introduces less overhead.
-//!
-//! The predicate traits are implemented for closures and function pointers.
-//! However, users may also implement them for other types, such as when the
-//! predicate requires some state carried between requests. For example,
-//! [`Predicate`] could be implemented for a type that rejects a fixed set of
-//! requests by checking if they are contained by a a [`HashSet`] or other
-//! collection.
-//!
-//! [`Future`]: std::future::Future
-//! [`HashSet`]: std::collections::HashSet
-pub mod future;
-mod layer;
-mod predicate;
-
-pub use self::{
- layer::{AsyncFilterLayer, FilterLayer},
- predicate::{AsyncPredicate, Predicate},
-};
-
-use self::future::{AsyncResponseFuture, ResponseFuture};
-use crate::BoxError;
-use futures_util::{future::Either, TryFutureExt};
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-/// Conditionally dispatch requests to the inner service based on a [predicate].
-///
-/// [predicate]: Predicate
-#[derive(Clone, Debug)]
-pub struct Filter<T, U> {
- inner: T,
- predicate: U,
-}
-
-/// Conditionally dispatch requests to the inner service based on an
-/// [asynchronous predicate].
-///
-/// [asynchronous predicate]: AsyncPredicate
-#[derive(Clone, Debug)]
-pub struct AsyncFilter<T, U> {
- inner: T,
- predicate: U,
-}
-
-// ==== impl Filter ====
-
-impl<T, U> Filter<T, U> {
- /// Returns a new [`Filter`] service wrapping `inner`.
- pub const fn new(inner: T, predicate: U) -> Self {
- Self { inner, predicate }
- }
-
- /// Returns a new [`Layer`] that wraps services with a [`Filter`] service
- /// with the given [`Predicate`].
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer(predicate: U) -> FilterLayer<U> {
- FilterLayer::new(predicate)
- }
-
- /// Check a `Request` value against this filter's predicate.
- pub fn check<R>(&mut self, request: R) -> Result<U::Request, BoxError>
- where
- U: Predicate<R>,
- {
- self.predicate.check(request)
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &T {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<T, U, Request> Service<Request> for Filter<T, U>
-where
- U: Predicate<Request>,
- T: Service<U::Request>,
- T::Error: Into<BoxError>,
-{
- type Response = T::Response;
- type Error = BoxError;
- type Future = ResponseFuture<T::Response, T::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(Into::into)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- ResponseFuture::new(match self.predicate.check(request) {
- Ok(request) => Either::Right(self.inner.call(request).err_into()),
- Err(e) => Either::Left(futures_util::future::ready(Err(e))),
- })
- }
-}
-
-// ==== impl AsyncFilter ====
-
-impl<T, U> AsyncFilter<T, U> {
- /// Returns a new [`AsyncFilter`] service wrapping `inner`.
- pub const fn new(inner: T, predicate: U) -> Self {
- Self { inner, predicate }
- }
-
- /// Returns a new [`Layer`] that wraps services with an [`AsyncFilter`]
- /// service with the given [`AsyncPredicate`].
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer(predicate: U) -> FilterLayer<U> {
- FilterLayer::new(predicate)
- }
-
- /// Check a `Request` value against this filter's predicate.
- pub async fn check<R>(&mut self, request: R) -> Result<U::Request, BoxError>
- where
- U: AsyncPredicate<R>,
- {
- self.predicate.check(request).await
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &T {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<T, U, Request> Service<Request> for AsyncFilter<T, U>
-where
- U: AsyncPredicate<Request>,
- T: Service<U::Request> + Clone,
- T::Error: Into<BoxError>,
-{
- type Response = T::Response;
- type Error = BoxError;
- type Future = AsyncResponseFuture<U, T, Request>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(Into::into)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- use std::mem;
-
- let inner = self.inner.clone();
- // In case the inner service has state that's driven to readiness and
- // not tracked by clones (such as `Buffer`), pass the version we have
- // already called `poll_ready` on into the future, and leave its clone
- // behind.
- let inner = mem::replace(&mut self.inner, inner);
-
- // Check the request
- let check = self.predicate.check(request);
-
- AsyncResponseFuture::new(check, inner)
- }
-}
diff --git a/vendor/tower/src/filter/predicate.rs b/vendor/tower/src/filter/predicate.rs
deleted file mode 100644
index 181bc965..00000000
--- a/vendor/tower/src/filter/predicate.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-use crate::BoxError;
-use std::future::Future;
-
-/// Checks a request asynchronously.
-pub trait AsyncPredicate<Request> {
- /// The future returned by [`check`].
- ///
- /// [`check`]: crate::filter::AsyncPredicate::check
- type Future: Future<Output = Result<Self::Request, BoxError>>;
-
- /// The type of requests returned by [`check`].
- ///
- /// This request is forwarded to the inner service if the predicate
- /// succeeds.
- ///
- /// [`check`]: crate::filter::AsyncPredicate::check
- type Request;
-
- /// Check whether the given request should be forwarded.
- ///
- /// If the future resolves with [`Ok`], the request is forwarded to the inner service.
- fn check(&mut self, request: Request) -> Self::Future;
-}
-/// Checks a request synchronously.
-pub trait Predicate<Request> {
- /// The type of requests returned by [`check`].
- ///
- /// This request is forwarded to the inner service if the predicate
- /// succeeds.
- ///
- /// [`check`]: crate::filter::Predicate::check
- type Request;
-
- /// Check whether the given request should be forwarded.
- ///
- /// If the future resolves with [`Ok`], the request is forwarded to the inner service.
- fn check(&mut self, request: Request) -> Result<Self::Request, BoxError>;
-}
-
-impl<F, T, U, R, E> AsyncPredicate<T> for F
-where
- F: FnMut(T) -> U,
- U: Future<Output = Result<R, E>>,
- E: Into<BoxError>,
-{
- type Future = futures_util::future::ErrInto<U, BoxError>;
- type Request = R;
-
- fn check(&mut self, request: T) -> Self::Future {
- use futures_util::TryFutureExt;
- self(request).err_into()
- }
-}
-
-impl<F, T, R, E> Predicate<T> for F
-where
- F: FnMut(T) -> Result<R, E>,
- E: Into<BoxError>,
-{
- type Request = R;
-
- fn check(&mut self, request: T) -> Result<Self::Request, BoxError> {
- self(request).map_err(Into::into)
- }
-}
diff --git a/vendor/tower/src/hedge/delay.rs b/vendor/tower/src/hedge/delay.rs
deleted file mode 100644
index 7b45c0ef..00000000
--- a/vendor/tower/src/hedge/delay.rs
+++ /dev/null
@@ -1,126 +0,0 @@
-use futures_util::ready;
-use pin_project_lite::pin_project;
-use std::time::Duration;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-use crate::util::Oneshot;
-
-/// A policy which specifies how long each request should be delayed for.
-pub trait Policy<Request> {
- fn delay(&self, req: &Request) -> Duration;
-}
-
-/// A middleware which delays sending the request to the underlying service
-/// for an amount of time specified by the policy.
-#[derive(Debug)]
-pub struct Delay<P, S> {
- policy: P,
- service: S,
-}
-
-pin_project! {
- #[derive(Debug)]
- pub struct ResponseFuture<Request, S>
- where
- S: Service<Request>,
- {
- service: Option<S>,
- #[pin]
- state: State<Request, Oneshot<S, Request>>,
- }
-}
-
-pin_project! {
- #[project = StateProj]
- #[derive(Debug)]
- enum State<Request, F> {
- Delaying {
- #[pin]
- delay: tokio::time::Sleep,
- req: Option<Request>,
- },
- Called {
- #[pin]
- fut: F,
- },
- }
-}
-
-impl<Request, F> State<Request, F> {
- fn delaying(delay: tokio::time::Sleep, req: Option<Request>) -> Self {
- Self::Delaying { delay, req }
- }
-
- fn called(fut: F) -> Self {
- Self::Called { fut }
- }
-}
-
-impl<P, S> Delay<P, S> {
- pub const fn new<Request>(policy: P, service: S) -> Self
- where
- P: Policy<Request>,
- S: Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
- {
- Delay { policy, service }
- }
-}
-
-impl<Request, P, S> Service<Request> for Delay<P, S>
-where
- P: Policy<Request>,
- S: Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<Request, S>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // Calling self.service.poll_ready would reserve a slot for the delayed request,
- // potentially well in advance of actually making it. Instead, signal readiness here and
- // treat the service as a Oneshot in the future.
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- let delay = self.policy.delay(&request);
- ResponseFuture {
- service: Some(self.service.clone()),
- state: State::delaying(tokio::time::sleep(delay), Some(request)),
- }
- }
-}
-
-impl<Request, S, T, E> Future for ResponseFuture<Request, S>
-where
- E: Into<crate::BoxError>,
- S: Service<Request, Response = T, Error = E>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- loop {
- match this.state.as_mut().project() {
- StateProj::Delaying { delay, req } => {
- ready!(delay.poll(cx));
- let req = req.take().expect("Missing request in delay");
- let svc = this.service.take().expect("Missing service in delay");
- let fut = Oneshot::new(svc, req);
- this.state.set(State::called(fut));
- }
- StateProj::Called { fut } => {
- return fut.poll(cx).map_err(Into::into);
- }
- };
- }
- }
-}
diff --git a/vendor/tower/src/hedge/latency.rs b/vendor/tower/src/hedge/latency.rs
deleted file mode 100644
index 723652aa..00000000
--- a/vendor/tower/src/hedge/latency.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-use futures_util::ready;
-use pin_project_lite::pin_project;
-use std::time::Duration;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::time::Instant;
-use tower_service::Service;
-
-/// Record is the interface for accepting request latency measurements. When
-/// a request completes, record is called with the elapsed duration between
-/// when the service was called and when the future completed.
-pub trait Record {
- fn record(&mut self, latency: Duration);
-}
-
-/// Latency is a middleware that measures request latency and records it to the
-/// provided Record instance.
-#[derive(Clone, Debug)]
-pub struct Latency<R, S> {
- rec: R,
- service: S,
-}
-
-pin_project! {
- #[derive(Debug)]
- pub struct ResponseFuture<R, F> {
- start: Instant,
- rec: R,
- #[pin]
- inner: F,
- }
-}
-
-impl<S, R> Latency<R, S>
-where
- R: Record + Clone,
-{
- pub const fn new<Request>(rec: R, service: S) -> Self
- where
- S: Service<Request>,
- S::Error: Into<crate::BoxError>,
- {
- Latency { rec, service }
- }
-}
-
-impl<S, R, Request> Service<Request> for Latency<R, S>
-where
- S: Service<Request>,
- S::Error: Into<crate::BoxError>,
- R: Record + Clone,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<R, S::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.service.poll_ready(cx).map_err(Into::into)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- ResponseFuture {
- start: Instant::now(),
- rec: self.rec.clone(),
- inner: self.service.call(request),
- }
- }
-}
-
-impl<R, F, T, E> Future for ResponseFuture<R, F>
-where
- R: Record,
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
-
- let rsp = ready!(this.inner.poll(cx)).map_err(Into::into)?;
- let duration = Instant::now().saturating_duration_since(*this.start);
- this.rec.record(duration);
- Poll::Ready(Ok(rsp))
- }
-}
diff --git a/vendor/tower/src/hedge/mod.rs b/vendor/tower/src/hedge/mod.rs
deleted file mode 100644
index 3cd152e7..00000000
--- a/vendor/tower/src/hedge/mod.rs
+++ /dev/null
@@ -1,266 +0,0 @@
-//! Pre-emptively retry requests which have been outstanding for longer
-//! than a given latency percentile.
-
-#![warn(missing_debug_implementations, missing_docs, unreachable_pub)]
-
-use crate::filter::AsyncFilter;
-use futures_util::future;
-use pin_project_lite::pin_project;
-use std::sync::{Arc, Mutex};
-use std::time::Duration;
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-use tracing::error;
-
-mod delay;
-mod latency;
-mod rotating_histogram;
-mod select;
-
-use delay::Delay;
-use latency::Latency;
-use rotating_histogram::RotatingHistogram;
-use select::Select;
-
-type Histo = Arc<Mutex<RotatingHistogram>>;
-type Service<S, P> = select::Select<
- SelectPolicy<P>,
- Latency<Histo, S>,
- Delay<DelayPolicy, AsyncFilter<Latency<Histo, S>, PolicyPredicate<P>>>,
->;
-
-/// A middleware that pre-emptively retries requests which have been outstanding
-/// for longer than a given latency percentile. If either of the original
-/// future or the retry future completes, that value is used.
-#[derive(Debug)]
-pub struct Hedge<S, P>(Service<S, P>);
-
-pin_project! {
- /// The [`Future`] returned by the [`Hedge`] service.
- ///
- /// [`Future`]: std::future::Future
- #[derive(Debug)]
- pub struct Future<S, Request>
- where
- S: tower_service::Service<Request>,
- {
- #[pin]
- inner: S::Future,
- }
-}
-
-/// A policy which describes which requests can be cloned and then whether those
-/// requests should be retried.
-pub trait Policy<Request> {
- /// Called when the request is first received to determine if the request is retryable.
- fn clone_request(&self, req: &Request) -> Option<Request>;
-
- /// Called after the hedge timeout to determine if the hedge retry should be issued.
- fn can_retry(&self, req: &Request) -> bool;
-}
-
-// NOTE: these are pub only because they appear inside a Future<F>
-
-#[doc(hidden)]
-#[derive(Clone, Debug)]
-pub struct PolicyPredicate<P>(P);
-
-#[doc(hidden)]
-#[derive(Debug)]
-pub struct DelayPolicy {
- histo: Histo,
- latency_percentile: f32,
-}
-
-#[doc(hidden)]
-#[derive(Debug)]
-pub struct SelectPolicy<P> {
- policy: P,
- histo: Histo,
- min_data_points: u64,
-}
-
-impl<S, P> Hedge<S, P> {
- /// Create a new hedge middleware.
- pub fn new<Request>(
- service: S,
- policy: P,
- min_data_points: u64,
- latency_percentile: f32,
- period: Duration,
- ) -> Hedge<S, P>
- where
- S: tower_service::Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
- P: Policy<Request> + Clone,
- {
- let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
- Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
- }
-
- /// A hedge middleware with a prepopulated latency histogram. This is usedful
- /// for integration tests.
- pub fn new_with_mock_latencies<Request>(
- service: S,
- policy: P,
- min_data_points: u64,
- latency_percentile: f32,
- period: Duration,
- latencies_ms: &[u64],
- ) -> Hedge<S, P>
- where
- S: tower_service::Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
- P: Policy<Request> + Clone,
- {
- let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
- {
- let mut locked = histo.lock().unwrap();
- for latency in latencies_ms.iter() {
- locked.read().record(*latency).unwrap();
- }
- }
- Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
- }
-
- fn new_with_histo<Request>(
- service: S,
- policy: P,
- min_data_points: u64,
- latency_percentile: f32,
- histo: Histo,
- ) -> Hedge<S, P>
- where
- S: tower_service::Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
- P: Policy<Request> + Clone,
- {
- // Clone the underlying service and wrap both copies in a middleware that
- // records the latencies in a rotating histogram.
- let recorded_a = Latency::new(histo.clone(), service.clone());
- let recorded_b = Latency::new(histo.clone(), service);
-
- // Check policy to see if the hedge request should be issued.
- let filtered = AsyncFilter::new(recorded_b, PolicyPredicate(policy.clone()));
-
- // Delay the second request by a percentile of the recorded request latency
- // histogram.
- let delay_policy = DelayPolicy {
- histo: histo.clone(),
- latency_percentile,
- };
- let delayed = Delay::new(delay_policy, filtered);
-
- // If the request is retryable, issue two requests -- the second one delayed
- // by a latency percentile. Use the first result to complete.
- let select_policy = SelectPolicy {
- policy,
- histo,
- min_data_points,
- };
- Hedge(Select::new(select_policy, recorded_a, delayed))
- }
-}
-
-impl<S, P, Request> tower_service::Service<Request> for Hedge<S, P>
-where
- S: tower_service::Service<Request> + Clone,
- S::Error: Into<crate::BoxError>,
- P: Policy<Request> + Clone,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = Future<Service<S, P>, Request>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.0.poll_ready(cx)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- Future {
- inner: self.0.call(request),
- }
- }
-}
-
-impl<S, Request> std::future::Future for Future<S, Request>
-where
- S: tower_service::Service<Request>,
- S::Error: Into<crate::BoxError>,
-{
- type Output = Result<S::Response, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().inner.poll(cx).map_err(Into::into)
- }
-}
-
-// TODO: Remove when Duration::as_millis() becomes stable.
-const NANOS_PER_MILLI: u32 = 1_000_000;
-const MILLIS_PER_SEC: u64 = 1_000;
-fn millis(duration: Duration) -> u64 {
- // Round up.
- let millis = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
- duration
- .as_secs()
- .saturating_mul(MILLIS_PER_SEC)
- .saturating_add(u64::from(millis))
-}
-
-impl latency::Record for Histo {
- fn record(&mut self, latency: Duration) {
- let mut locked = self.lock().unwrap();
- locked.write().record(millis(latency)).unwrap_or_else(|e| {
- error!("Failed to write to hedge histogram: {:?}", e);
- })
- }
-}
-
-impl<P, Request> crate::filter::AsyncPredicate<Request> for PolicyPredicate<P>
-where
- P: Policy<Request>,
-{
- type Future = future::Either<
- future::Ready<Result<Request, crate::BoxError>>,
- future::Pending<Result<Request, crate::BoxError>>,
- >;
- type Request = Request;
-
- fn check(&mut self, request: Request) -> Self::Future {
- if self.0.can_retry(&request) {
- future::Either::Left(future::ready(Ok(request)))
- } else {
- // If the hedge retry should not be issued, we simply want to wait
- // for the result of the original request. Therefore we don't want
- // to return an error here. Instead, we use future::pending to ensure
- // that the original request wins the select.
- future::Either::Right(future::pending())
- }
- }
-}
-
-impl<Request> delay::Policy<Request> for DelayPolicy {
- fn delay(&self, _req: &Request) -> Duration {
- let mut locked = self.histo.lock().unwrap();
- let millis = locked
- .read()
- .value_at_quantile(self.latency_percentile.into());
- Duration::from_millis(millis)
- }
-}
-
-impl<P, Request> select::Policy<Request> for SelectPolicy<P>
-where
- P: Policy<Request>,
-{
- fn clone_request(&self, req: &Request) -> Option<Request> {
- self.policy.clone_request(req).filter(|_| {
- let mut locked = self.histo.lock().unwrap();
- // Do not attempt a retry if there are insufficiently many data
- // points in the histogram.
- locked.read().len() >= self.min_data_points
- })
- }
-}
diff --git a/vendor/tower/src/hedge/rotating_histogram.rs b/vendor/tower/src/hedge/rotating_histogram.rs
deleted file mode 100644
index 4b07a159..00000000
--- a/vendor/tower/src/hedge/rotating_histogram.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-use hdrhistogram::Histogram;
-use std::time::Duration;
-use tokio::time::Instant;
-use tracing::trace;
-
-/// This represents a "rotating" histogram which stores two histogram, one which
-/// should be read and one which should be written to. Every period, the read
-/// histogram is discarded and replaced by the write histogram. The idea here
-/// is that the read histogram should always contain a full period (the previous
-/// period) of write operations.
-#[derive(Debug)]
-pub struct RotatingHistogram {
- read: Histogram<u64>,
- write: Histogram<u64>,
- last_rotation: Instant,
- period: Duration,
-}
-
-impl RotatingHistogram {
- pub fn new(period: Duration) -> RotatingHistogram {
- RotatingHistogram {
- // Use an auto-resizing histogram to avoid choosing
- // a maximum latency bound for all users.
- read: Histogram::<u64>::new(3).expect("Invalid histogram params"),
- write: Histogram::<u64>::new(3).expect("Invalid histogram params"),
- last_rotation: Instant::now(),
- period,
- }
- }
-
- pub fn read(&mut self) -> &mut Histogram<u64> {
- self.maybe_rotate();
- &mut self.read
- }
-
- pub fn write(&mut self) -> &mut Histogram<u64> {
- self.maybe_rotate();
- &mut self.write
- }
-
- fn maybe_rotate(&mut self) {
- let delta = Instant::now().saturating_duration_since(self.last_rotation);
- // TODO: replace with delta.duration_div when it becomes stable.
- let rotations = (nanos(delta) / nanos(self.period)) as u32;
- if rotations >= 2 {
- trace!("Time since last rotation is {:?}. clearing!", delta);
- self.clear();
- } else if rotations == 1 {
- trace!("Time since last rotation is {:?}. rotating!", delta);
- self.rotate();
- }
- self.last_rotation += self.period * rotations;
- }
-
- fn rotate(&mut self) {
- std::mem::swap(&mut self.read, &mut self.write);
- trace!("Rotated {:?} points into read", self.read.len());
- self.write.clear();
- }
-
- fn clear(&mut self) {
- self.read.clear();
- self.write.clear();
- }
-}
-
-const NANOS_PER_SEC: u64 = 1_000_000_000;
-fn nanos(duration: Duration) -> u64 {
- duration
- .as_secs()
- .saturating_mul(NANOS_PER_SEC)
- .saturating_add(u64::from(duration.subsec_nanos()))
-}
diff --git a/vendor/tower/src/hedge/select.rs b/vendor/tower/src/hedge/select.rs
deleted file mode 100644
index e9f2660d..00000000
--- a/vendor/tower/src/hedge/select.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-/// A policy which decides which requests can be cloned and sent to the B
-/// service.
-pub trait Policy<Request> {
- fn clone_request(&self, req: &Request) -> Option<Request>;
-}
-
-/// Select is a middleware which attempts to clone the request and sends the
-/// original request to the A service and, if the request was able to be cloned,
-/// the cloned request to the B service. Both resulting futures will be polled
-/// and whichever future completes first will be used as the result.
-#[derive(Debug)]
-pub struct Select<P, A, B> {
- policy: P,
- a: A,
- b: B,
-}
-
-pin_project! {
- #[derive(Debug)]
- pub struct ResponseFuture<AF, BF> {
- #[pin]
- a_fut: AF,
- #[pin]
- b_fut: Option<BF>,
- }
-}
-
-impl<P, A, B> Select<P, A, B> {
- pub const fn new<Request>(policy: P, a: A, b: B) -> Self
- where
- P: Policy<Request>,
- A: Service<Request>,
- A::Error: Into<crate::BoxError>,
- B: Service<Request, Response = A::Response>,
- B::Error: Into<crate::BoxError>,
- {
- Select { policy, a, b }
- }
-}
-
-impl<P, A, B, Request> Service<Request> for Select<P, A, B>
-where
- P: Policy<Request>,
- A: Service<Request>,
- A::Error: Into<crate::BoxError>,
- B: Service<Request, Response = A::Response>,
- B::Error: Into<crate::BoxError>,
-{
- type Response = A::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<A::Future, B::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match (self.a.poll_ready(cx), self.b.poll_ready(cx)) {
- (Poll::Ready(Ok(())), Poll::Ready(Ok(()))) => Poll::Ready(Ok(())),
- (Poll::Ready(Err(e)), _) => Poll::Ready(Err(e.into())),
- (_, Poll::Ready(Err(e))) => Poll::Ready(Err(e.into())),
- _ => Poll::Pending,
- }
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- let b_fut = if let Some(cloned_req) = self.policy.clone_request(&request) {
- Some(self.b.call(cloned_req))
- } else {
- None
- };
- ResponseFuture {
- a_fut: self.a.call(request),
- b_fut,
- }
- }
-}
-
-impl<AF, BF, T, AE, BE> Future for ResponseFuture<AF, BF>
-where
- AF: Future<Output = Result<T, AE>>,
- AE: Into<crate::BoxError>,
- BF: Future<Output = Result<T, BE>>,
- BE: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
-
- if let Poll::Ready(r) = this.a_fut.poll(cx) {
- return Poll::Ready(Ok(r.map_err(Into::into)?));
- }
- if let Some(b_fut) = this.b_fut.as_pin_mut() {
- if let Poll::Ready(r) = b_fut.poll(cx) {
- return Poll::Ready(Ok(r.map_err(Into::into)?));
- }
- }
- Poll::Pending
- }
-}
diff --git a/vendor/tower/src/layer.rs b/vendor/tower/src/layer.rs
deleted file mode 100644
index 3c40c6e8..00000000
--- a/vendor/tower/src/layer.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-//! A collection of [`Layer`] based tower services
-//!
-//! [`Layer`]: crate::Layer
-
-pub use tower_layer::{layer_fn, Layer, LayerFn};
-
-/// Utilities for combining layers
-///
-/// [`Identity`]: crate::layer::util::Identity
-/// [`Layer`]: crate::Layer
-/// [`Stack`]: crate::layer::util::Stack
-pub mod util {
- pub use tower_layer::{Identity, Stack};
-}
diff --git a/vendor/tower/src/lib.rs b/vendor/tower/src/lib.rs
deleted file mode 100644
index ce911e9d..00000000
--- a/vendor/tower/src/lib.rs
+++ /dev/null
@@ -1,228 +0,0 @@
-#![warn(
- missing_debug_implementations,
- missing_docs,
- rust_2018_idioms,
- unreachable_pub
-)]
-#![forbid(unsafe_code)]
-#![allow(elided_lifetimes_in_paths, clippy::type_complexity)]
-#![cfg_attr(test, allow(clippy::float_cmp))]
-#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))]
-// `rustdoc::broken_intra_doc_links` is checked on CI
-
-//! `async fn(Request) -> Result<Response, Error>`
-//!
-//! # Overview
-//!
-//! Tower is a library of modular and reusable components for building
-//! robust networking clients and servers.
-//!
-//! Tower provides a simple core abstraction, the [`Service`] trait, which
-//! represents an asynchronous function taking a request and returning either a
-//! response or an error. This abstraction can be used to model both clients and
-//! servers.
-//!
-//! Generic components, like [`timeout`], [rate limiting], and [load balancing],
-//! can be modeled as [`Service`]s that wrap some inner service and apply
-//! additional behavior before or after the inner service is called. This allows
-//! implementing these components in a protocol-agnostic, composable way. Typically,
-//! such services are referred to as _middleware_.
-//!
-//! An additional abstraction, the [`Layer`] trait, is used to compose
-//! middleware with [`Service`]s. If a [`Service`] can be thought of as an
-//! asynchronous function from a request type to a response type, a [`Layer`] is
-//! a function taking a [`Service`] of one type and returning a [`Service`] of a
-//! different type. The [`ServiceBuilder`] type is used to add middleware to a
-//! service by composing it with multiple [`Layer`]s.
-//!
-//! ## The Tower Ecosystem
-//!
-//! Tower is made up of the following crates:
-//!
-//! * [`tower`] (this crate)
-//! * [`tower-service`]
-//! * [`tower-layer`]
-//! * [`tower-test`]
-//!
-//! Since the [`Service`] and [`Layer`] traits are important integration points
-//! for all libraries using Tower, they are kept as stable as possible, and
-//! breaking changes are made rarely. Therefore, they are defined in separate
-//! crates, [`tower-service`] and [`tower-layer`]. This crate contains
-//! re-exports of those core traits, implementations of commonly-used
-//! middleware, and [utilities] for working with [`Service`]s and [`Layer`]s.
-//! Finally, the [`tower-test`] crate provides tools for testing programs using
-//! Tower.
-//!
-//! # Usage
-//!
-//! Tower provides an abstraction layer, and generic implementations of various
-//! middleware. This means that the `tower` crate on its own does *not* provide
-//! a working implementation of a network client or server. Instead, Tower's
-//! [`Service` trait][`Service`] provides an integration point between
-//! application code, libraries providing middleware implementations, and
-//! libraries that implement servers and/or clients for various network
-//! protocols.
-//!
-//! Depending on your particular use case, you might use Tower in several ways:
-//!
-//! * **Implementing application logic** for a networked program. You might
-//! use the [`Service`] trait to model your application's behavior, and use
-//! the middleware [provided by this crate](#modules) and by other libraries
-//! to add functionality to clients and servers provided by one or more
-//! protocol implementations.
-//! * **Implementing middleware** to add custom behavior to network clients and
-//! servers in a reusable manner. This might be general-purpose middleware
-//! (and if it is, please consider releasing your middleware as a library for
-//! other Tower users!) or application-specific behavior that needs to be
-//! shared between multiple clients or servers.
-//! * **Implementing a network protocol**. Libraries that implement network
-//! protocols (such as HTTP) can depend on `tower-service` to use the
-//! [`Service`] trait as an integration point between the protocol and user
-//! code. For example, a client for some protocol might implement [`Service`],
-//! allowing users to add arbitrary Tower middleware to those clients.
-//! Similarly, a server might be created from a user-provided [`Service`].
-//!
-//! Additionally, when a network protocol requires functionality already
-//! provided by existing Tower middleware, a protocol implementation might use
-//! Tower middleware internally, as well as as an integration point.
-//!
-//! ## Library Support
-//!
-//! A number of third-party libraries support Tower and the [`Service`] trait.
-//! The following is an incomplete list of such libraries:
-//!
-//! * [`hyper`]: A fast and correct low-level HTTP implementation.
-//! * [`tonic`]: A [gRPC-over-HTTP/2][grpc] implementation built on top of
-//! [`hyper`]. See [here][tonic-examples] for examples of using [`tonic`] with
-//! Tower.
-//! * [`warp`]: A lightweight, composable web framework. See
-//! [here][warp-service] for details on using [`warp`] with Tower.
-//! * [`tower-lsp`]: implementations of the [Language
-//! Server Protocol][lsp] based on Tower.
-//!
-//! [`hyper`]: https://crates.io/crates/hyper
-//! [`tonic`]: https://crates.io/crates/tonic
-//! [tonic-examples]: https://github.com/hyperium/tonic/tree/master/examples/src/tower
-//! [grpc]: https://grpc.io
-//! [`warp`]: https://crates.io/crates/warp
-//! [warp-service]: https://docs.rs/warp/0.2.5/warp/fn.service.html
-//! [`tower-lsp`]: https://crates.io/crates/tower-lsp
-//! [lsp]: https://microsoft.github.io/language-server-protocol/
-//!
-//! If you're the maintainer of a crate that supports Tower, we'd love to add
-//! your crate to this list! Please [open a PR] adding a brief description of
-//! your library!
-//!
-//! ## Getting Started
-//!
-//! If you're brand new to Tower and want to start with the basics, we recommend you
-//! check out some of our [guides].
-//!
-//! The various middleware implementations provided by this crate are feature
-//! flagged, so that users can only compile the parts of Tower they need. By
-//! default, all the optional middleware are disabled.
-//!
-//! To get started using all of Tower's optional middleware, add this to your
-//! `Cargo.toml`:
-//!
-//! ```toml
-//! tower = { version = "0.4", features = ["full"] }
-//! ```
-//!
-//! Alternatively, you can only enable some features. For example, to enable
-//! only the [`retry`] and [`timeout`] middleware, write:
-//!
-//! ```toml
-//! tower = { version = "0.4", features = ["retry", "timeout"] }
-//! ```
-//!
-//! See [here](#modules) for a complete list of all middleware provided by
-//! Tower.
-//!
-//!
-//! ## Supported Rust Versions
-//!
-//! Tower will keep a rolling MSRV (minimum supported Rust version) policy of **at
-//! least** 6 months. When increasing the MSRV, the new Rust version must have been
-//! released at least six months ago. The current MSRV is 1.64.0.
-//!
-//! [`Service`]: crate::Service
-//! [`Layer`]: crate::Layer
-//! [rate limiting]: crate::limit::rate
-//! [load balancing]: crate::balance
-//! [`ServiceBuilder`]: crate::ServiceBuilder
-//! [utilities]: crate::ServiceExt
-//! [`tower`]: https://crates.io/crates/tower
-//! [`tower-service`]: https://crates.io/crates/tower-service
-//! [`tower-layer`]: https://crates.io/crates/tower-layer
-//! [`tower-test`]: https://crates.io/crates/tower-test
-//! [`retry`]: crate::retry
-//! [open a PR]: https://github.com/tower-rs/tower/compare
-//! [guides]: https://github.com/tower-rs/tower/tree/master/guides
-
-#[macro_use]
-pub(crate) mod macros;
-#[cfg(feature = "balance")]
-pub mod balance;
-#[cfg(feature = "buffer")]
-pub mod buffer;
-#[cfg(feature = "discover")]
-pub mod discover;
-#[cfg(feature = "filter")]
-pub mod filter;
-#[cfg(feature = "hedge")]
-pub mod hedge;
-#[cfg(feature = "limit")]
-pub mod limit;
-#[cfg(feature = "load")]
-pub mod load;
-#[cfg(feature = "load-shed")]
-pub mod load_shed;
-
-#[cfg(feature = "make")]
-pub mod make;
-#[cfg(feature = "ready-cache")]
-pub mod ready_cache;
-#[cfg(feature = "reconnect")]
-pub mod reconnect;
-#[cfg(feature = "retry")]
-pub mod retry;
-#[cfg(feature = "spawn-ready")]
-pub mod spawn_ready;
-#[cfg(feature = "steer")]
-pub mod steer;
-#[cfg(feature = "timeout")]
-pub mod timeout;
-#[cfg(feature = "util")]
-pub mod util;
-
-pub mod builder;
-pub mod layer;
-
-#[cfg(feature = "util")]
-#[doc(inline)]
-#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
-pub use self::util::{service_fn, ServiceExt};
-
-#[doc(inline)]
-pub use crate::builder::ServiceBuilder;
-
-#[cfg(feature = "make")]
-#[doc(inline)]
-#[cfg_attr(docsrs, doc(cfg(feature = "make")))]
-pub use crate::make::MakeService;
-
-#[doc(inline)]
-pub use tower_layer::Layer;
-
-#[doc(inline)]
-pub use tower_service::Service;
-
-#[allow(unreachable_pub)]
-#[cfg(any(feature = "balance", feature = "discover", feature = "make"))]
-mod sealed {
- pub trait Sealed<T> {}
-}
-
-/// Alias for a type-erased error type.
-pub type BoxError = Box<dyn std::error::Error + Send + Sync>;
diff --git a/vendor/tower/src/limit/concurrency/future.rs b/vendor/tower/src/limit/concurrency/future.rs
deleted file mode 100644
index 6eb0100a..00000000
--- a/vendor/tower/src/limit/concurrency/future.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-//! [`Future`] types
-//!
-//! [`Future`]: std::future::Future
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::sync::OwnedSemaphorePermit;
-
-pin_project! {
- /// Future for the [`ConcurrencyLimit`] service.
- ///
- /// [`ConcurrencyLimit`]: crate::limit::ConcurrencyLimit
- #[derive(Debug)]
- pub struct ResponseFuture<T> {
- #[pin]
- inner: T,
- // Keep this around so that it is dropped when the future completes
- _permit: OwnedSemaphorePermit,
- }
-}
-
-impl<T> ResponseFuture<T> {
- pub(crate) fn new(inner: T, _permit: OwnedSemaphorePermit) -> ResponseFuture<T> {
- ResponseFuture { inner, _permit }
- }
-}
-
-impl<F, T, E> Future for ResponseFuture<F>
-where
- F: Future<Output = Result<T, E>>,
-{
- type Output = Result<T, E>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- Poll::Ready(ready!(self.project().inner.poll(cx)))
- }
-}
diff --git a/vendor/tower/src/limit/concurrency/layer.rs b/vendor/tower/src/limit/concurrency/layer.rs
deleted file mode 100644
index 30257c45..00000000
--- a/vendor/tower/src/limit/concurrency/layer.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-use std::sync::Arc;
-
-use super::ConcurrencyLimit;
-use tokio::sync::Semaphore;
-use tower_layer::Layer;
-
-/// Enforces a limit on the concurrent number of requests the underlying
-/// service can handle.
-#[derive(Debug, Clone)]
-pub struct ConcurrencyLimitLayer {
- max: usize,
-}
-
-impl ConcurrencyLimitLayer {
- /// Create a new concurrency limit layer.
- pub const fn new(max: usize) -> Self {
- ConcurrencyLimitLayer { max }
- }
-}
-
-impl<S> Layer<S> for ConcurrencyLimitLayer {
- type Service = ConcurrencyLimit<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- ConcurrencyLimit::new(service, self.max)
- }
-}
-
-/// Enforces a limit on the concurrent number of requests the underlying
-/// service can handle.
-///
-/// Unlike [`ConcurrencyLimitLayer`], which enforces a per-service concurrency
-/// limit, this layer accepts a owned semaphore (`Arc<Semaphore>`) which can be
-/// shared across multiple services.
-///
-/// Cloning this layer will not create a new semaphore.
-#[derive(Debug, Clone)]
-pub struct GlobalConcurrencyLimitLayer {
- semaphore: Arc<Semaphore>,
-}
-
-impl GlobalConcurrencyLimitLayer {
- /// Create a new `GlobalConcurrencyLimitLayer`.
- pub fn new(max: usize) -> Self {
- Self::with_semaphore(Arc::new(Semaphore::new(max)))
- }
-
- /// Create a new `GlobalConcurrencyLimitLayer` from a `Arc<Semaphore>`
- pub fn with_semaphore(semaphore: Arc<Semaphore>) -> Self {
- GlobalConcurrencyLimitLayer { semaphore }
- }
-}
-
-impl<S> Layer<S> for GlobalConcurrencyLimitLayer {
- type Service = ConcurrencyLimit<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- ConcurrencyLimit::with_semaphore(service, self.semaphore.clone())
- }
-}
diff --git a/vendor/tower/src/limit/concurrency/mod.rs b/vendor/tower/src/limit/concurrency/mod.rs
deleted file mode 100644
index ac0be8a5..00000000
--- a/vendor/tower/src/limit/concurrency/mod.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-//! Limit the max number of requests being concurrently processed.
-
-pub mod future;
-mod layer;
-mod service;
-
-pub use self::{
- layer::{ConcurrencyLimitLayer, GlobalConcurrencyLimitLayer},
- service::ConcurrencyLimit,
-};
diff --git a/vendor/tower/src/limit/concurrency/service.rs b/vendor/tower/src/limit/concurrency/service.rs
deleted file mode 100644
index bb9cf5ee..00000000
--- a/vendor/tower/src/limit/concurrency/service.rs
+++ /dev/null
@@ -1,118 +0,0 @@
-use super::future::ResponseFuture;
-use tokio::sync::{OwnedSemaphorePermit, Semaphore};
-use tokio_util::sync::PollSemaphore;
-use tower_service::Service;
-
-use futures_core::ready;
-use std::{
- sync::Arc,
- task::{Context, Poll},
-};
-
-/// Enforces a limit on the concurrent number of requests the underlying
-/// service can handle.
-#[derive(Debug)]
-pub struct ConcurrencyLimit<T> {
- inner: T,
- semaphore: PollSemaphore,
- /// The currently acquired semaphore permit, if there is sufficient
- /// concurrency to send a new request.
- ///
- /// The permit is acquired in `poll_ready`, and taken in `call` when sending
- /// a new request.
- permit: Option<OwnedSemaphorePermit>,
-}
-
-impl<T> ConcurrencyLimit<T> {
- /// Create a new concurrency limiter.
- pub fn new(inner: T, max: usize) -> Self {
- Self::with_semaphore(inner, Arc::new(Semaphore::new(max)))
- }
-
- /// Create a new concurrency limiter with a provided shared semaphore
- pub fn with_semaphore(inner: T, semaphore: Arc<Semaphore>) -> Self {
- ConcurrencyLimit {
- inner,
- semaphore: PollSemaphore::new(semaphore),
- permit: None,
- }
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &T {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<S, Request> Service<Request> for ConcurrencyLimit<S>
-where
- S: Service<Request>,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = ResponseFuture<S::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // If we haven't already acquired a permit from the semaphore, try to
- // acquire one first.
- if self.permit.is_none() {
- self.permit = ready!(self.semaphore.poll_acquire(cx));
- debug_assert!(
- self.permit.is_some(),
- "ConcurrencyLimit semaphore is never closed, so `poll_acquire` \
- should never fail",
- );
- }
-
- // Once we've acquired a permit (or if we already had one), poll the
- // inner service.
- self.inner.poll_ready(cx)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- // Take the permit
- let permit = self
- .permit
- .take()
- .expect("max requests in-flight; poll_ready must be called first");
-
- // Call the inner service
- let future = self.inner.call(request);
-
- ResponseFuture::new(future, permit)
- }
-}
-
-impl<T: Clone> Clone for ConcurrencyLimit<T> {
- fn clone(&self) -> Self {
- // Since we hold an `OwnedSemaphorePermit`, we can't derive `Clone`.
- // Instead, when cloning the service, create a new service with the
- // same semaphore, but with the permit in the un-acquired state.
- Self {
- inner: self.inner.clone(),
- semaphore: self.semaphore.clone(),
- permit: None,
- }
- }
-}
-
-#[cfg(feature = "load")]
-impl<S> crate::load::Load for ConcurrencyLimit<S>
-where
- S: crate::load::Load,
-{
- type Metric = S::Metric;
- fn load(&self) -> Self::Metric {
- self.inner.load()
- }
-}
diff --git a/vendor/tower/src/limit/mod.rs b/vendor/tower/src/limit/mod.rs
deleted file mode 100644
index 6a10dcae..00000000
--- a/vendor/tower/src/limit/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//! Tower middleware for limiting requests.
-
-pub mod concurrency;
-pub mod rate;
-
-pub use self::{
- concurrency::{ConcurrencyLimit, ConcurrencyLimitLayer, GlobalConcurrencyLimitLayer},
- rate::{RateLimit, RateLimitLayer},
-};
diff --git a/vendor/tower/src/limit/rate/layer.rs b/vendor/tower/src/limit/rate/layer.rs
deleted file mode 100644
index 5f8d31aa..00000000
--- a/vendor/tower/src/limit/rate/layer.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-use super::{Rate, RateLimit};
-use std::time::Duration;
-use tower_layer::Layer;
-
-/// Enforces a rate limit on the number of requests the underlying
-/// service can handle over a period of time.
-#[derive(Debug, Clone)]
-pub struct RateLimitLayer {
- rate: Rate,
-}
-
-impl RateLimitLayer {
- /// Create new rate limit layer.
- pub const fn new(num: u64, per: Duration) -> Self {
- let rate = Rate::new(num, per);
- RateLimitLayer { rate }
- }
-}
-
-impl<S> Layer<S> for RateLimitLayer {
- type Service = RateLimit<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- RateLimit::new(service, self.rate)
- }
-}
diff --git a/vendor/tower/src/limit/rate/mod.rs b/vendor/tower/src/limit/rate/mod.rs
deleted file mode 100644
index 52b179b8..00000000
--- a/vendor/tower/src/limit/rate/mod.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//! Limit the rate at which requests are processed.
-
-mod layer;
-#[allow(clippy::module_inception)]
-mod rate;
-mod service;
-
-pub use self::{layer::RateLimitLayer, rate::Rate, service::RateLimit};
diff --git a/vendor/tower/src/limit/rate/rate.rs b/vendor/tower/src/limit/rate/rate.rs
deleted file mode 100644
index 66736dea..00000000
--- a/vendor/tower/src/limit/rate/rate.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use std::time::Duration;
-
-/// A rate of requests per time period.
-#[derive(Debug, Copy, Clone)]
-pub struct Rate {
- num: u64,
- per: Duration,
-}
-
-impl Rate {
- /// Create a new rate.
- ///
- /// # Panics
- ///
- /// This function panics if `num` or `per` is 0.
- pub const fn new(num: u64, per: Duration) -> Self {
- assert!(num > 0);
- assert!(per.as_nanos() > 0);
-
- Rate { num, per }
- }
-
- pub(crate) fn num(&self) -> u64 {
- self.num
- }
-
- pub(crate) fn per(&self) -> Duration {
- self.per
- }
-}
diff --git a/vendor/tower/src/limit/rate/service.rs b/vendor/tower/src/limit/rate/service.rs
deleted file mode 100644
index 0aa1d694..00000000
--- a/vendor/tower/src/limit/rate/service.rs
+++ /dev/null
@@ -1,130 +0,0 @@
-use super::Rate;
-use futures_core::ready;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::time::{Instant, Sleep};
-use tower_service::Service;
-
-/// Enforces a rate limit on the number of requests the underlying
-/// service can handle over a period of time.
-#[derive(Debug)]
-pub struct RateLimit<T> {
- inner: T,
- rate: Rate,
- state: State,
- sleep: Pin<Box<Sleep>>,
-}
-
-#[derive(Debug)]
-enum State {
- // The service has hit its limit
- Limited,
- Ready { until: Instant, rem: u64 },
-}
-
-impl<T> RateLimit<T> {
- /// Create a new rate limiter
- pub fn new(inner: T, rate: Rate) -> Self {
- let until = Instant::now();
- let state = State::Ready {
- until,
- rem: rate.num(),
- };
-
- RateLimit {
- inner,
- rate,
- state,
- // The sleep won't actually be used with this duration, but
- // we create it eagerly so that we can reset it in place rather than
- // `Box::pin`ning a new `Sleep` every time we need one.
- sleep: Box::pin(tokio::time::sleep_until(until)),
- }
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &T {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<S, Request> Service<Request> for RateLimit<S>
-where
- S: Service<Request>,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self.state {
- State::Ready { .. } => return Poll::Ready(ready!(self.inner.poll_ready(cx))),
- State::Limited => {
- if Pin::new(&mut self.sleep).poll(cx).is_pending() {
- tracing::trace!("rate limit exceeded; sleeping.");
- return Poll::Pending;
- }
- }
- }
-
- self.state = State::Ready {
- until: Instant::now() + self.rate.per(),
- rem: self.rate.num(),
- };
-
- Poll::Ready(ready!(self.inner.poll_ready(cx)))
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- match self.state {
- State::Ready { mut until, mut rem } => {
- let now = Instant::now();
-
- // If the period has elapsed, reset it.
- if now >= until {
- until = now + self.rate.per();
- rem = self.rate.num();
- }
-
- if rem > 1 {
- rem -= 1;
- self.state = State::Ready { until, rem };
- } else {
- // The service is disabled until further notice
- // Reset the sleep future in place, so that we don't have to
- // deallocate the existing box and allocate a new one.
- self.sleep.as_mut().reset(until);
- self.state = State::Limited;
- }
-
- // Call the inner future
- self.inner.call(request)
- }
- State::Limited => panic!("service not ready; poll_ready must be called first"),
- }
- }
-}
-
-#[cfg(feature = "load")]
-impl<S> crate::load::Load for RateLimit<S>
-where
- S: crate::load::Load,
-{
- type Metric = S::Metric;
- fn load(&self) -> Self::Metric {
- self.inner.load()
- }
-}
diff --git a/vendor/tower/src/load/completion.rs b/vendor/tower/src/load/completion.rs
deleted file mode 100644
index 857e0553..00000000
--- a/vendor/tower/src/load/completion.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-//! Application-specific request completion semantics.
-
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-/// Attaches `H`-typed completion tracker to `V` typed values.
-///
-/// Handles (of type `H`) are intended to be RAII guards that primarily implement [`Drop`] and update
-/// load metric state as they are dropped. This trait allows implementors to "forward" the handle
-/// to later parts of the request-handling pipeline, so that the handle is only dropped when the
-/// request has truly completed.
-///
-/// This utility allows load metrics to have a protocol-agnostic means to track streams past their
-/// initial response future. For example, if `V` represents an HTTP response type, an
-/// implementation could add `H`-typed handles to each response's extensions to detect when all the
-/// response's extensions have been dropped.
-///
-/// A base `impl<H, V> TrackCompletion<H, V> for CompleteOnResponse` is provided to drop the handle
-/// once the response future is resolved. This is appropriate when a response is discrete and
-/// cannot comprise multiple messages.
-///
-/// In many cases, the `Output` type is simply `V`. However, [`TrackCompletion`] may alter the type
-/// in order to instrument it appropriately. For example, an HTTP [`TrackCompletion`] may modify
-/// the body type: so a [`TrackCompletion`] that takes values of type
-/// [`http::Response<A>`][response] may output values of type [`http::Response<B>`][response].
-///
-/// [response]: https://docs.rs/http/latest/http/response/struct.Response.html
-pub trait TrackCompletion<H, V>: Clone {
- /// The instrumented value type.
- type Output;
-
- /// Attaches a `H`-typed handle to a `V`-typed value.
- fn track_completion(&self, handle: H, value: V) -> Self::Output;
-}
-
-/// A [`TrackCompletion`] implementation that considers the request completed when the response
-/// future is resolved.
-#[derive(Clone, Copy, Debug, Default)]
-#[non_exhaustive]
-pub struct CompleteOnResponse;
-
-pin_project! {
- /// Attaches a `C`-typed completion tracker to the result of an `F`-typed [`Future`].
- #[derive(Debug)]
- pub struct TrackCompletionFuture<F, C, H> {
- #[pin]
- future: F,
- handle: Option<H>,
- completion: C,
- }
-}
-
-// ===== impl InstrumentFuture =====
-
-impl<F, C, H> TrackCompletionFuture<F, C, H> {
- /// Wraps a future, propagating the tracker into its value if successful.
- pub const fn new(completion: C, handle: H, future: F) -> Self {
- TrackCompletionFuture {
- future,
- completion,
- handle: Some(handle),
- }
- }
-}
-
-impl<F, C, H, T, E> Future for TrackCompletionFuture<F, C, H>
-where
- F: Future<Output = Result<T, E>>,
- C: TrackCompletion<H, T>,
-{
- type Output = Result<C::Output, E>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
- let rsp = ready!(this.future.poll(cx))?;
- let h = this.handle.take().expect("handle");
- Poll::Ready(Ok(this.completion.track_completion(h, rsp)))
- }
-}
-
-// ===== CompleteOnResponse =====
-
-impl<H, V> TrackCompletion<H, V> for CompleteOnResponse {
- type Output = V;
-
- fn track_completion(&self, handle: H, value: V) -> V {
- drop(handle);
- value
- }
-}
diff --git a/vendor/tower/src/load/constant.rs b/vendor/tower/src/load/constant.rs
deleted file mode 100644
index b53a10bf..00000000
--- a/vendor/tower/src/load/constant.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-//! A constant [`Load`] implementation.
-
-#[cfg(feature = "discover")]
-use crate::discover::{Change, Discover};
-#[cfg(feature = "discover")]
-use futures_core::{ready, Stream};
-#[cfg(feature = "discover")]
-use std::pin::Pin;
-
-use super::Load;
-use pin_project_lite::pin_project;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-pin_project! {
- #[derive(Debug)]
- /// Wraps a type so that it implements [`Load`] and returns a constant load metric.
- ///
- /// This load estimator is primarily useful for testing.
- pub struct Constant<T, M> {
- inner: T,
- load: M,
- }
-}
-
-// ===== impl Constant =====
-
-impl<T, M: Copy> Constant<T, M> {
- /// Wraps a `T`-typed service with a constant `M`-typed load metric.
- pub const fn new(inner: T, load: M) -> Self {
- Self { inner, load }
- }
-}
-
-impl<T, M: Copy + PartialOrd> Load for Constant<T, M> {
- type Metric = M;
-
- fn load(&self) -> M {
- self.load
- }
-}
-
-impl<S, M, Request> Service<Request> for Constant<S, M>
-where
- S: Service<Request>,
- M: Copy,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx)
- }
-
- fn call(&mut self, req: Request) -> Self::Future {
- self.inner.call(req)
- }
-}
-
-/// Proxies [`Discover`] such that all changes are wrapped with a constant load.
-#[cfg(feature = "discover")]
-impl<D: Discover + Unpin, M: Copy> Stream for Constant<D, M> {
- type Item = Result<Change<D::Key, Constant<D::Service, M>>, D::Error>;
-
- /// Yields the next discovery change set.
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- use self::Change::*;
-
- let this = self.project();
- let change = match ready!(Pin::new(this.inner).poll_discover(cx)).transpose()? {
- None => return Poll::Ready(None),
- Some(Insert(k, svc)) => Insert(k, Constant::new(svc, *this.load)),
- Some(Remove(k)) => Remove(k),
- };
-
- Poll::Ready(Some(Ok(change)))
- }
-}
diff --git a/vendor/tower/src/load/mod.rs b/vendor/tower/src/load/mod.rs
deleted file mode 100644
index e47558b4..00000000
--- a/vendor/tower/src/load/mod.rs
+++ /dev/null
@@ -1,89 +0,0 @@
-//! Service load measurement
-//!
-//! This module provides the [`Load`] trait, which allows measuring how loaded a service is.
-//! It also provides several wrapper types that measure load in different ways:
-//!
-//! - [`Constant`] — Always returns the same constant load value for a service.
-//! - [`PendingRequests`] — Measures load by tracking the number of in-flight requests.
-//! - [`PeakEwma`] — Measures load using a moving average of the peak latency for the service.
-//!
-//! In general, you will want to use one of these when using the types in [`tower::balance`] which
-//! balance services depending on their load. Which load metric to use depends on your exact
-//! use-case, but the ones above should get you quite far!
-//!
-//! When the `discover` feature is enabled, wrapper types for [`Discover`] that
-//! wrap the discovered services with the given load estimator are also provided.
-//!
-//! # When does a request complete?
-//!
-//! For many applications, the request life-cycle is relatively simple: when a service responds to
-//! a request, that request is done, and the system can forget about it. However, for some
-//! applications, the service may respond to the initial request while other parts of the system
-//! are still acting on that request. In such an application, the system load must take these
-//! requests into account as well, or risk the system underestimating its own load.
-//!
-//! To support these use-cases, the load estimators in this module are parameterized by the
-//! [`TrackCompletion`] trait, with [`CompleteOnResponse`] as the default type. The behavior of
-//! [`CompleteOnResponse`] is what you would normally expect for a request-response cycle: when the
-//! response is produced, the request is considered "finished", and load goes down. This can be
-//! overridden by your own user-defined type to track more complex request completion semantics. See
-//! the documentation for [`completion`] for more details.
-//!
-//! # Examples
-//!
-//! ```rust
-//! # #[cfg(feature = "util")]
-//! use tower::util::ServiceExt;
-//! # #[cfg(feature = "util")]
-//! use tower::{load::Load, Service};
-//! # #[cfg(feature = "util")]
-//! async fn simple_balance<S1, S2, R>(
-//! svc1: &mut S1,
-//! svc2: &mut S2,
-//! request: R
-//! ) -> Result<S1::Response, S1::Error>
-//! where
-//! S1: Load + Service<R>,
-//! S2: Load<Metric = S1::Metric> + Service<R, Response = S1::Response, Error = S1::Error>
-//! {
-//! if svc1.load() < svc2.load() {
-//! svc1.ready().await?.call(request).await
-//! } else {
-//! svc2.ready().await?.call(request).await
-//! }
-//! }
-//! ```
-//!
-//! [`tower::balance`]: crate::balance
-//! [`Discover`]: crate::discover::Discover
-//! [`CompleteOnResponse`]: crate::load::completion::CompleteOnResponse
-// TODO: a custom completion example would be good here
-
-pub mod completion;
-mod constant;
-pub mod peak_ewma;
-pub mod pending_requests;
-
-pub use self::{
- completion::{CompleteOnResponse, TrackCompletion},
- constant::Constant,
- peak_ewma::PeakEwma,
- pending_requests::PendingRequests,
-};
-
-#[cfg(feature = "discover")]
-pub use self::{peak_ewma::PeakEwmaDiscover, pending_requests::PendingRequestsDiscover};
-
-/// Types that implement this trait can give an estimate of how loaded they are.
-///
-/// See the module documentation for more details.
-pub trait Load {
- /// A comparable load metric.
- ///
- /// Lesser values indicate that the service is less loaded, and should be preferred for new
- /// requests over another service with a higher value.
- type Metric: PartialOrd;
-
- /// Estimate the service's current load.
- fn load(&self) -> Self::Metric;
-}
diff --git a/vendor/tower/src/load/peak_ewma.rs b/vendor/tower/src/load/peak_ewma.rs
deleted file mode 100644
index c145ab60..00000000
--- a/vendor/tower/src/load/peak_ewma.rs
+++ /dev/null
@@ -1,406 +0,0 @@
-//! A `Load` implementation that measures load using the PeakEWMA response latency.
-
-#[cfg(feature = "discover")]
-use crate::discover::{Change, Discover};
-#[cfg(feature = "discover")]
-use futures_core::{ready, Stream};
-#[cfg(feature = "discover")]
-use pin_project_lite::pin_project;
-#[cfg(feature = "discover")]
-use std::pin::Pin;
-
-use super::completion::{CompleteOnResponse, TrackCompletion, TrackCompletionFuture};
-use super::Load;
-use std::task::{Context, Poll};
-use std::{
- sync::{Arc, Mutex},
- time::Duration,
-};
-use tokio::time::Instant;
-use tower_service::Service;
-use tracing::trace;
-
-/// Measures the load of the underlying service using Peak-EWMA load measurement.
-///
-/// [`PeakEwma`] implements [`Load`] with the [`Cost`] metric that estimates the amount of
-/// pending work to an endpoint. Work is calculated by multiplying the
-/// exponentially-weighted moving average (EWMA) of response latencies by the number of
-/// pending requests. The Peak-EWMA algorithm is designed to be especially sensitive to
-/// worst-case latencies. Over time, the peak latency value decays towards the moving
-/// average of latencies to the endpoint.
-///
-/// When no latency information has been measured for an endpoint, an arbitrary default
-/// RTT of 1 second is used to prevent the endpoint from being overloaded before a
-/// meaningful baseline can be established..
-///
-/// ## Note
-///
-/// This is derived from [Finagle][finagle], which is distributed under the Apache V2
-/// license. Copyright 2017, Twitter Inc.
-///
-/// [finagle]:
-/// https://github.com/twitter/finagle/blob/9cc08d15216497bb03a1cafda96b7266cfbbcff1/finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
-#[derive(Debug)]
-pub struct PeakEwma<S, C = CompleteOnResponse> {
- service: S,
- decay_ns: f64,
- rtt_estimate: Arc<Mutex<RttEstimate>>,
- completion: C,
-}
-
-#[cfg(feature = "discover")]
-pin_project! {
- /// Wraps a `D`-typed stream of discovered services with `PeakEwma`.
- #[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
- #[derive(Debug)]
- pub struct PeakEwmaDiscover<D, C = CompleteOnResponse> {
- #[pin]
- discover: D,
- decay_ns: f64,
- default_rtt: Duration,
- completion: C,
- }
-}
-
-/// Represents the relative cost of communicating with a service.
-///
-/// The underlying value estimates the amount of pending work to a service: the Peak-EWMA
-/// latency estimate multiplied by the number of pending requests.
-#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
-pub struct Cost(f64);
-
-/// Tracks an in-flight request and updates the RTT-estimate on Drop.
-#[derive(Debug)]
-pub struct Handle {
- sent_at: Instant,
- decay_ns: f64,
- rtt_estimate: Arc<Mutex<RttEstimate>>,
-}
-
-/// Holds the current RTT estimate and the last time this value was updated.
-#[derive(Debug)]
-struct RttEstimate {
- update_at: Instant,
- rtt_ns: f64,
-}
-
-const NANOS_PER_MILLI: f64 = 1_000_000.0;
-
-// ===== impl PeakEwma =====
-
-impl<S, C> PeakEwma<S, C> {
- /// Wraps an `S`-typed service so that its load is tracked by the EWMA of its peak latency.
- pub fn new(service: S, default_rtt: Duration, decay_ns: f64, completion: C) -> Self {
- debug_assert!(decay_ns > 0.0, "decay_ns must be positive");
- Self {
- service,
- decay_ns,
- rtt_estimate: Arc::new(Mutex::new(RttEstimate::new(nanos(default_rtt)))),
- completion,
- }
- }
-
- fn handle(&self) -> Handle {
- Handle {
- decay_ns: self.decay_ns,
- sent_at: Instant::now(),
- rtt_estimate: self.rtt_estimate.clone(),
- }
- }
-}
-
-impl<S, C, Request> Service<Request> for PeakEwma<S, C>
-where
- S: Service<Request>,
- C: TrackCompletion<Handle, S::Response>,
-{
- type Response = C::Output;
- type Error = S::Error;
- type Future = TrackCompletionFuture<S::Future, C, Handle>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.service.poll_ready(cx)
- }
-
- fn call(&mut self, req: Request) -> Self::Future {
- TrackCompletionFuture::new(
- self.completion.clone(),
- self.handle(),
- self.service.call(req),
- )
- }
-}
-
-impl<S, C> Load for PeakEwma<S, C> {
- type Metric = Cost;
-
- fn load(&self) -> Self::Metric {
- let pending = Arc::strong_count(&self.rtt_estimate) as u32 - 1;
-
- // Update the RTT estimate to account for decay since the last update.
- // If an estimate has not been established, a default is provided
- let estimate = self.update_estimate();
-
- let cost = Cost(estimate * f64::from(pending + 1));
- trace!(
- "load estimate={:.0}ms pending={} cost={:?}",
- estimate / NANOS_PER_MILLI,
- pending,
- cost,
- );
- cost
- }
-}
-
-impl<S, C> PeakEwma<S, C> {
- fn update_estimate(&self) -> f64 {
- let mut rtt = self.rtt_estimate.lock().expect("peak ewma prior_estimate");
- rtt.decay(self.decay_ns)
- }
-}
-
-// ===== impl PeakEwmaDiscover =====
-
-#[cfg(feature = "discover")]
-impl<D, C> PeakEwmaDiscover<D, C> {
- /// Wraps a `D`-typed [`Discover`] so that services have a [`PeakEwma`] load metric.
- ///
- /// The provided `default_rtt` is used as the default RTT estimate for newly
- /// added services.
- ///
- /// They `decay` value determines over what time period a RTT estimate should
- /// decay.
- pub fn new<Request>(discover: D, default_rtt: Duration, decay: Duration, completion: C) -> Self
- where
- D: Discover,
- D::Service: Service<Request>,
- C: TrackCompletion<Handle, <D::Service as Service<Request>>::Response>,
- {
- PeakEwmaDiscover {
- discover,
- decay_ns: nanos(decay),
- default_rtt,
- completion,
- }
- }
-}
-
-#[cfg(feature = "discover")]
-impl<D, C> Stream for PeakEwmaDiscover<D, C>
-where
- D: Discover,
- C: Clone,
-{
- type Item = Result<Change<D::Key, PeakEwma<D::Service, C>>, D::Error>;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- let this = self.project();
- let change = match ready!(this.discover.poll_discover(cx)).transpose()? {
- None => return Poll::Ready(None),
- Some(Change::Remove(k)) => Change::Remove(k),
- Some(Change::Insert(k, svc)) => {
- let peak_ewma = PeakEwma::new(
- svc,
- *this.default_rtt,
- *this.decay_ns,
- this.completion.clone(),
- );
- Change::Insert(k, peak_ewma)
- }
- };
-
- Poll::Ready(Some(Ok(change)))
- }
-}
-
-// ===== impl RttEstimate =====
-
-impl RttEstimate {
- fn new(rtt_ns: f64) -> Self {
- debug_assert!(0.0 < rtt_ns, "rtt must be positive");
- Self {
- rtt_ns,
- update_at: Instant::now(),
- }
- }
-
- /// Decays the RTT estimate with a decay period of `decay_ns`.
- fn decay(&mut self, decay_ns: f64) -> f64 {
- // Updates with a 0 duration so that the estimate decays towards 0.
- let now = Instant::now();
- self.update(now, now, decay_ns)
- }
-
- /// Updates the Peak-EWMA RTT estimate.
- ///
- /// The elapsed time from `sent_at` to `recv_at` is added
- fn update(&mut self, sent_at: Instant, recv_at: Instant, decay_ns: f64) -> f64 {
- debug_assert!(
- sent_at <= recv_at,
- "recv_at={:?} after sent_at={:?}",
- recv_at,
- sent_at
- );
- let rtt = nanos(recv_at.saturating_duration_since(sent_at));
-
- let now = Instant::now();
- debug_assert!(
- self.update_at <= now,
- "update_at={:?} in the future",
- self.update_at
- );
-
- self.rtt_ns = if self.rtt_ns < rtt {
- // For Peak-EWMA, always use the worst-case (peak) value as the estimate for
- // subsequent requests.
- trace!(
- "update peak rtt={}ms prior={}ms",
- rtt / NANOS_PER_MILLI,
- self.rtt_ns / NANOS_PER_MILLI,
- );
- rtt
- } else {
- // When an RTT is observed that is less than the estimated RTT, we decay the
- // prior estimate according to how much time has elapsed since the last
- // update. The inverse of the decay is used to scale the estimate towards the
- // observed RTT value.
- let elapsed = nanos(now.saturating_duration_since(self.update_at));
- let decay = (-elapsed / decay_ns).exp();
- let recency = 1.0 - decay;
- let next_estimate = (self.rtt_ns * decay) + (rtt * recency);
- trace!(
- "update rtt={:03.0}ms decay={:06.0}ns; next={:03.0}ms",
- rtt / NANOS_PER_MILLI,
- self.rtt_ns - next_estimate,
- next_estimate / NANOS_PER_MILLI,
- );
- next_estimate
- };
- self.update_at = now;
-
- self.rtt_ns
- }
-}
-
-// ===== impl Handle =====
-
-impl Drop for Handle {
- fn drop(&mut self) {
- let recv_at = Instant::now();
-
- if let Ok(mut rtt) = self.rtt_estimate.lock() {
- rtt.update(self.sent_at, recv_at, self.decay_ns);
- }
- }
-}
-
-// ===== impl Cost =====
-
-// Utility that converts durations to nanos in f64.
-//
-// Due to a lossy transformation, the maximum value that can be represented is ~585 years,
-// which, I hope, is more than enough to represent request latencies.
-fn nanos(d: Duration) -> f64 {
- const NANOS_PER_SEC: u64 = 1_000_000_000;
- let n = f64::from(d.subsec_nanos());
- let s = d.as_secs().saturating_mul(NANOS_PER_SEC) as f64;
- n + s
-}
-
-#[cfg(test)]
-mod tests {
- use futures_util::future;
- use std::time::Duration;
- use tokio::time;
- use tokio_test::{assert_ready, assert_ready_ok, task};
-
- use super::*;
-
- struct Svc;
- impl Service<()> for Svc {
- type Response = ();
- type Error = ();
- type Future = future::Ready<Result<(), ()>>;
-
- fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), ()>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, (): ()) -> Self::Future {
- future::ok(())
- }
- }
-
- /// The default RTT estimate decays, so that new nodes are considered if the
- /// default RTT is too high.
- #[tokio::test]
- async fn default_decay() {
- time::pause();
-
- let svc = PeakEwma::new(
- Svc,
- Duration::from_millis(10),
- NANOS_PER_MILLI * 1_000.0,
- CompleteOnResponse,
- );
- let Cost(load) = svc.load();
- assert_eq!(load, 10.0 * NANOS_PER_MILLI);
-
- time::advance(Duration::from_millis(100)).await;
- let Cost(load) = svc.load();
- assert!(9.0 * NANOS_PER_MILLI < load && load < 10.0 * NANOS_PER_MILLI);
-
- time::advance(Duration::from_millis(100)).await;
- let Cost(load) = svc.load();
- assert!(8.0 * NANOS_PER_MILLI < load && load < 9.0 * NANOS_PER_MILLI);
- }
-
- // The default RTT estimate decays, so that new nodes are considered if the default RTT is too
- // high.
- #[tokio::test]
- async fn compound_decay() {
- time::pause();
-
- let mut svc = PeakEwma::new(
- Svc,
- Duration::from_millis(20),
- NANOS_PER_MILLI * 1_000.0,
- CompleteOnResponse,
- );
- assert_eq!(svc.load(), Cost(20.0 * NANOS_PER_MILLI));
-
- time::advance(Duration::from_millis(100)).await;
- let mut rsp0 = task::spawn(svc.call(()));
- assert!(svc.load() > Cost(20.0 * NANOS_PER_MILLI));
-
- time::advance(Duration::from_millis(100)).await;
- let mut rsp1 = task::spawn(svc.call(()));
- assert!(svc.load() > Cost(40.0 * NANOS_PER_MILLI));
-
- time::advance(Duration::from_millis(100)).await;
- let () = assert_ready_ok!(rsp0.poll());
- assert_eq!(svc.load(), Cost(400_000_000.0));
-
- time::advance(Duration::from_millis(100)).await;
- let () = assert_ready_ok!(rsp1.poll());
- assert_eq!(svc.load(), Cost(200_000_000.0));
-
- // Check that values decay as time elapses
- time::advance(Duration::from_secs(1)).await;
- assert!(svc.load() < Cost(100_000_000.0));
-
- time::advance(Duration::from_secs(10)).await;
- assert!(svc.load() < Cost(100_000.0));
- }
-
- #[test]
- fn nanos() {
- assert_eq!(super::nanos(Duration::new(0, 0)), 0.0);
- assert_eq!(super::nanos(Duration::new(0, 123)), 123.0);
- assert_eq!(super::nanos(Duration::new(1, 23)), 1_000_000_023.0);
- assert_eq!(
- super::nanos(Duration::new(::std::u64::MAX, 999_999_999)),
- 18446744074709553000.0
- );
- }
-}
diff --git a/vendor/tower/src/load/pending_requests.rs b/vendor/tower/src/load/pending_requests.rs
deleted file mode 100644
index 2721633f..00000000
--- a/vendor/tower/src/load/pending_requests.rs
+++ /dev/null
@@ -1,217 +0,0 @@
-//! A [`Load`] implementation that measures load using the number of in-flight requests.
-
-#[cfg(feature = "discover")]
-use crate::discover::{Change, Discover};
-#[cfg(feature = "discover")]
-use futures_core::{ready, Stream};
-#[cfg(feature = "discover")]
-use pin_project_lite::pin_project;
-#[cfg(feature = "discover")]
-use std::pin::Pin;
-
-use super::completion::{CompleteOnResponse, TrackCompletion, TrackCompletionFuture};
-use super::Load;
-use std::sync::Arc;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-/// Measures the load of the underlying service using the number of currently-pending requests.
-#[derive(Debug)]
-pub struct PendingRequests<S, C = CompleteOnResponse> {
- service: S,
- ref_count: RefCount,
- completion: C,
-}
-
-/// Shared between instances of [`PendingRequests`] and [`Handle`] to track active references.
-#[derive(Clone, Debug, Default)]
-struct RefCount(Arc<()>);
-
-#[cfg(feature = "discover")]
-pin_project! {
- /// Wraps a `D`-typed stream of discovered services with [`PendingRequests`].
- #[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
- #[derive(Debug)]
- pub struct PendingRequestsDiscover<D, C = CompleteOnResponse> {
- #[pin]
- discover: D,
- completion: C,
- }
-}
-
-/// Represents the number of currently-pending requests to a given service.
-#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Ord, Eq)]
-pub struct Count(usize);
-
-/// Tracks an in-flight request by reference count.
-#[derive(Debug)]
-#[allow(dead_code)]
-pub struct Handle(RefCount);
-
-// ===== impl PendingRequests =====
-
-impl<S, C> PendingRequests<S, C> {
- /// Wraps an `S`-typed service so that its load is tracked by the number of pending requests.
- pub fn new(service: S, completion: C) -> Self {
- Self {
- service,
- completion,
- ref_count: RefCount::default(),
- }
- }
-
- fn handle(&self) -> Handle {
- Handle(self.ref_count.clone())
- }
-}
-
-impl<S, C> Load for PendingRequests<S, C> {
- type Metric = Count;
-
- fn load(&self) -> Count {
- // Count the number of references that aren't `self`.
- Count(self.ref_count.ref_count() - 1)
- }
-}
-
-impl<S, C, Request> Service<Request> for PendingRequests<S, C>
-where
- S: Service<Request>,
- C: TrackCompletion<Handle, S::Response>,
-{
- type Response = C::Output;
- type Error = S::Error;
- type Future = TrackCompletionFuture<S::Future, C, Handle>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.service.poll_ready(cx)
- }
-
- fn call(&mut self, req: Request) -> Self::Future {
- TrackCompletionFuture::new(
- self.completion.clone(),
- self.handle(),
- self.service.call(req),
- )
- }
-}
-
-// ===== impl PendingRequestsDiscover =====
-
-#[cfg(feature = "discover")]
-impl<D, C> PendingRequestsDiscover<D, C> {
- /// Wraps a [`Discover`], wrapping all of its services with [`PendingRequests`].
- pub const fn new<Request>(discover: D, completion: C) -> Self
- where
- D: Discover,
- D::Service: Service<Request>,
- C: TrackCompletion<Handle, <D::Service as Service<Request>>::Response>,
- {
- Self {
- discover,
- completion,
- }
- }
-}
-
-#[cfg(feature = "discover")]
-impl<D, C> Stream for PendingRequestsDiscover<D, C>
-where
- D: Discover,
- C: Clone,
-{
- type Item = Result<Change<D::Key, PendingRequests<D::Service, C>>, D::Error>;
-
- /// Yields the next discovery change set.
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- use self::Change::*;
-
- let this = self.project();
- let change = match ready!(this.discover.poll_discover(cx)).transpose()? {
- None => return Poll::Ready(None),
- Some(Insert(k, svc)) => Insert(k, PendingRequests::new(svc, this.completion.clone())),
- Some(Remove(k)) => Remove(k),
- };
-
- Poll::Ready(Some(Ok(change)))
- }
-}
-
-// ==== RefCount ====
-
-impl RefCount {
- pub(crate) fn ref_count(&self) -> usize {
- Arc::strong_count(&self.0)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use futures_util::future;
- use std::task::{Context, Poll};
-
- struct Svc;
- impl Service<()> for Svc {
- type Response = ();
- type Error = ();
- type Future = future::Ready<Result<(), ()>>;
-
- fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), ()>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, (): ()) -> Self::Future {
- future::ok(())
- }
- }
-
- #[test]
- fn default() {
- let mut svc = PendingRequests::new(Svc, CompleteOnResponse);
- assert_eq!(svc.load(), Count(0));
-
- let rsp0 = svc.call(());
- assert_eq!(svc.load(), Count(1));
-
- let rsp1 = svc.call(());
- assert_eq!(svc.load(), Count(2));
-
- let () = tokio_test::block_on(rsp0).unwrap();
- assert_eq!(svc.load(), Count(1));
-
- let () = tokio_test::block_on(rsp1).unwrap();
- assert_eq!(svc.load(), Count(0));
- }
-
- #[test]
- fn with_completion() {
- #[derive(Clone)]
- struct IntoHandle;
- impl TrackCompletion<Handle, ()> for IntoHandle {
- type Output = Handle;
- fn track_completion(&self, i: Handle, (): ()) -> Handle {
- i
- }
- }
-
- let mut svc = PendingRequests::new(Svc, IntoHandle);
- assert_eq!(svc.load(), Count(0));
-
- let rsp = svc.call(());
- assert_eq!(svc.load(), Count(1));
- let i0 = tokio_test::block_on(rsp).unwrap();
- assert_eq!(svc.load(), Count(1));
-
- let rsp = svc.call(());
- assert_eq!(svc.load(), Count(2));
- let i1 = tokio_test::block_on(rsp).unwrap();
- assert_eq!(svc.load(), Count(2));
-
- drop(i1);
- assert_eq!(svc.load(), Count(1));
-
- drop(i0);
- assert_eq!(svc.load(), Count(0));
- }
-}
diff --git a/vendor/tower/src/load_shed/error.rs b/vendor/tower/src/load_shed/error.rs
deleted file mode 100644
index e11da582..00000000
--- a/vendor/tower/src/load_shed/error.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-//! Error types
-
-use std::fmt;
-
-/// An error returned by [`LoadShed`] when the underlying service
-/// is not ready to handle any requests at the time of being
-/// called.
-///
-/// [`LoadShed`]: crate::load_shed::LoadShed
-#[derive(Default)]
-pub struct Overloaded {
- _p: (),
-}
-
-impl Overloaded {
- /// Construct a new overloaded error
- pub const fn new() -> Self {
- Overloaded { _p: () }
- }
-}
-
-impl fmt::Debug for Overloaded {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("Overloaded")
- }
-}
-
-impl fmt::Display for Overloaded {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("service overloaded")
- }
-}
-
-impl std::error::Error for Overloaded {}
diff --git a/vendor/tower/src/load_shed/future.rs b/vendor/tower/src/load_shed/future.rs
deleted file mode 100644
index 64f394c9..00000000
--- a/vendor/tower/src/load_shed/future.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-//! Future types
-
-use std::fmt;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use futures_core::ready;
-use pin_project_lite::pin_project;
-
-use super::error::Overloaded;
-
-pin_project! {
- /// Future for the [`LoadShed`] service.
- ///
- /// [`LoadShed`]: crate::load_shed::LoadShed
- pub struct ResponseFuture<F> {
- #[pin]
- state: ResponseState<F>,
- }
-}
-
-pin_project! {
- #[project = ResponseStateProj]
- enum ResponseState<F> {
- Called {
- #[pin]
- fut: F
- },
- Overloaded,
- }
-}
-
-impl<F> ResponseFuture<F> {
- pub(crate) fn called(fut: F) -> Self {
- ResponseFuture {
- state: ResponseState::Called { fut },
- }
- }
-
- pub(crate) fn overloaded() -> Self {
- ResponseFuture {
- state: ResponseState::Overloaded,
- }
- }
-}
-
-impl<F, T, E> Future for ResponseFuture<F>
-where
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- match self.project().state.project() {
- ResponseStateProj::Called { fut } => {
- Poll::Ready(ready!(fut.poll(cx)).map_err(Into::into))
- }
- ResponseStateProj::Overloaded => Poll::Ready(Err(Overloaded::new().into())),
- }
- }
-}
-
-impl<F> fmt::Debug for ResponseFuture<F>
-where
- // bounds for future-proofing...
- F: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("ResponseFuture")
- }
-}
diff --git a/vendor/tower/src/load_shed/layer.rs b/vendor/tower/src/load_shed/layer.rs
deleted file mode 100644
index 5585db79..00000000
--- a/vendor/tower/src/load_shed/layer.rs
+++ /dev/null
@@ -1,33 +0,0 @@
-use std::fmt;
-use tower_layer::Layer;
-
-use super::LoadShed;
-
-/// A [`Layer`] to wrap services in [`LoadShed`] middleware.
-///
-/// [`Layer`]: crate::Layer
-#[derive(Clone, Default)]
-pub struct LoadShedLayer {
- _p: (),
-}
-
-impl LoadShedLayer {
- /// Creates a new layer.
- pub const fn new() -> Self {
- LoadShedLayer { _p: () }
- }
-}
-
-impl<S> Layer<S> for LoadShedLayer {
- type Service = LoadShed<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- LoadShed::new(service)
- }
-}
-
-impl fmt::Debug for LoadShedLayer {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("LoadShedLayer").finish()
- }
-}
diff --git a/vendor/tower/src/load_shed/mod.rs b/vendor/tower/src/load_shed/mod.rs
deleted file mode 100644
index 422f3088..00000000
--- a/vendor/tower/src/load_shed/mod.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-//! Middleware for shedding load when inner services aren't ready.
-
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-pub mod error;
-pub mod future;
-mod layer;
-
-use self::future::ResponseFuture;
-pub use self::layer::LoadShedLayer;
-
-/// A [`Service`] that sheds load when the inner service isn't ready.
-///
-/// [`Service`]: crate::Service
-#[derive(Debug)]
-pub struct LoadShed<S> {
- inner: S,
- is_ready: bool,
-}
-
-// ===== impl LoadShed =====
-
-impl<S> LoadShed<S> {
- /// Wraps a service in [`LoadShed`] middleware.
- pub const fn new(inner: S) -> Self {
- LoadShed {
- inner,
- is_ready: false,
- }
- }
-}
-
-impl<S, Req> Service<Req> for LoadShed<S>
-where
- S: Service<Req>,
- S::Error: Into<crate::BoxError>,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<S::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // We check for readiness here, so that we can know in `call` if
- // the inner service is overloaded or not.
- self.is_ready = match self.inner.poll_ready(cx) {
- Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
- r => r.is_ready(),
- };
-
- // But we always report Ready, so that layers above don't wait until
- // the inner service is ready (the entire point of this layer!)
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: Req) -> Self::Future {
- if self.is_ready {
- // readiness only counts once, you need to check again!
- self.is_ready = false;
- ResponseFuture::called(self.inner.call(req))
- } else {
- ResponseFuture::overloaded()
- }
- }
-}
-
-impl<S: Clone> Clone for LoadShed<S> {
- fn clone(&self) -> Self {
- LoadShed {
- inner: self.inner.clone(),
- // new clones shouldn't carry the readiness state, as a cloneable
- // inner service likely tracks readiness per clone.
- is_ready: false,
- }
- }
-}
diff --git a/vendor/tower/src/macros.rs b/vendor/tower/src/macros.rs
deleted file mode 100644
index f3077566..00000000
--- a/vendor/tower/src/macros.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-#[cfg(any(
- feature = "util",
- feature = "spawn-ready",
- feature = "filter",
- feature = "make"
-))]
-macro_rules! opaque_future {
- ($(#[$m:meta])* pub type $name:ident<$($param:ident),+> = $actual:ty;) => {
- pin_project_lite::pin_project! {
- $(#[$m])*
- pub struct $name<$($param),+> {
- #[pin]
- inner: $actual
- }
- }
-
- impl<$($param),+> $name<$($param),+> {
- pub(crate) fn new(inner: $actual) -> Self {
- Self {
- inner
- }
- }
- }
-
- impl<$($param),+> std::fmt::Debug for $name<$($param),+> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_tuple(stringify!($name)).field(&format_args!("...")).finish()
- }
- }
-
- impl<$($param),+> std::future::Future for $name<$($param),+>
- where
- $actual: std::future::Future,
- {
- type Output = <$actual as std::future::Future>::Output;
- #[inline]
- fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<Self::Output> {
- self.project().inner.poll(cx)
- }
- }
- }
-}
diff --git a/vendor/tower/src/make/make_connection.rs b/vendor/tower/src/make/make_connection.rs
deleted file mode 100644
index 9566cc68..00000000
--- a/vendor/tower/src/make/make_connection.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use crate::sealed::Sealed;
-use std::future::Future;
-use std::task::{Context, Poll};
-use tokio::io::{AsyncRead, AsyncWrite};
-use tower_service::Service;
-
-/// The [`MakeConnection`] trait is used to create transports.
-///
-/// The goal of this service is to allow composable methods for creating
-/// `AsyncRead + AsyncWrite` transports. This could mean creating a TLS
-/// based connection or using some other method to authenticate the connection.
-pub trait MakeConnection<Target>: Sealed<(Target,)> {
- /// The transport provided by this service
- type Connection: AsyncRead + AsyncWrite;
-
- /// Errors produced by the connecting service
- type Error;
-
- /// The future that eventually produces the transport
- type Future: Future<Output = Result<Self::Connection, Self::Error>>;
-
- /// Returns `Poll::Ready(Ok(()))` when it is able to make more connections.
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
-
- /// Connect and return a transport asynchronously
- fn make_connection(&mut self, target: Target) -> Self::Future;
-}
-
-impl<S, Target> Sealed<(Target,)> for S where S: Service<Target> {}
-
-impl<C, Target> MakeConnection<Target> for C
-where
- C: Service<Target>,
- C::Response: AsyncRead + AsyncWrite,
-{
- type Connection = C::Response;
- type Error = C::Error;
- type Future = C::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Service::poll_ready(self, cx)
- }
-
- fn make_connection(&mut self, target: Target) -> Self::Future {
- Service::call(self, target)
- }
-}
diff --git a/vendor/tower/src/make/make_service.rs b/vendor/tower/src/make/make_service.rs
deleted file mode 100644
index aa519d68..00000000
--- a/vendor/tower/src/make/make_service.rs
+++ /dev/null
@@ -1,251 +0,0 @@
-//! Contains [`MakeService`] which is a trait alias for a [`Service`] of [`Service`]s.
-
-use crate::sealed::Sealed;
-use std::fmt;
-use std::future::Future;
-use std::marker::PhantomData;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-pub(crate) mod shared;
-
-/// Creates new [`Service`] values.
-///
-/// Acts as a service factory. This is useful for cases where new [`Service`]
-/// values must be produced. One case is a TCP server listener. The listener
-/// accepts new TCP streams, obtains a new [`Service`] value using the
-/// [`MakeService`] trait, and uses that new [`Service`] value to process inbound
-/// requests on that new TCP stream.
-///
-/// This is essentially a trait alias for a [`Service`] of [`Service`]s.
-pub trait MakeService<Target, Request>: Sealed<(Target, Request)> {
- /// Responses given by the service
- type Response;
-
- /// Errors produced by the service
- type Error;
-
- /// The [`Service`] value created by this factory
- type Service: Service<Request, Response = Self::Response, Error = Self::Error>;
-
- /// Errors produced while building a service.
- type MakeError;
-
- /// The future of the [`Service`] instance.
- type Future: Future<Output = Result<Self::Service, Self::MakeError>>;
-
- /// Returns [`Poll::Ready`] when the factory is able to create more services.
- ///
- /// If the service is at capacity, then [`Poll::Pending`] is returned and the task
- /// is notified when the service becomes ready again. This function is
- /// expected to be called while on a task.
- ///
- /// [`Poll::Ready`]: std::task::Poll::Ready
- /// [`Poll::Pending`]: std::task::Poll::Pending
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::MakeError>>;
-
- /// Create and return a new service value asynchronously.
- fn make_service(&mut self, target: Target) -> Self::Future;
-
- /// Consume this [`MakeService`] and convert it into a [`Service`].
- ///
- /// # Example
- /// ```
- /// use std::convert::Infallible;
- /// use tower::Service;
- /// use tower::make::MakeService;
- /// use tower::service_fn;
- ///
- /// # fn main() {
- /// # async {
- /// // A `MakeService`
- /// let make_service = service_fn(|make_req: ()| async {
- /// Ok::<_, Infallible>(service_fn(|req: String| async {
- /// Ok::<_, Infallible>(req)
- /// }))
- /// });
- ///
- /// // Convert the `MakeService` into a `Service`
- /// let mut svc = make_service.into_service();
- ///
- /// // Make a new service
- /// let mut new_svc = svc.call(()).await.unwrap();
- ///
- /// // Call the service
- /// let res = new_svc.call("foo".to_string()).await.unwrap();
- /// # };
- /// # }
- /// ```
- fn into_service(self) -> IntoService<Self, Request>
- where
- Self: Sized,
- {
- IntoService {
- make: self,
- _marker: PhantomData,
- }
- }
-
- /// Convert this [`MakeService`] into a [`Service`] without consuming the original [`MakeService`].
- ///
- /// # Example
- /// ```
- /// use std::convert::Infallible;
- /// use tower::Service;
- /// use tower::make::MakeService;
- /// use tower::service_fn;
- ///
- /// # fn main() {
- /// # async {
- /// // A `MakeService`
- /// let mut make_service = service_fn(|make_req: ()| async {
- /// Ok::<_, Infallible>(service_fn(|req: String| async {
- /// Ok::<_, Infallible>(req)
- /// }))
- /// });
- ///
- /// // Convert the `MakeService` into a `Service`
- /// let mut svc = make_service.as_service();
- ///
- /// // Make a new service
- /// let mut new_svc = svc.call(()).await.unwrap();
- ///
- /// // Call the service
- /// let res = new_svc.call("foo".to_string()).await.unwrap();
- ///
- /// // The original `MakeService` is still accessible
- /// let new_svc = make_service.make_service(()).await.unwrap();
- /// # };
- /// # }
- /// ```
- fn as_service(&mut self) -> AsService<Self, Request>
- where
- Self: Sized,
- {
- AsService {
- make: self,
- _marker: PhantomData,
- }
- }
-}
-
-impl<M, S, Target, Request> Sealed<(Target, Request)> for M
-where
- M: Service<Target, Response = S>,
- S: Service<Request>,
-{
-}
-
-impl<M, S, Target, Request> MakeService<Target, Request> for M
-where
- M: Service<Target, Response = S>,
- S: Service<Request>,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Service = S;
- type MakeError = M::Error;
- type Future = M::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::MakeError>> {
- Service::poll_ready(self, cx)
- }
-
- fn make_service(&mut self, target: Target) -> Self::Future {
- Service::call(self, target)
- }
-}
-
-/// Service returned by [`MakeService::into_service`][into].
-///
-/// See the documentation on [`into_service`][into] for details.
-///
-/// [into]: MakeService::into_service
-pub struct IntoService<M, Request> {
- make: M,
- _marker: PhantomData<Request>,
-}
-
-impl<M, Request> Clone for IntoService<M, Request>
-where
- M: Clone,
-{
- fn clone(&self) -> Self {
- Self {
- make: self.make.clone(),
- _marker: PhantomData,
- }
- }
-}
-
-impl<M, Request> fmt::Debug for IntoService<M, Request>
-where
- M: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("IntoService")
- .field("make", &self.make)
- .finish()
- }
-}
-
-impl<M, S, Target, Request> Service<Target> for IntoService<M, Request>
-where
- M: Service<Target, Response = S>,
- S: Service<Request>,
-{
- type Response = M::Response;
- type Error = M::Error;
- type Future = M::Future;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.make.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, target: Target) -> Self::Future {
- self.make.make_service(target)
- }
-}
-
-/// Service returned by [`MakeService::as_service`][as].
-///
-/// See the documentation on [`as_service`][as] for details.
-///
-/// [as]: MakeService::as_service
-pub struct AsService<'a, M, Request> {
- make: &'a mut M,
- _marker: PhantomData<Request>,
-}
-
-impl<M, Request> fmt::Debug for AsService<'_, M, Request>
-where
- M: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("AsService")
- .field("make", &self.make)
- .finish()
- }
-}
-
-impl<M, S, Target, Request> Service<Target> for AsService<'_, M, Request>
-where
- M: Service<Target, Response = S>,
- S: Service<Request>,
-{
- type Response = M::Response;
- type Error = M::Error;
- type Future = M::Future;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.make.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, target: Target) -> Self::Future {
- self.make.make_service(target)
- }
-}
diff --git a/vendor/tower/src/make/make_service/shared.rs b/vendor/tower/src/make/make_service/shared.rs
deleted file mode 100644
index 2b2bc026..00000000
--- a/vendor/tower/src/make/make_service/shared.rs
+++ /dev/null
@@ -1,146 +0,0 @@
-use std::convert::Infallible;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-/// A [`MakeService`] that produces services by cloning an inner service.
-///
-/// [`MakeService`]: super::MakeService
-///
-/// # Example
-///
-/// ```
-/// # use std::task::{Context, Poll};
-/// # use std::pin::Pin;
-/// # use std::convert::Infallible;
-/// use tower::make::{MakeService, Shared};
-/// use tower::buffer::Buffer;
-/// use tower::Service;
-/// use futures::future::{Ready, ready};
-///
-/// // An example connection type
-/// struct Connection {}
-///
-/// // An example request type
-/// struct Request {}
-///
-/// // An example response type
-/// struct Response {}
-///
-/// // Some service that doesn't implement `Clone`
-/// struct MyService;
-///
-/// impl Service<Request> for MyService {
-/// type Response = Response;
-/// type Error = Infallible;
-/// type Future = Ready<Result<Response, Infallible>>;
-///
-/// fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-/// Poll::Ready(Ok(()))
-/// }
-///
-/// fn call(&mut self, req: Request) -> Self::Future {
-/// ready(Ok(Response {}))
-/// }
-/// }
-///
-/// // Example function that runs a service by accepting new connections and using
-/// // `Make` to create new services that might be bound to the connection.
-/// //
-/// // This is similar to what you might find in hyper.
-/// async fn serve_make_service<Make>(make: Make)
-/// where
-/// Make: MakeService<Connection, Request>
-/// {
-/// // ...
-/// }
-///
-/// # async {
-/// // Our service
-/// let svc = MyService;
-///
-/// // Make it `Clone` by putting a channel in front
-/// let buffered = Buffer::new(svc, 1024);
-///
-/// // Convert it into a `MakeService`
-/// let make = Shared::new(buffered);
-///
-/// // Run the service and just ignore the `Connection`s as `MyService` doesn't need them
-/// serve_make_service(make).await;
-/// # };
-/// ```
-#[derive(Debug, Clone, Copy)]
-pub struct Shared<S> {
- service: S,
-}
-
-impl<S> Shared<S> {
- /// Create a new [`Shared`] from a service.
- pub const fn new(service: S) -> Self {
- Self { service }
- }
-}
-
-impl<S, T> Service<T> for Shared<S>
-where
- S: Clone,
-{
- type Response = S;
- type Error = Infallible;
- type Future = SharedFuture<S>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, _target: T) -> Self::Future {
- SharedFuture::new(futures_util::future::ready(Ok(self.service.clone())))
- }
-}
-
-opaque_future! {
- /// Response future from [`Shared`] services.
- pub type SharedFuture<S> = futures_util::future::Ready<Result<S, Infallible>>;
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::make::MakeService;
- use crate::service_fn;
- use futures::future::poll_fn;
-
- async fn echo<R>(req: R) -> Result<R, Infallible> {
- Ok(req)
- }
-
- #[tokio::test]
- async fn as_make_service() {
- let mut shared = Shared::new(service_fn(echo::<&'static str>));
-
- poll_fn(|cx| MakeService::<(), _>::poll_ready(&mut shared, cx))
- .await
- .unwrap();
- let mut svc = shared.make_service(()).await.unwrap();
-
- poll_fn(|cx| svc.poll_ready(cx)).await.unwrap();
- let res = svc.call("foo").await.unwrap();
-
- assert_eq!(res, "foo");
- }
-
- #[tokio::test]
- async fn as_make_service_into_service() {
- let shared = Shared::new(service_fn(echo::<&'static str>));
- let mut shared = MakeService::<(), _>::into_service(shared);
-
- poll_fn(|cx| Service::<()>::poll_ready(&mut shared, cx))
- .await
- .unwrap();
- let mut svc = shared.call(()).await.unwrap();
-
- poll_fn(|cx| svc.poll_ready(cx)).await.unwrap();
- let res = svc.call("foo").await.unwrap();
-
- assert_eq!(res, "foo");
- }
-}
diff --git a/vendor/tower/src/make/mod.rs b/vendor/tower/src/make/mod.rs
deleted file mode 100644
index a377f2a2..00000000
--- a/vendor/tower/src/make/mod.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-//! Trait aliases for Services that produce specific types of Responses.
-
-mod make_connection;
-mod make_service;
-
-pub use self::make_connection::MakeConnection;
-pub use self::make_service::shared::Shared;
-pub use self::make_service::{AsService, IntoService, MakeService};
-
-pub mod future {
- //! Future types
-
- pub use super::make_service::shared::SharedFuture;
-}
diff --git a/vendor/tower/src/ready_cache/cache.rs b/vendor/tower/src/ready_cache/cache.rs
deleted file mode 100644
index a6299033..00000000
--- a/vendor/tower/src/ready_cache/cache.rs
+++ /dev/null
@@ -1,503 +0,0 @@
-//! A cache of services.
-
-use super::error;
-use futures_core::Stream;
-use futures_util::{stream::FuturesUnordered, task::AtomicWaker};
-pub use indexmap::Equivalent;
-use indexmap::IndexMap;
-use std::fmt;
-use std::future::Future;
-use std::hash::Hash;
-use std::pin::Pin;
-use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Arc;
-use std::task::{Context, Poll};
-use tower_service::Service;
-use tracing::{debug, trace};
-
-/// Drives readiness over a set of services.
-///
-/// The cache maintains two internal data structures:
-///
-/// * a set of _pending_ services that have not yet become ready; and
-/// * a set of _ready_ services that have previously polled ready.
-///
-/// As each `S` typed [`Service`] is added to the cache via [`ReadyCache::push`], it
-/// is added to the _pending set_. As [`ReadyCache::poll_pending`] is invoked,
-/// pending services are polled and added to the _ready set_.
-///
-/// [`ReadyCache::call_ready`] (or [`ReadyCache::call_ready_index`]) dispatches a
-/// request to the specified service, but panics if the specified service is not
-/// in the ready set. The `ReadyCache::check_*` functions can be used to ensure
-/// that a service is ready before dispatching a request.
-///
-/// The ready set can hold services for an arbitrarily long time. During this
-/// time, the runtime may process events that invalidate that ready state (for
-/// instance, if a keepalive detects a lost connection). In such cases, callers
-/// should use [`ReadyCache::check_ready`] (or [`ReadyCache::check_ready_index`])
-/// immediately before dispatching a request to ensure that the service has not
-/// become unavailable.
-///
-/// Once `ReadyCache::call_ready*` is invoked, the service is placed back into
-/// the _pending_ set to be driven to readiness again.
-///
-/// When `ReadyCache::check_ready*` returns `false`, it indicates that the
-/// specified service is _not_ ready. If an error is returned, this indicates that
-/// the server failed and has been removed from the cache entirely.
-///
-/// [`ReadyCache::evict`] can be used to remove a service from the cache (by key),
-/// though the service may not be dropped (if it is currently pending) until
-/// [`ReadyCache::poll_pending`] is invoked.
-///
-/// Note that the by-index accessors are provided to support use cases (like
-/// power-of-two-choices load balancing) where the caller does not care to keep
-/// track of each service's key. Instead, it needs only to access _some_ ready
-/// service. In such a case, it should be noted that calls to
-/// [`ReadyCache::poll_pending`] and [`ReadyCache::evict`] may perturb the order of
-/// the ready set, so any cached indexes should be discarded after such a call.
-pub struct ReadyCache<K, S, Req>
-where
- K: Eq + Hash,
-{
- /// A stream of services that are not yet ready.
- pending: FuturesUnordered<Pending<K, S, Req>>,
- /// An index of cancelation handles for pending streams.
- pending_cancel_txs: IndexMap<K, CancelTx>,
-
- /// Services that have previously become ready. Readiness can become stale,
- /// so a given service should be polled immediately before use.
- ///
- /// The cancelation oneshot is preserved (though unused) while the service is
- /// ready so that it need not be reallocated each time a request is
- /// dispatched.
- ready: IndexMap<K, (S, CancelPair)>,
-}
-
-// Safety: This is safe because we do not use `Pin::new_unchecked`.
-impl<S, K: Eq + Hash, Req> Unpin for ReadyCache<K, S, Req> {}
-
-#[derive(Debug)]
-struct Cancel {
- waker: AtomicWaker,
- canceled: AtomicBool,
-}
-
-#[derive(Debug)]
-struct CancelRx(Arc<Cancel>);
-
-#[derive(Debug)]
-struct CancelTx(Arc<Cancel>);
-
-type CancelPair = (CancelTx, CancelRx);
-
-#[derive(Debug)]
-enum PendingError<K, E> {
- Canceled(K),
- Inner(K, E),
-}
-
-pin_project_lite::pin_project! {
- /// A [`Future`] that becomes satisfied when an `S`-typed service is ready.
- ///
- /// May fail due to cancelation, i.e. if the service is evicted from the balancer.
- struct Pending<K, S, Req> {
- key: Option<K>,
- cancel: Option<CancelRx>,
- ready: Option<S>,
- _pd: std::marker::PhantomData<Req>,
- }
-}
-
-// === ReadyCache ===
-
-impl<K, S, Req> Default for ReadyCache<K, S, Req>
-where
- K: Eq + Hash,
- S: Service<Req>,
-{
- fn default() -> Self {
- Self {
- ready: IndexMap::default(),
- pending: FuturesUnordered::new(),
- pending_cancel_txs: IndexMap::default(),
- }
- }
-}
-
-impl<K, S, Req> fmt::Debug for ReadyCache<K, S, Req>
-where
- K: fmt::Debug + Eq + Hash,
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let Self {
- pending,
- pending_cancel_txs,
- ready,
- } = self;
- f.debug_struct("ReadyCache")
- .field("pending", pending)
- .field("pending_cancel_txs", pending_cancel_txs)
- .field("ready", ready)
- .finish()
- }
-}
-
-impl<K, S, Req> ReadyCache<K, S, Req>
-where
- K: Eq + Hash,
-{
- /// Returns the total number of services in the cache.
- pub fn len(&self) -> usize {
- self.ready_len() + self.pending_len()
- }
-
- /// Returns whether or not there are any services in the cache.
- pub fn is_empty(&self) -> bool {
- self.ready.is_empty() && self.pending.is_empty()
- }
-
- /// Returns the number of services in the ready set.
- pub fn ready_len(&self) -> usize {
- self.ready.len()
- }
-
- /// Returns the number of services in the unready set.
- pub fn pending_len(&self) -> usize {
- self.pending.len()
- }
-
- /// Returns true iff the given key is in the unready set.
- pub fn pending_contains<Q: Hash + Equivalent<K>>(&self, key: &Q) -> bool {
- self.pending_cancel_txs.contains_key(key)
- }
-
- /// Obtains a reference to a service in the ready set by key.
- pub fn get_ready<Q: Hash + Equivalent<K>>(&self, key: &Q) -> Option<(usize, &K, &S)> {
- self.ready.get_full(key).map(|(i, k, v)| (i, k, &v.0))
- }
-
- /// Obtains a mutable reference to a service in the ready set by key.
- pub fn get_ready_mut<Q: Hash + Equivalent<K>>(
- &mut self,
- key: &Q,
- ) -> Option<(usize, &K, &mut S)> {
- self.ready
- .get_full_mut(key)
- .map(|(i, k, v)| (i, k, &mut v.0))
- }
-
- /// Obtains a reference to a service in the ready set by index.
- pub fn get_ready_index(&self, idx: usize) -> Option<(&K, &S)> {
- self.ready.get_index(idx).map(|(k, v)| (k, &v.0))
- }
-
- /// Obtains a mutable reference to a service in the ready set by index.
- pub fn get_ready_index_mut(&mut self, idx: usize) -> Option<(&K, &mut S)> {
- self.ready.get_index_mut(idx).map(|(k, v)| (k, &mut v.0))
- }
-
- /// Returns an iterator over the ready keys and services.
- pub fn iter_ready(&self) -> impl Iterator<Item = (&K, &S)> {
- self.ready.iter().map(|(k, s)| (k, &s.0))
- }
-
- /// Returns a mutable iterator over the ready keys and services.
- pub fn iter_ready_mut(&mut self) -> impl Iterator<Item = (&K, &mut S)> {
- self.ready.iter_mut().map(|(k, s)| (k, &mut s.0))
- }
-
- /// Evicts an item from the cache.
- ///
- /// Returns true if a service was marked for eviction.
- ///
- /// Services are dropped from the ready set immediately. Services in the
- /// pending set are marked for cancellation, but [`ReadyCache::poll_pending`]
- /// must be called to cause the service to be dropped.
- pub fn evict<Q: Hash + Equivalent<K>>(&mut self, key: &Q) -> bool {
- let canceled = if let Some(c) = self.pending_cancel_txs.swap_remove(key) {
- c.cancel();
- true
- } else {
- false
- };
-
- self.ready
- .swap_remove_full(key)
- .map(|_| true)
- .unwrap_or(canceled)
- }
-}
-
-impl<K, S, Req> ReadyCache<K, S, Req>
-where
- K: Clone + Eq + Hash,
- S: Service<Req>,
- <S as Service<Req>>::Error: Into<crate::BoxError>,
- S::Error: Into<crate::BoxError>,
-{
- /// Pushes a new service onto the pending set.
- ///
- /// The service will be promoted to the ready set as [`poll_pending`] is invoked.
- ///
- /// Note that this does **not** remove services from the ready set. Once the
- /// old service is used, it will be dropped instead of being added back to
- /// the pending set; OR, when the new service becomes ready, it will replace
- /// the prior service in the ready set.
- ///
- /// [`poll_pending`]: crate::ready_cache::cache::ReadyCache::poll_pending
- pub fn push(&mut self, key: K, svc: S) {
- let cancel = cancelable();
- self.push_pending(key, svc, cancel);
- }
-
- fn push_pending(&mut self, key: K, svc: S, (cancel_tx, cancel_rx): CancelPair) {
- if let Some(c) = self.pending_cancel_txs.insert(key.clone(), cancel_tx) {
- // If there is already a service for this key, cancel it.
- c.cancel();
- }
- self.pending.push(Pending {
- key: Some(key),
- cancel: Some(cancel_rx),
- ready: Some(svc),
- _pd: std::marker::PhantomData,
- });
- }
-
- /// Polls services pending readiness, adding ready services to the ready set.
- ///
- /// Returns [`Poll::Ready`] when there are no remaining unready services.
- /// [`poll_pending`] should be called again after [`push`] or
- /// [`call_ready_index`] are invoked.
- ///
- /// Failures indicate that an individual pending service failed to become
- /// ready (and has been removed from the cache). In such a case,
- /// [`poll_pending`] should typically be called again to continue driving
- /// pending services to readiness.
- ///
- /// [`poll_pending`]: crate::ready_cache::cache::ReadyCache::poll_pending
- /// [`push`]: crate::ready_cache::cache::ReadyCache::push
- /// [`call_ready_index`]: crate::ready_cache::cache::ReadyCache::call_ready_index
- pub fn poll_pending(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), error::Failed<K>>> {
- loop {
- match Pin::new(&mut self.pending).poll_next(cx) {
- Poll::Pending => return Poll::Pending,
- Poll::Ready(None) => return Poll::Ready(Ok(())),
- Poll::Ready(Some(Ok((key, svc, cancel_rx)))) => {
- trace!("endpoint ready");
- let cancel_tx = self.pending_cancel_txs.swap_remove(&key);
- if let Some(cancel_tx) = cancel_tx {
- // Keep track of the cancelation so that it need not be
- // recreated after the service is used.
- self.ready.insert(key, (svc, (cancel_tx, cancel_rx)));
- } else {
- assert!(
- cancel_tx.is_some(),
- "services that become ready must have a pending cancelation"
- );
- }
- }
- Poll::Ready(Some(Err(PendingError::Canceled(_)))) => {
- debug!("endpoint canceled");
- // The cancellation for this service was removed in order to
- // cause this cancellation.
- }
- Poll::Ready(Some(Err(PendingError::Inner(key, e)))) => {
- let cancel_tx = self.pending_cancel_txs.swap_remove(&key);
- assert!(
- cancel_tx.is_some(),
- "services that return an error must have a pending cancelation"
- );
- return Err(error::Failed(key, e.into())).into();
- }
- }
- }
- }
-
- /// Checks whether the referenced endpoint is ready.
- ///
- /// Returns true if the endpoint is ready and false if it is not. An error is
- /// returned if the endpoint fails.
- pub fn check_ready<Q: Hash + Equivalent<K>>(
- &mut self,
- cx: &mut Context<'_>,
- key: &Q,
- ) -> Result<bool, error::Failed<K>> {
- match self.ready.get_full_mut(key) {
- Some((index, _, _)) => self.check_ready_index(cx, index),
- None => Ok(false),
- }
- }
-
- /// Checks whether the referenced endpoint is ready.
- ///
- /// If the service is no longer ready, it is moved back into the pending set
- /// and `false` is returned.
- ///
- /// If the service errors, it is removed and dropped and the error is returned.
- pub fn check_ready_index(
- &mut self,
- cx: &mut Context<'_>,
- index: usize,
- ) -> Result<bool, error::Failed<K>> {
- let svc = match self.ready.get_index_mut(index) {
- None => return Ok(false),
- Some((_, (svc, _))) => svc,
- };
- match svc.poll_ready(cx) {
- Poll::Ready(Ok(())) => Ok(true),
- Poll::Pending => {
- // became unready; so move it back there.
- let (key, (svc, cancel)) = self
- .ready
- .swap_remove_index(index)
- .expect("invalid ready index");
-
- // If a new version of this service has been added to the
- // unready set, don't overwrite it.
- if !self.pending_contains(&key) {
- self.push_pending(key, svc, cancel);
- }
-
- Ok(false)
- }
- Poll::Ready(Err(e)) => {
- // failed, so drop it.
- let (key, _) = self
- .ready
- .swap_remove_index(index)
- .expect("invalid ready index");
- Err(error::Failed(key, e.into()))
- }
- }
- }
-
- /// Calls a ready service by key.
- ///
- /// # Panics
- ///
- /// If the specified key does not exist in the ready
- pub fn call_ready<Q: Hash + Equivalent<K>>(&mut self, key: &Q, req: Req) -> S::Future {
- let (index, _, _) = self
- .ready
- .get_full_mut(key)
- .expect("check_ready was not called");
- self.call_ready_index(index, req)
- }
-
- /// Calls a ready service by index.
- ///
- /// # Panics
- ///
- /// If the specified index is out of range.
- pub fn call_ready_index(&mut self, index: usize, req: Req) -> S::Future {
- let (key, (mut svc, cancel)) = self
- .ready
- .swap_remove_index(index)
- .expect("check_ready_index was not called");
-
- let fut = svc.call(req);
-
- // If a new version of this service has been added to the
- // unready set, don't overwrite it.
- if !self.pending_contains(&key) {
- self.push_pending(key, svc, cancel);
- }
-
- fut
- }
-}
-
-// === impl Cancel ===
-
-/// Creates a cancelation sender and receiver.
-///
-/// A `tokio::sync::oneshot` is NOT used, as a `Receiver` is not guaranteed to
-/// observe results as soon as a `Sender` fires. Using an `AtomicBool` allows
-/// the state to be observed as soon as the cancelation is triggered.
-fn cancelable() -> CancelPair {
- let cx = Arc::new(Cancel {
- waker: AtomicWaker::new(),
- canceled: AtomicBool::new(false),
- });
- (CancelTx(cx.clone()), CancelRx(cx))
-}
-
-impl CancelTx {
- fn cancel(self) {
- self.0.canceled.store(true, Ordering::SeqCst);
- self.0.waker.wake();
- }
-}
-
-// === Pending ===
-
-impl<K, S, Req> Future for Pending<K, S, Req>
-where
- S: Service<Req>,
-{
- type Output = Result<(K, S, CancelRx), PendingError<K, S::Error>>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
- // Before checking whether the service is ready, check to see whether
- // readiness has been canceled.
- let CancelRx(cancel) = this.cancel.as_mut().expect("polled after complete");
- if cancel.canceled.load(Ordering::SeqCst) {
- let key = this.key.take().expect("polled after complete");
- return Err(PendingError::Canceled(key)).into();
- }
-
- match this
- .ready
- .as_mut()
- .expect("polled after ready")
- .poll_ready(cx)
- {
- Poll::Pending => {
- // Before returning Pending, register interest in cancelation so
- // that this future is polled again if the state changes.
- let CancelRx(cancel) = this.cancel.as_mut().expect("polled after complete");
- cancel.waker.register(cx.waker());
- // Because both the cancel receiver and cancel sender are held
- // by the `ReadyCache` (i.e., on a single task), then it must
- // not be possible for the cancelation state to change while
- // polling a `Pending` service.
- assert!(
- !cancel.canceled.load(Ordering::SeqCst),
- "cancelation cannot be notified while polling a pending service"
- );
- Poll::Pending
- }
- Poll::Ready(Ok(())) => {
- let key = this.key.take().expect("polled after complete");
- let cancel = this.cancel.take().expect("polled after complete");
- Ok((key, this.ready.take().expect("polled after ready"), cancel)).into()
- }
- Poll::Ready(Err(e)) => {
- let key = this.key.take().expect("polled after compete");
- Err(PendingError::Inner(key, e)).into()
- }
- }
- }
-}
-
-impl<K, S, Req> fmt::Debug for Pending<K, S, Req>
-where
- K: fmt::Debug,
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let Self {
- key,
- cancel,
- ready,
- _pd,
- } = self;
- f.debug_struct("Pending")
- .field("key", key)
- .field("cancel", cancel)
- .field("ready", ready)
- .finish()
- }
-}
diff --git a/vendor/tower/src/ready_cache/error.rs b/vendor/tower/src/ready_cache/error.rs
deleted file mode 100644
index 08115f49..00000000
--- a/vendor/tower/src/ready_cache/error.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-//! Errors
-
-/// An error indicating that the service with a `K`-typed key failed with an
-/// error.
-pub struct Failed<K>(pub K, pub crate::BoxError);
-
-// === Failed ===
-
-impl<K: std::fmt::Debug> std::fmt::Debug for Failed<K> {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- f.debug_tuple("Failed")
- .field(&self.0)
- .field(&self.1)
- .finish()
- }
-}
-
-impl<K> std::fmt::Display for Failed<K> {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- self.1.fmt(f)
- }
-}
-
-impl<K: std::fmt::Debug> std::error::Error for Failed<K> {
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- Some(&*self.1)
- }
-}
diff --git a/vendor/tower/src/ready_cache/mod.rs b/vendor/tower/src/ready_cache/mod.rs
deleted file mode 100644
index ebf85ab4..00000000
--- a/vendor/tower/src/ready_cache/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-//! A cache of services
-
-pub mod cache;
-pub mod error;
-
-pub use self::cache::ReadyCache;
diff --git a/vendor/tower/src/reconnect/future.rs b/vendor/tower/src/reconnect/future.rs
deleted file mode 100644
index 3c295b96..00000000
--- a/vendor/tower/src/reconnect/future.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-pin_project! {
- /// Future that resolves to the response or failure to connect.
- #[derive(Debug)]
- pub struct ResponseFuture<F, E> {
- #[pin]
- inner: Inner<F, E>,
- }
-}
-
-pin_project! {
- #[project = InnerProj]
- #[derive(Debug)]
- enum Inner<F, E> {
- Future {
- #[pin]
- fut: F,
- },
- Error {
- error: Option<E>,
- },
- }
-}
-
-impl<F, E> Inner<F, E> {
- fn future(fut: F) -> Self {
- Self::Future { fut }
- }
-
- fn error(error: Option<E>) -> Self {
- Self::Error { error }
- }
-}
-
-impl<F, E> ResponseFuture<F, E> {
- pub(crate) fn new(inner: F) -> Self {
- ResponseFuture {
- inner: Inner::future(inner),
- }
- }
-
- pub(crate) fn error(error: E) -> Self {
- ResponseFuture {
- inner: Inner::error(Some(error)),
- }
- }
-}
-
-impl<F, T, E, ME> Future for ResponseFuture<F, ME>
-where
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
- ME: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let me = self.project();
- match me.inner.project() {
- InnerProj::Future { fut } => fut.poll(cx).map_err(Into::into),
- InnerProj::Error { error } => {
- let e = error.take().expect("Polled after ready.").into();
- Poll::Ready(Err(e))
- }
- }
- }
-}
diff --git a/vendor/tower/src/reconnect/mod.rs b/vendor/tower/src/reconnect/mod.rs
deleted file mode 100644
index 0062fe39..00000000
--- a/vendor/tower/src/reconnect/mod.rs
+++ /dev/null
@@ -1,171 +0,0 @@
-//! Reconnect services when they fail.
-//!
-//! Reconnect takes some [`MakeService`] and transforms it into a
-//! [`Service`]. It then attempts to lazily connect and
-//! reconnect on failure. The `Reconnect` service becomes unavailable
-//! when the inner `MakeService::poll_ready` returns an error. When the
-//! connection future returned from `MakeService::call` fails this will be
-//! returned in the next call to `Reconnect::call`. This allows the user to
-//! call the service again even if the inner `MakeService` was unable to
-//! connect on the last call.
-//!
-//! [`MakeService`]: crate::make::MakeService
-//! [`Service`]: crate::Service
-
-mod future;
-
-pub use future::ResponseFuture;
-
-use crate::make::MakeService;
-use std::fmt;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-use tracing::trace;
-
-/// Reconnect to failed services.
-pub struct Reconnect<M, Target>
-where
- M: Service<Target>,
-{
- mk_service: M,
- state: State<M::Future, M::Response>,
- target: Target,
- error: Option<M::Error>,
-}
-
-#[derive(Debug)]
-enum State<F, S> {
- Idle,
- Connecting(F),
- Connected(S),
-}
-
-impl<M, Target> Reconnect<M, Target>
-where
- M: Service<Target>,
-{
- /// Lazily connect and reconnect to a [`Service`].
- pub const fn new(mk_service: M, target: Target) -> Self {
- Reconnect {
- mk_service,
- state: State::Idle,
- target,
- error: None,
- }
- }
-
- /// Reconnect to a already connected [`Service`].
- pub const fn with_connection(init_conn: M::Response, mk_service: M, target: Target) -> Self {
- Reconnect {
- mk_service,
- state: State::Connected(init_conn),
- target,
- error: None,
- }
- }
-}
-
-impl<M, Target, S, Request> Service<Request> for Reconnect<M, Target>
-where
- M: Service<Target, Response = S>,
- S: Service<Request>,
- M::Future: Unpin,
- crate::BoxError: From<M::Error> + From<S::Error>,
- Target: Clone,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<S::Future, M::Error>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- loop {
- match &mut self.state {
- State::Idle => {
- trace!("poll_ready; idle");
- match self.mk_service.poll_ready(cx) {
- Poll::Ready(r) => r?,
- Poll::Pending => {
- trace!("poll_ready; MakeService not ready");
- return Poll::Pending;
- }
- }
-
- let fut = self.mk_service.make_service(self.target.clone());
- self.state = State::Connecting(fut);
- continue;
- }
- State::Connecting(ref mut f) => {
- trace!("poll_ready; connecting");
- match Pin::new(f).poll(cx) {
- Poll::Ready(Ok(service)) => {
- self.state = State::Connected(service);
- }
- Poll::Pending => {
- trace!("poll_ready; not ready");
- return Poll::Pending;
- }
- Poll::Ready(Err(e)) => {
- trace!("poll_ready; error");
- self.state = State::Idle;
- self.error = Some(e);
- break;
- }
- }
- }
- State::Connected(ref mut inner) => {
- trace!("poll_ready; connected");
- match inner.poll_ready(cx) {
- Poll::Ready(Ok(())) => {
- trace!("poll_ready; ready");
- return Poll::Ready(Ok(()));
- }
- Poll::Pending => {
- trace!("poll_ready; not ready");
- return Poll::Pending;
- }
- Poll::Ready(Err(_)) => {
- trace!("poll_ready; error");
- self.state = State::Idle;
- }
- }
- }
- }
- }
-
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- if let Some(error) = self.error.take() {
- return ResponseFuture::error(error);
- }
-
- let service = match self.state {
- State::Connected(ref mut service) => service,
- _ => panic!("service not ready; poll_ready must be called first"),
- };
-
- let fut = service.call(request);
- ResponseFuture::new(fut)
- }
-}
-
-impl<M, Target> fmt::Debug for Reconnect<M, Target>
-where
- M: Service<Target> + fmt::Debug,
- M::Future: fmt::Debug,
- M::Response: fmt::Debug,
- Target: fmt::Debug,
-{
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("Reconnect")
- .field("mk_service", &self.mk_service)
- .field("state", &self.state)
- .field("target", &self.target)
- .finish()
- }
-}
diff --git a/vendor/tower/src/retry/backoff.rs b/vendor/tower/src/retry/backoff.rs
deleted file mode 100644
index 685063ec..00000000
--- a/vendor/tower/src/retry/backoff.rs
+++ /dev/null
@@ -1,263 +0,0 @@
-//! This module contains generic [backoff] utilities to be used with the retry
-//! layer.
-//!
-//! The [`Backoff`] trait is a generic way to represent backoffs that can use
-//! any timer type.
-//!
-//! [`ExponentialBackoffMaker`] implements the maker type for
-//! [`ExponentialBackoff`] which implements the [`Backoff`] trait and provides
-//! a batteries included exponential backoff and jitter strategy.
-//!
-//! [backoff]: https://en.wikipedia.org/wiki/Exponential_backoff
-
-use std::fmt::Display;
-use std::future::Future;
-use std::time::Duration;
-use tokio::time;
-
-use crate::util::rng::{HasherRng, Rng};
-
-/// Trait used to construct [`Backoff`] trait implementors.
-pub trait MakeBackoff {
- /// The backoff type produced by this maker.
- type Backoff: Backoff;
-
- /// Constructs a new backoff type.
- fn make_backoff(&mut self) -> Self::Backoff;
-}
-
-/// A backoff trait where a single mutable reference represents a single
-/// backoff session. Implementors must also implement [`Clone`] which will
-/// reset the backoff back to the default state for the next session.
-pub trait Backoff {
- /// The future associated with each backoff. This usually will be some sort
- /// of timer.
- type Future: Future<Output = ()>;
-
- /// Initiate the next backoff in the sequence.
- fn next_backoff(&mut self) -> Self::Future;
-}
-
-/// A maker type for [`ExponentialBackoff`].
-#[derive(Debug, Clone)]
-pub struct ExponentialBackoffMaker<R = HasherRng> {
- /// The minimum amount of time to wait before resuming an operation.
- min: time::Duration,
- /// The maximum amount of time to wait before resuming an operation.
- max: time::Duration,
- /// The ratio of the base timeout that may be randomly added to a backoff.
- ///
- /// Must be greater than or equal to 0.0.
- jitter: f64,
- rng: R,
-}
-
-/// A jittered [exponential backoff] strategy.
-///
-/// The backoff duration will increase exponentially for every subsequent
-/// backoff, up to a maximum duration. A small amount of [random jitter] is
-/// added to each backoff duration, in order to avoid retry spikes.
-///
-/// [exponential backoff]: https://en.wikipedia.org/wiki/Exponential_backoff
-/// [random jitter]: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
-#[derive(Debug, Clone)]
-pub struct ExponentialBackoff<R = HasherRng> {
- min: time::Duration,
- max: time::Duration,
- jitter: f64,
- rng: R,
- iterations: u32,
-}
-
-impl<R> ExponentialBackoffMaker<R>
-where
- R: Rng,
-{
- /// Create a new `ExponentialBackoff`.
- ///
- /// # Error
- ///
- /// Returns a config validation error if:
- /// - `min` > `max`
- /// - `max` > 0
- /// - `jitter` >= `0.0`
- /// - `jitter` < `100.0`
- /// - `jitter` is finite
- pub fn new(
- min: time::Duration,
- max: time::Duration,
- jitter: f64,
- rng: R,
- ) -> Result<Self, InvalidBackoff> {
- if min > max {
- return Err(InvalidBackoff("maximum must not be less than minimum"));
- }
- if max == time::Duration::from_millis(0) {
- return Err(InvalidBackoff("maximum must be non-zero"));
- }
- if jitter < 0.0 {
- return Err(InvalidBackoff("jitter must not be negative"));
- }
- if jitter > 100.0 {
- return Err(InvalidBackoff("jitter must not be greater than 100"));
- }
- if !jitter.is_finite() {
- return Err(InvalidBackoff("jitter must be finite"));
- }
-
- Ok(ExponentialBackoffMaker {
- min,
- max,
- jitter,
- rng,
- })
- }
-}
-
-impl<R> MakeBackoff for ExponentialBackoffMaker<R>
-where
- R: Rng + Clone,
-{
- type Backoff = ExponentialBackoff<R>;
-
- fn make_backoff(&mut self) -> Self::Backoff {
- ExponentialBackoff {
- max: self.max,
- min: self.min,
- jitter: self.jitter,
- rng: self.rng.clone(),
- iterations: 0,
- }
- }
-}
-
-impl<R: Rng> ExponentialBackoff<R> {
- fn base(&self) -> time::Duration {
- debug_assert!(
- self.min <= self.max,
- "maximum backoff must not be less than minimum backoff"
- );
- debug_assert!(
- self.max > time::Duration::from_millis(0),
- "Maximum backoff must be non-zero"
- );
- self.min
- .checked_mul(2_u32.saturating_pow(self.iterations))
- .unwrap_or(self.max)
- .min(self.max)
- }
-
- /// Returns a random, uniform duration on `[0, base*self.jitter]` no greater
- /// than `self.max`.
- fn jitter(&mut self, base: time::Duration) -> time::Duration {
- if self.jitter == 0.0 {
- time::Duration::default()
- } else {
- let jitter_factor = self.rng.next_f64();
- debug_assert!(
- jitter_factor > 0.0,
- "rng returns values between 0.0 and 1.0"
- );
- let rand_jitter = jitter_factor * self.jitter;
- let secs = (base.as_secs() as f64) * rand_jitter;
- let nanos = (base.subsec_nanos() as f64) * rand_jitter;
- let remaining = self.max - base;
- time::Duration::new(secs as u64, nanos as u32).min(remaining)
- }
- }
-}
-
-impl<R> Backoff for ExponentialBackoff<R>
-where
- R: Rng,
-{
- type Future = tokio::time::Sleep;
-
- fn next_backoff(&mut self) -> Self::Future {
- let base = self.base();
- let next = base + self.jitter(base);
-
- self.iterations += 1;
-
- tokio::time::sleep(next)
- }
-}
-
-impl Default for ExponentialBackoffMaker {
- fn default() -> Self {
- ExponentialBackoffMaker::new(
- Duration::from_millis(50),
- Duration::from_millis(u64::MAX),
- 0.99,
- HasherRng::default(),
- )
- .expect("Unable to create ExponentialBackoff")
- }
-}
-
-/// Backoff validation error.
-#[derive(Debug)]
-pub struct InvalidBackoff(&'static str);
-
-impl Display for InvalidBackoff {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "invalid backoff: {}", self.0)
- }
-}
-
-impl std::error::Error for InvalidBackoff {}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use quickcheck::*;
-
- quickcheck! {
- fn backoff_base_first(min_ms: u64, max_ms: u64) -> TestResult {
- let min = time::Duration::from_millis(min_ms);
- let max = time::Duration::from_millis(max_ms);
- let rng = HasherRng::default();
- let mut backoff = match ExponentialBackoffMaker::new(min, max, 0.0, rng) {
- Err(_) => return TestResult::discard(),
- Ok(backoff) => backoff,
- };
- let backoff = backoff.make_backoff();
-
- let delay = backoff.base();
- TestResult::from_bool(min == delay)
- }
-
- fn backoff_base(min_ms: u64, max_ms: u64, iterations: u32) -> TestResult {
- let min = time::Duration::from_millis(min_ms);
- let max = time::Duration::from_millis(max_ms);
- let rng = HasherRng::default();
- let mut backoff = match ExponentialBackoffMaker::new(min, max, 0.0, rng) {
- Err(_) => return TestResult::discard(),
- Ok(backoff) => backoff,
- };
- let mut backoff = backoff.make_backoff();
-
- backoff.iterations = iterations;
- let delay = backoff.base();
- TestResult::from_bool(min <= delay && delay <= max)
- }
-
- fn backoff_jitter(base_ms: u64, max_ms: u64, jitter: f64) -> TestResult {
- let base = time::Duration::from_millis(base_ms);
- let max = time::Duration::from_millis(max_ms);
- let rng = HasherRng::default();
- let mut backoff = match ExponentialBackoffMaker::new(base, max, jitter, rng) {
- Err(_) => return TestResult::discard(),
- Ok(backoff) => backoff,
- };
- let mut backoff = backoff.make_backoff();
-
- let j = backoff.jitter(base);
- if jitter == 0.0 || base_ms == 0 || max_ms == base_ms {
- TestResult::from_bool(j == time::Duration::default())
- } else {
- TestResult::from_bool(j > time::Duration::default())
- }
- }
- }
-}
diff --git a/vendor/tower/src/retry/budget/mod.rs b/vendor/tower/src/retry/budget/mod.rs
deleted file mode 100644
index 3d1d2b87..00000000
--- a/vendor/tower/src/retry/budget/mod.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-//! A retry "budget" for allowing only a certain amount of retries over time.
-//!
-//! # Why budgets and not max retries?
-//!
-//! The most common way of configuring retries is to specify a maximum
-//! number of retry attempts to perform before giving up. This is a familiar idea to anyone
-//! who’s used a web browser: you try to load a webpage, and if it doesn’t load, you try again.
-//! If it still doesn’t load, you try a third time. Finally you give up.
-//!
-//! Unfortunately, there are at least two problems with configuring retries this way:
-//!
-//! **Choosing the maximum number of retry attempts is a guessing game.**
-//! You need to pick a number that’s high enough to make a difference when things are somewhat failing,
-//! but not so high that it generates extra load on the system when it’s really failing. In practice,
-//! you usually pick a maximum retry attempts number out of a hat (e.g. 3) and hope for the best.
-//!
-//! **Systems configured this way are vulnerable to retry storms.**
-//! A retry storm begins when one service starts to experience a larger than normal failure rate.
-//! This causes its clients to retry those failed requests. The extra load from the retries causes the
-//! service to slow down further and fail more requests, triggering more retries. If each client is
-//! configured to retry up to 3 times, this can quadruple the number of requests being sent! To make
-//! matters even worse, if any of the clients’ clients are configured with retries, the number of retries
-//! compounds multiplicatively and can turn a small number of errors into a self-inflicted denial of service attack.
-//!
-//! It's generally dangerous to implement retries without some limiting factor. [`Budget`]s are that limit.
-//!
-//! # Examples
-//!
-//! ```rust
-//! use std::sync::Arc;
-//!
-//! use futures_util::future;
-//! use tower::retry::{budget::{Budget, TpsBudget}, Policy};
-//!
-//! type Req = String;
-//! type Res = String;
-//!
-//! #[derive(Clone, Debug)]
-//! struct RetryPolicy {
-//! budget: Arc<TpsBudget>,
-//! }
-//!
-//! impl<E> Policy<Req, Res, E> for RetryPolicy {
-//! type Future = future::Ready<()>;
-//!
-//! fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future> {
-//! match result {
-//! Ok(_) => {
-//! // Treat all `Response`s as success,
-//! // so deposit budget and don't retry...
-//! self.budget.deposit();
-//! None
-//! }
-//! Err(_) => {
-//! // Treat all errors as failures...
-//! // Withdraw the budget, don't retry if we overdrew.
-//! let withdrew = self.budget.withdraw();
-//! if !withdrew {
-//! return None;
-//! }
-//!
-//! // Try again!
-//! Some(future::ready(()))
-//! }
-//! }
-//! }
-//!
-//! fn clone_request(&mut self, req: &Req) -> Option<Req> {
-//! Some(req.clone())
-//! }
-//! }
-//! ```
-
-pub mod tps_budget;
-
-pub use tps_budget::TpsBudget;
-
-/// For more info about [`Budget`], please see the [module-level documentation].
-///
-/// [module-level documentation]: self
-pub trait Budget {
- /// Store a "deposit" in the budget, which will be used to permit future
- /// withdrawals.
- fn deposit(&self);
-
- /// Check whether there is enough "balance" in the budget to issue a new
- /// retry.
- ///
- /// If there is not enough, false is returned.
- fn withdraw(&self) -> bool;
-}
diff --git a/vendor/tower/src/retry/budget/tps_budget.rs b/vendor/tower/src/retry/budget/tps_budget.rs
deleted file mode 100644
index d6949980..00000000
--- a/vendor/tower/src/retry/budget/tps_budget.rs
+++ /dev/null
@@ -1,260 +0,0 @@
-//! Transactions Per Minute (Tps) Budget implementations
-
-use std::{
- fmt,
- sync::{
- atomic::{AtomicIsize, Ordering},
- Mutex,
- },
- time::Duration,
-};
-use tokio::time::Instant;
-
-use super::Budget;
-
-/// A Transactions Per Minute config for managing retry tokens.
-///
-/// [`TpsBudget`] uses a token bucket to decide if the request should be retried.
-///
-/// [`TpsBudget`] works by checking how much retries have been made in a certain period of time.
-/// Minimum allowed number of retries are effectively reset on an interval. Allowed number of
-/// retries depends on failed request count in recent time frame.
-///
-/// For more info about [`Budget`], please see the [module-level documentation].
-///
-/// [module-level documentation]: super
-pub struct TpsBudget {
- generation: Mutex<Generation>,
- /// Initial budget allowed for every second.
- reserve: isize,
- /// Slots of a the TTL divided evenly.
- slots: Box<[AtomicIsize]>,
- /// The amount of time represented by each slot.
- window: Duration,
- /// The changers for the current slot to be committed
- /// after the slot expires.
- writer: AtomicIsize,
- /// Amount of tokens to deposit for each put().
- deposit_amount: isize,
- /// Amount of tokens to withdraw for each try_get().
- withdraw_amount: isize,
-}
-
-#[derive(Debug)]
-struct Generation {
- /// Slot index of the last generation.
- index: usize,
- /// The timestamp since the last generation expired.
- time: Instant,
-}
-
-// ===== impl TpsBudget =====
-
-impl TpsBudget {
- /// Create a [`TpsBudget`] that allows for a certain percent of the total
- /// requests to be retried.
- ///
- /// - The `ttl` is the duration of how long a single `deposit` should be
- /// considered. Must be between 1 and 60 seconds.
- /// - The `min_per_sec` is the minimum rate of retries allowed to accommodate
- /// clients that have just started issuing requests, or clients that do
- /// not issue many requests per window.
- /// - The `retry_percent` is the percentage of calls to `deposit` that can
- /// be retried. This is in addition to any retries allowed for via
- /// `min_per_sec`. Must be between 0 and 1000.
- ///
- /// As an example, if `0.1` is used, then for every 10 calls to `deposit`,
- /// 1 retry will be allowed. If `2.0` is used, then every `deposit`
- /// allows for 2 retries.
- pub fn new(ttl: Duration, min_per_sec: u32, retry_percent: f32) -> Self {
- // assertions taken from finagle
- assert!(ttl >= Duration::from_secs(1));
- assert!(ttl <= Duration::from_secs(60));
- assert!(retry_percent >= 0.0);
- assert!(retry_percent <= 1000.0);
- assert!(min_per_sec < ::std::i32::MAX as u32);
-
- let (deposit_amount, withdraw_amount) = if retry_percent == 0.0 {
- // If there is no percent, then you gain nothing from deposits.
- // Withdrawals can only be made against the reserve, over time.
- (0, 1)
- } else if retry_percent <= 1.0 {
- (1, (1.0 / retry_percent) as isize)
- } else {
- // Support for when retry_percent is between 1.0 and 1000.0,
- // meaning for every deposit D, D * retry_percent withdrawals
- // can be made.
- (1000, (1000.0 / retry_percent) as isize)
- };
- let reserve = (min_per_sec as isize)
- .saturating_mul(ttl.as_secs() as isize) // ttl is between 1 and 60 seconds
- .saturating_mul(withdraw_amount);
-
- // AtomicIsize isn't clone, so the slots need to be built in a loop...
- let windows = 10u32;
- let mut slots = Vec::with_capacity(windows as usize);
- for _ in 0..windows {
- slots.push(AtomicIsize::new(0));
- }
-
- TpsBudget {
- generation: Mutex::new(Generation {
- index: 0,
- time: Instant::now(),
- }),
- reserve,
- slots: slots.into_boxed_slice(),
- window: ttl / windows,
- writer: AtomicIsize::new(0),
- deposit_amount,
- withdraw_amount,
- }
- }
-
- fn expire(&self) {
- let mut gen = self.generation.lock().expect("generation lock");
-
- let now = Instant::now();
- let diff = now.saturating_duration_since(gen.time);
- if diff < self.window {
- // not expired yet
- return;
- }
-
- let to_commit = self.writer.swap(0, Ordering::SeqCst);
- self.slots[gen.index].store(to_commit, Ordering::SeqCst);
-
- let mut diff = diff;
- let mut idx = (gen.index + 1) % self.slots.len();
- while diff > self.window {
- self.slots[idx].store(0, Ordering::SeqCst);
- diff -= self.window;
- idx = (idx + 1) % self.slots.len();
- }
-
- gen.index = idx;
- gen.time = now;
- }
-
- fn sum(&self) -> isize {
- let current = self.writer.load(Ordering::SeqCst);
- let windowed_sum: isize = self
- .slots
- .iter()
- .map(|slot| slot.load(Ordering::SeqCst))
- // fold() is used instead of sum() to determine overflow behavior
- .fold(0, isize::saturating_add);
-
- current
- .saturating_add(windowed_sum)
- .saturating_add(self.reserve)
- }
-
- fn put(&self, amt: isize) {
- self.expire();
- self.writer.fetch_add(amt, Ordering::SeqCst);
- }
-
- fn try_get(&self, amt: isize) -> bool {
- debug_assert!(amt >= 0);
-
- self.expire();
-
- let sum = self.sum();
- if sum >= amt {
- self.writer.fetch_add(-amt, Ordering::SeqCst);
- true
- } else {
- false
- }
- }
-}
-
-impl Budget for TpsBudget {
- fn deposit(&self) {
- self.put(self.deposit_amount)
- }
-
- fn withdraw(&self) -> bool {
- self.try_get(self.withdraw_amount)
- }
-}
-
-impl Default for TpsBudget {
- fn default() -> Self {
- TpsBudget::new(Duration::from_secs(10), 10, 0.2)
- }
-}
-
-impl fmt::Debug for TpsBudget {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Budget")
- .field("deposit", &self.deposit_amount)
- .field("withdraw", &self.withdraw_amount)
- .field("balance", &self.sum())
- .finish()
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::retry::budget::Budget;
-
- use super::*;
- use tokio::time;
-
- #[test]
- fn tps_empty() {
- let bgt = TpsBudget::new(Duration::from_secs(1), 0, 1.0);
- assert!(!bgt.withdraw());
- }
-
- #[tokio::test]
- async fn tps_leaky() {
- time::pause();
-
- let bgt = TpsBudget::new(Duration::from_secs(1), 0, 1.0);
- bgt.deposit();
-
- time::advance(Duration::from_secs(3)).await;
-
- assert!(!bgt.withdraw());
- }
-
- #[tokio::test]
- async fn tps_slots() {
- time::pause();
-
- let bgt = TpsBudget::new(Duration::from_secs(1), 0, 0.5);
- bgt.deposit();
- bgt.deposit();
- time::advance(Duration::from_millis(901)).await;
- // 900ms later, the deposit should still be valid
- assert!(bgt.withdraw());
-
- // blank slate
- time::advance(Duration::from_millis(2001)).await;
-
- bgt.deposit();
- time::advance(Duration::from_millis(301)).await;
- bgt.deposit();
- time::advance(Duration::from_millis(801)).await;
- bgt.deposit();
-
- // the first deposit is expired, but the 2nd should still be valid,
- // combining with the 3rd
- assert!(bgt.withdraw());
- }
-
- #[tokio::test]
- async fn tps_reserve() {
- let bgt = TpsBudget::new(Duration::from_secs(1), 5, 1.0);
- assert!(bgt.withdraw());
- assert!(bgt.withdraw());
- assert!(bgt.withdraw());
- assert!(bgt.withdraw());
- assert!(bgt.withdraw());
-
- assert!(!bgt.withdraw());
- }
-}
diff --git a/vendor/tower/src/retry/future.rs b/vendor/tower/src/retry/future.rs
deleted file mode 100644
index 7e221844..00000000
--- a/vendor/tower/src/retry/future.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-//! Future types
-
-use super::{Policy, Retry};
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-pin_project! {
- /// The [`Future`] returned by a [`Retry`] service.
- #[derive(Debug)]
- pub struct ResponseFuture<P, S, Request>
- where
- P: Policy<Request, S::Response, S::Error>,
- S: Service<Request>,
- {
- request: Option<Request>,
- #[pin]
- retry: Retry<P, S>,
- #[pin]
- state: State<S::Future, P::Future>,
- }
-}
-
-pin_project! {
- #[project = StateProj]
- #[derive(Debug)]
- enum State<F, P> {
- // Polling the future from [`Service::call`]
- Called {
- #[pin]
- future: F
- },
- // Polling the future from [`Policy::retry`]
- Waiting {
- #[pin]
- waiting: P
- },
- // Polling [`Service::poll_ready`] after [`Waiting`] was OK.
- Retrying,
- }
-}
-
-impl<P, S, Request> ResponseFuture<P, S, Request>
-where
- P: Policy<Request, S::Response, S::Error>,
- S: Service<Request>,
-{
- pub(crate) fn new(
- request: Option<Request>,
- retry: Retry<P, S>,
- future: S::Future,
- ) -> ResponseFuture<P, S, Request> {
- ResponseFuture {
- request,
- retry,
- state: State::Called { future },
- }
- }
-}
-
-impl<P, S, Request> Future for ResponseFuture<P, S, Request>
-where
- P: Policy<Request, S::Response, S::Error>,
- S: Service<Request>,
-{
- type Output = Result<S::Response, S::Error>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- loop {
- match this.state.as_mut().project() {
- StateProj::Called { future } => {
- let mut result = ready!(future.poll(cx));
- if let Some(req) = &mut this.request {
- match this.retry.policy.retry(req, &mut result) {
- Some(waiting) => {
- this.state.set(State::Waiting { waiting });
- }
- None => return Poll::Ready(result),
- }
- } else {
- // request wasn't cloned, so no way to retry it
- return Poll::Ready(result);
- }
- }
- StateProj::Waiting { waiting } => {
- ready!(waiting.poll(cx));
-
- this.state.set(State::Retrying);
- }
- StateProj::Retrying => {
- // NOTE: we assume here that
- //
- // this.retry.poll_ready()
- //
- // is equivalent to
- //
- // this.retry.service.poll_ready()
- //
- // we need to make that assumption to avoid adding an Unpin bound to the Policy
- // in Ready to make it Unpin so that we can get &mut Ready as needed to call
- // poll_ready on it.
- ready!(this.retry.as_mut().project().service.poll_ready(cx))?;
- let req = this
- .request
- .take()
- .expect("retrying requires cloned request");
- *this.request = this.retry.policy.clone_request(&req);
- this.state.set(State::Called {
- future: this.retry.as_mut().project().service.call(req),
- });
- }
- }
- }
- }
-}
diff --git a/vendor/tower/src/retry/layer.rs b/vendor/tower/src/retry/layer.rs
deleted file mode 100644
index f7f2c640..00000000
--- a/vendor/tower/src/retry/layer.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use super::Retry;
-use tower_layer::Layer;
-
-/// Retry requests based on a policy
-#[derive(Debug, Clone)]
-pub struct RetryLayer<P> {
- policy: P,
-}
-
-impl<P> RetryLayer<P> {
- /// Creates a new [`RetryLayer`] from a retry policy.
- pub const fn new(policy: P) -> Self {
- RetryLayer { policy }
- }
-}
-
-impl<P, S> Layer<S> for RetryLayer<P>
-where
- P: Clone,
-{
- type Service = Retry<P, S>;
-
- fn layer(&self, service: S) -> Self::Service {
- let policy = self.policy.clone();
- Retry::new(policy, service)
- }
-}
diff --git a/vendor/tower/src/retry/mod.rs b/vendor/tower/src/retry/mod.rs
deleted file mode 100644
index 1bb5e29e..00000000
--- a/vendor/tower/src/retry/mod.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-//! Middleware for retrying "failed" requests.
-
-pub mod backoff;
-pub mod budget;
-pub mod future;
-mod layer;
-mod policy;
-
-pub use self::layer::RetryLayer;
-pub use self::policy::Policy;
-
-use self::future::ResponseFuture;
-use pin_project_lite::pin_project;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-pin_project! {
- /// Configure retrying requests of "failed" responses.
- ///
- /// A [`Policy`] classifies what is a "failed" response.
- ///
- /// # Clone
- ///
- /// This middleware requires that the inner `Service` implements [`Clone`],
- /// because the `Service` must be stored in each [`ResponseFuture`] in
- /// order to retry the request in the event of a failure. If the inner
- /// `Service` type does not implement `Clone`, the [`Buffer`] middleware
- /// can be added to make any `Service` cloneable.
- ///
- /// [`Buffer`]: crate::buffer::Buffer
- ///
- /// The `Policy` must also implement `Clone`. This middleware will
- /// clone the policy for each _request session_. This means a new clone
- /// of the policy will be created for each initial request and any subsequent
- /// retries of that request. Therefore, any state stored in the `Policy` instance
- /// is for that request session only. In order to share data across request
- /// sessions, that shared state may be stored in an [`Arc`], so that all clones
- /// of the `Policy` type reference the same instance of the shared state.
- ///
- /// [`Arc`]: std::sync::Arc
- #[derive(Clone, Debug)]
- pub struct Retry<P, S> {
- policy: P,
- service: S,
- }
-}
-
-// ===== impl Retry =====
-
-impl<P, S> Retry<P, S> {
- /// Retry the inner service depending on this [`Policy`].
- pub const fn new(policy: P, service: S) -> Self {
- Retry { policy, service }
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &S {
- &self.service
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut S {
- &mut self.service
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> S {
- self.service
- }
-}
-
-impl<P, S, Request> Service<Request> for Retry<P, S>
-where
- P: Policy<Request, S::Response, S::Error> + Clone,
- S: Service<Request> + Clone,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = ResponseFuture<P, S, Request>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // NOTE: the Future::poll impl for ResponseFuture assumes that Retry::poll_ready is
- // equivalent to Ready.service.poll_ready. If this ever changes, that code must be updated
- // as well.
- self.service.poll_ready(cx)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- let cloned = self.policy.clone_request(&request);
- let future = self.service.call(request);
-
- ResponseFuture::new(cloned, self.clone(), future)
- }
-}
diff --git a/vendor/tower/src/retry/policy.rs b/vendor/tower/src/retry/policy.rs
deleted file mode 100644
index 57b80a71..00000000
--- a/vendor/tower/src/retry/policy.rs
+++ /dev/null
@@ -1,94 +0,0 @@
-use std::future::Future;
-
-/// A "retry policy" to classify if a request should be retried.
-///
-/// # Example
-///
-/// ```
-/// use tower::retry::Policy;
-/// use futures_util::future;
-///
-/// type Req = String;
-/// type Res = String;
-///
-/// struct Attempts(usize);
-///
-/// impl<E> Policy<Req, Res, E> for Attempts {
-/// type Future = future::Ready<()>;
-///
-/// fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future> {
-/// match result {
-/// Ok(_) => {
-/// // Treat all `Response`s as success,
-/// // so don't retry...
-/// None
-/// },
-/// Err(_) => {
-/// // Treat all errors as failures...
-/// // But we limit the number of attempts...
-/// if self.0 > 0 {
-/// // Try again!
-/// self.0 -= 1;
-/// Some(future::ready(()))
-/// } else {
-/// // Used all our attempts, no retry...
-/// None
-/// }
-/// }
-/// }
-/// }
-///
-/// fn clone_request(&mut self, req: &Req) -> Option<Req> {
-/// Some(req.clone())
-/// }
-/// }
-/// ```
-pub trait Policy<Req, Res, E> {
- /// The [`Future`] type returned by [`Policy::retry`].
- type Future: Future<Output = ()>;
-
- /// Check the policy if a certain request should be retried.
- ///
- /// This method is passed a reference to the original request, and either
- /// the [`Service::Response`] or [`Service::Error`] from the inner service.
- ///
- /// If the request should **not** be retried, return `None`.
- ///
- /// If the request *should* be retried, return `Some` future that will delay
- /// the next retry of the request. This can be used to sleep for a certain
- /// duration, to wait for some external condition to be met before retrying,
- /// or resolve right away, if the request should be retried immediately.
- ///
- /// ## Mutating Requests
- ///
- /// The policy MAY chose to mutate the `req`: if the request is mutated, the
- /// mutated request will be sent to the inner service in the next retry.
- /// This can be helpful for use cases like tracking the retry count in a
- /// header.
- ///
- /// ## Mutating Results
- ///
- /// The policy MAY chose to mutate the result. This enables the retry
- /// policy to convert a failure into a success and vice versa. For example,
- /// if the policy is used to poll while waiting for a state change, the
- /// policy can switch the result to emit a specific error when retries are
- /// exhausted.
- ///
- /// The policy can also record metadata on the request to include
- /// information about the number of retries required or to record that a
- /// failure failed after exhausting all retries.
- ///
- /// [`Service::Response`]: crate::Service::Response
- /// [`Service::Error`]: crate::Service::Error
- fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future>;
-
- /// Tries to clone a request before being passed to the inner service.
- ///
- /// If the request cannot be cloned, return [`None`]. Moreover, the retry
- /// function will not be called if the [`None`] is returned.
- fn clone_request(&mut self, req: &Req) -> Option<Req>;
-}
-
-// Ensure `Policy` is object safe
-#[cfg(test)]
-fn _obj_safe(_: Box<dyn Policy<(), (), (), Future = futures::future::Ready<()>>>) {}
diff --git a/vendor/tower/src/spawn_ready/future.rs b/vendor/tower/src/spawn_ready/future.rs
deleted file mode 100644
index c7d4da57..00000000
--- a/vendor/tower/src/spawn_ready/future.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//! Background readiness types
-
-opaque_future! {
- /// Response future from [`SpawnReady`] services.
- ///
- /// [`SpawnReady`]: crate::spawn_ready::SpawnReady
- pub type ResponseFuture<F, E> = futures_util::future::MapErr<F, fn(E) -> crate::BoxError>;
-}
diff --git a/vendor/tower/src/spawn_ready/layer.rs b/vendor/tower/src/spawn_ready/layer.rs
deleted file mode 100644
index bec6e1c2..00000000
--- a/vendor/tower/src/spawn_ready/layer.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-/// Spawns tasks to drive its inner service to readiness.
-#[derive(Clone, Debug, Default)]
-pub struct SpawnReadyLayer(());
-
-impl SpawnReadyLayer {
- /// Builds a [`SpawnReadyLayer`].
- pub fn new() -> Self {
- Self::default()
- }
-}
-
-impl<S> tower_layer::Layer<S> for SpawnReadyLayer {
- type Service = super::SpawnReady<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- super::SpawnReady::new(service)
- }
-}
diff --git a/vendor/tower/src/spawn_ready/mod.rs b/vendor/tower/src/spawn_ready/mod.rs
deleted file mode 100644
index 97e2ecf9..00000000
--- a/vendor/tower/src/spawn_ready/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//! When an underlying service is not ready, drive it to readiness on a
-//! background task.
-
-pub mod future;
-mod layer;
-mod service;
-
-pub use self::layer::SpawnReadyLayer;
-pub use self::service::SpawnReady;
diff --git a/vendor/tower/src/spawn_ready/service.rs b/vendor/tower/src/spawn_ready/service.rs
deleted file mode 100644
index d9573f50..00000000
--- a/vendor/tower/src/spawn_ready/service.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-use super::{future::ResponseFuture, SpawnReadyLayer};
-use crate::{util::ServiceExt, BoxError};
-use futures_core::ready;
-use futures_util::future::TryFutureExt;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-use tracing::Instrument;
-
-/// Spawns tasks to drive an inner service to readiness.
-///
-/// See crate level documentation for more details.
-#[derive(Debug)]
-pub struct SpawnReady<S> {
- inner: Inner<S>,
-}
-
-#[derive(Debug)]
-enum Inner<S> {
- Service(Option<S>),
- Future(tokio::task::JoinHandle<Result<S, BoxError>>),
-}
-
-impl<S> SpawnReady<S> {
- /// Creates a new [`SpawnReady`] wrapping `service`.
- pub const fn new(service: S) -> Self {
- Self {
- inner: Inner::Service(Some(service)),
- }
- }
-
- /// Creates a layer that wraps services with [`SpawnReady`].
- pub fn layer() -> SpawnReadyLayer {
- SpawnReadyLayer::default()
- }
-}
-
-impl<S> Drop for SpawnReady<S> {
- fn drop(&mut self) {
- if let Inner::Future(ref mut task) = self.inner {
- task.abort();
- }
- }
-}
-
-impl<S, Req> Service<Req> for SpawnReady<S>
-where
- Req: 'static,
- S: Service<Req> + Send + 'static,
- S::Error: Into<BoxError>,
-{
- type Response = S::Response;
- type Error = BoxError;
- type Future = ResponseFuture<S::Future, S::Error>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), BoxError>> {
- loop {
- self.inner = match self.inner {
- Inner::Service(ref mut svc) => {
- if let Poll::Ready(r) = svc.as_mut().expect("illegal state").poll_ready(cx) {
- return Poll::Ready(r.map_err(Into::into));
- }
-
- let svc = svc.take().expect("illegal state");
- let rx =
- tokio::spawn(svc.ready_oneshot().map_err(Into::into).in_current_span());
- Inner::Future(rx)
- }
- Inner::Future(ref mut fut) => {
- let svc = ready!(Pin::new(fut).poll(cx))??;
- Inner::Service(Some(svc))
- }
- }
- }
- }
-
- fn call(&mut self, request: Req) -> Self::Future {
- match self.inner {
- Inner::Service(Some(ref mut svc)) => {
- ResponseFuture::new(svc.call(request).map_err(Into::into))
- }
- _ => unreachable!("poll_ready must be called"),
- }
- }
-}
diff --git a/vendor/tower/src/steer/mod.rs b/vendor/tower/src/steer/mod.rs
deleted file mode 100644
index d6141965..00000000
--- a/vendor/tower/src/steer/mod.rs
+++ /dev/null
@@ -1,204 +0,0 @@
-//! This module provides functionality to aid managing routing requests between [`Service`]s.
-//!
-//! # Example
-//!
-//! [`Steer`] can for example be used to create a router, akin to what you might find in web
-//! frameworks.
-//!
-//! Here, `GET /` will be sent to the `root` service, while all other requests go to `not_found`.
-//!
-//! ```rust
-//! # use std::task::{Context, Poll};
-//! # use tower_service::Service;
-//! # use futures_util::future::{ready, Ready, poll_fn};
-//! # use tower::steer::Steer;
-//! # use tower::service_fn;
-//! # use tower::util::BoxService;
-//! # use tower::ServiceExt;
-//! # use std::convert::Infallible;
-//! use http::{Request, Response, StatusCode, Method};
-//!
-//! # #[tokio::main]
-//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
-//! // Service that responds to `GET /`
-//! let root = service_fn(|req: Request<String>| async move {
-//! # assert_eq!(req.uri().path(), "/");
-//! let res = Response::new("Hello, World!".to_string());
-//! Ok::<_, Infallible>(res)
-//! });
-//! // We have to box the service so its type gets erased and we can put it in a `Vec` with other
-//! // services
-//! let root = BoxService::new(root);
-//!
-//! // Service that responds with `404 Not Found` to all requests
-//! let not_found = service_fn(|req: Request<String>| async move {
-//! let res = Response::builder()
-//! .status(StatusCode::NOT_FOUND)
-//! .body(String::new())
-//! .expect("response is valid");
-//! Ok::<_, Infallible>(res)
-//! });
-//! // Box that as well
-//! let not_found = BoxService::new(not_found);
-//!
-//! let mut svc = Steer::new(
-//! // All services we route between
-//! vec![root, not_found],
-//! // How we pick which service to send the request to
-//! |req: &Request<String>, _services: &[_]| {
-//! if req.method() == Method::GET && req.uri().path() == "/" {
-//! 0 // Index of `root`
-//! } else {
-//! 1 // Index of `not_found`
-//! }
-//! },
-//! );
-//!
-//! // This request will get sent to `root`
-//! let req = Request::get("/").body(String::new()).unwrap();
-//! let res = svc.ready().await?.call(req).await?;
-//! assert_eq!(res.into_body(), "Hello, World!");
-//!
-//! // This request will get sent to `not_found`
-//! let req = Request::get("/does/not/exist").body(String::new()).unwrap();
-//! let res = svc.ready().await?.call(req).await?;
-//! assert_eq!(res.status(), StatusCode::NOT_FOUND);
-//! assert_eq!(res.into_body(), "");
-//! #
-//! # Ok(())
-//! # }
-//! ```
-use std::task::{Context, Poll};
-use std::{collections::VecDeque, fmt, marker::PhantomData};
-use tower_service::Service;
-
-/// This is how callers of [`Steer`] tell it which `Service` a `Req` corresponds to.
-pub trait Picker<S, Req> {
- /// Return an index into the iterator of `Service` passed to [`Steer::new`].
- fn pick(&mut self, r: &Req, services: &[S]) -> usize;
-}
-
-impl<S, F, Req> Picker<S, Req> for F
-where
- F: Fn(&Req, &[S]) -> usize,
-{
- fn pick(&mut self, r: &Req, services: &[S]) -> usize {
- self(r, services)
- }
-}
-
-/// [`Steer`] manages a list of [`Service`]s which all handle the same type of request.
-///
-/// An example use case is a sharded service.
-/// It accepts new requests, then:
-/// 1. Determines, via the provided [`Picker`], which [`Service`] the request corresponds to.
-/// 2. Waits (in [`Service::poll_ready`]) for *all* services to be ready.
-/// 3. Calls the correct [`Service`] with the request, and returns a future corresponding to the
-/// call.
-///
-/// Note that [`Steer`] must wait for all services to be ready since it can't know ahead of time
-/// which [`Service`] the next message will arrive for, and is unwilling to buffer items
-/// indefinitely. This will cause head-of-line blocking unless paired with a [`Service`] that does
-/// buffer items indefinitely, and thus always returns [`Poll::Ready`]. For example, wrapping each
-/// component service with a [`Buffer`] with a high enough limit (the maximum number of concurrent
-/// requests) will prevent head-of-line blocking in [`Steer`].
-///
-/// [`Buffer`]: crate::buffer::Buffer
-pub struct Steer<S, F, Req> {
- router: F,
- services: Vec<S>,
- not_ready: VecDeque<usize>,
- _phantom: PhantomData<Req>,
-}
-
-impl<S, F, Req> Steer<S, F, Req> {
- /// Make a new [`Steer`] with a list of [`Service`]'s and a [`Picker`].
- ///
- /// Note: the order of the [`Service`]'s is significant for [`Picker::pick`]'s return value.
- pub fn new(services: impl IntoIterator<Item = S>, router: F) -> Self {
- let services: Vec<_> = services.into_iter().collect();
- let not_ready: VecDeque<_> = services.iter().enumerate().map(|(i, _)| i).collect();
- Self {
- router,
- services,
- not_ready,
- _phantom: PhantomData,
- }
- }
-}
-
-impl<S, Req, F> Service<Req> for Steer<S, F, Req>
-where
- S: Service<Req>,
- F: Picker<S, Req>,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- loop {
- // must wait for *all* services to be ready.
- // this will cause head-of-line blocking unless the underlying services are always ready.
- if self.not_ready.is_empty() {
- return Poll::Ready(Ok(()));
- } else {
- if self.services[self.not_ready[0]]
- .poll_ready(cx)?
- .is_pending()
- {
- return Poll::Pending;
- }
-
- self.not_ready.pop_front();
- }
- }
- }
-
- fn call(&mut self, req: Req) -> Self::Future {
- assert!(
- self.not_ready.is_empty(),
- "Steer must wait for all services to be ready. Did you forget to call poll_ready()?"
- );
-
- let idx = self.router.pick(&req, &self.services[..]);
- let cl = &mut self.services[idx];
- self.not_ready.push_back(idx);
- cl.call(req)
- }
-}
-
-impl<S, F, Req> Clone for Steer<S, F, Req>
-where
- S: Clone,
- F: Clone,
-{
- fn clone(&self) -> Self {
- Self {
- router: self.router.clone(),
- services: self.services.clone(),
- not_ready: self.not_ready.clone(),
- _phantom: PhantomData,
- }
- }
-}
-
-impl<S, F, Req> fmt::Debug for Steer<S, F, Req>
-where
- S: fmt::Debug,
- F: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let Self {
- router,
- services,
- not_ready,
- _phantom,
- } = self;
- f.debug_struct("Steer")
- .field("router", router)
- .field("services", services)
- .field("not_ready", not_ready)
- .finish()
- }
-}
diff --git a/vendor/tower/src/timeout/error.rs b/vendor/tower/src/timeout/error.rs
deleted file mode 100644
index cc309b9c..00000000
--- a/vendor/tower/src/timeout/error.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-//! Error types
-
-use std::{error, fmt};
-
-/// The timeout elapsed.
-#[derive(Debug, Default)]
-pub struct Elapsed(pub(super) ());
-
-impl Elapsed {
- /// Construct a new elapsed error
- pub const fn new() -> Self {
- Elapsed(())
- }
-}
-
-impl fmt::Display for Elapsed {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("request timed out")
- }
-}
-
-impl error::Error for Elapsed {}
diff --git a/vendor/tower/src/timeout/future.rs b/vendor/tower/src/timeout/future.rs
deleted file mode 100644
index b4eb3f4e..00000000
--- a/vendor/tower/src/timeout/future.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-//! Future types
-
-use super::error::Elapsed;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tokio::time::Sleep;
-
-pin_project! {
- /// [`Timeout`] response future
- ///
- /// [`Timeout`]: crate::timeout::Timeout
- #[derive(Debug)]
- pub struct ResponseFuture<T> {
- #[pin]
- response: T,
- #[pin]
- sleep: Sleep,
- }
-}
-
-impl<T> ResponseFuture<T> {
- pub(crate) fn new(response: T, sleep: Sleep) -> Self {
- ResponseFuture { response, sleep }
- }
-}
-
-impl<F, T, E> Future for ResponseFuture<F>
-where
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
-
- // First, try polling the future
- match this.response.poll(cx) {
- Poll::Ready(v) => return Poll::Ready(v.map_err(Into::into)),
- Poll::Pending => {}
- }
-
- // Now check the sleep
- match this.sleep.poll(cx) {
- Poll::Pending => Poll::Pending,
- Poll::Ready(_) => Poll::Ready(Err(Elapsed(()).into())),
- }
- }
-}
diff --git a/vendor/tower/src/timeout/layer.rs b/vendor/tower/src/timeout/layer.rs
deleted file mode 100644
index d8cc2d1c..00000000
--- a/vendor/tower/src/timeout/layer.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use super::Timeout;
-use std::time::Duration;
-use tower_layer::Layer;
-
-/// Applies a timeout to requests via the supplied inner service.
-#[derive(Debug, Clone)]
-pub struct TimeoutLayer {
- timeout: Duration,
-}
-
-impl TimeoutLayer {
- /// Create a timeout from a duration
- pub const fn new(timeout: Duration) -> Self {
- TimeoutLayer { timeout }
- }
-}
-
-impl<S> Layer<S> for TimeoutLayer {
- type Service = Timeout<S>;
-
- fn layer(&self, service: S) -> Self::Service {
- Timeout::new(service, self.timeout)
- }
-}
diff --git a/vendor/tower/src/timeout/mod.rs b/vendor/tower/src/timeout/mod.rs
deleted file mode 100644
index da3bbf98..00000000
--- a/vendor/tower/src/timeout/mod.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-//! Middleware that applies a timeout to requests.
-//!
-//! If the response does not complete within the specified timeout, the response
-//! will be aborted.
-
-pub mod error;
-pub mod future;
-mod layer;
-
-pub use self::layer::TimeoutLayer;
-
-use self::future::ResponseFuture;
-use std::task::{Context, Poll};
-use std::time::Duration;
-use tower_service::Service;
-
-/// Applies a timeout to requests.
-#[derive(Debug, Clone)]
-pub struct Timeout<T> {
- inner: T,
- timeout: Duration,
-}
-
-// ===== impl Timeout =====
-
-impl<T> Timeout<T> {
- /// Creates a new [`Timeout`]
- pub const fn new(inner: T, timeout: Duration) -> Self {
- Timeout { inner, timeout }
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &T {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<S, Request> Service<Request> for Timeout<S>
-where
- S: Service<Request>,
- S::Error: Into<crate::BoxError>,
-{
- type Response = S::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<S::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self.inner.poll_ready(cx) {
- Poll::Pending => Poll::Pending,
- Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
- }
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- let response = self.inner.call(request);
- let sleep = tokio::time::sleep(self.timeout);
-
- ResponseFuture::new(response, sleep)
- }
-}
diff --git a/vendor/tower/src/util/and_then.rs b/vendor/tower/src/util/and_then.rs
deleted file mode 100644
index adb9ada7..00000000
--- a/vendor/tower/src/util/and_then.rs
+++ /dev/null
@@ -1,130 +0,0 @@
-use futures_core::TryFuture;
-use futures_util::{future, TryFutureExt};
-use std::fmt;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Service returned by the [`and_then`] combinator.
-///
-/// [`and_then`]: crate::util::ServiceExt::and_then
-#[derive(Clone)]
-pub struct AndThen<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for AndThen<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("AndThen")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-pin_project_lite::pin_project! {
- /// Response future from [`AndThen`] services.
- ///
- /// [`AndThen`]: crate::util::AndThen
- pub struct AndThenFuture<F1, F2: TryFuture, N> {
- #[pin]
- inner: future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>,
- }
-}
-
-impl<F1, F2: TryFuture, N> AndThenFuture<F1, F2, N> {
- pub(crate) fn new(inner: future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>) -> Self {
- Self { inner }
- }
-}
-
-impl<F1, F2: TryFuture, N> std::fmt::Debug for AndThenFuture<F1, F2, N> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.debug_tuple("AndThenFuture")
- .field(&format_args!("..."))
- .finish()
- }
-}
-
-impl<F1, F2: TryFuture, N> Future for AndThenFuture<F1, F2, N>
-where
- future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>: Future,
-{
- type Output = <future::AndThen<future::ErrInto<F1, F2::Error>, F2, N> as Future>::Output;
-
- #[inline]
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().inner.poll(cx)
- }
-}
-
-/// A [`Layer`] that produces a [`AndThen`] service.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Clone, Debug)]
-pub struct AndThenLayer<F> {
- f: F,
-}
-
-impl<S, F> AndThen<S, F> {
- /// Creates a new `AndThen` service.
- pub const fn new(inner: S, f: F) -> Self {
- AndThen { f, inner }
- }
-
- /// Returns a new [`Layer`] that produces [`AndThen`] services.
- ///
- /// This is a convenience function that simply calls [`AndThenLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> AndThenLayer<F> {
- AndThenLayer { f }
- }
-}
-
-impl<S, F, Request, Fut> Service<Request> for AndThen<S, F>
-where
- S: Service<Request>,
- S::Error: Into<Fut::Error>,
- F: FnOnce(S::Response) -> Fut + Clone,
- Fut: TryFuture,
-{
- type Response = Fut::Ok;
- type Error = Fut::Error;
- type Future = AndThenFuture<S::Future, Fut, F>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(Into::into)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- AndThenFuture::new(self.inner.call(request).err_into().and_then(self.f.clone()))
- }
-}
-
-impl<F> AndThenLayer<F> {
- /// Creates a new [`AndThenLayer`] layer.
- pub const fn new(f: F) -> Self {
- AndThenLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for AndThenLayer<F>
-where
- F: Clone,
-{
- type Service = AndThen<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- AndThen {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/src/util/boxed/layer.rs b/vendor/tower/src/util/boxed/layer.rs
deleted file mode 100644
index 34e65fa4..00000000
--- a/vendor/tower/src/util/boxed/layer.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-use crate::util::BoxService;
-use std::{fmt, sync::Arc};
-use tower_layer::{layer_fn, Layer};
-use tower_service::Service;
-
-/// A boxed [`Layer`] trait object.
-///
-/// [`BoxLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
-/// and the output [`Service`] to be dynamic, while having consistent types.
-///
-/// This [`Layer`] produces [`BoxService`] instances erasing the type of the
-/// [`Service`] produced by the wrapped [`Layer`].
-///
-/// # Example
-///
-/// `BoxLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
-/// the same types. In this example, we include a [`Timeout`] layer
-/// only if an environment variable is set. We can use `BoxLayer`
-/// to return a consistent type regardless of runtime configuration:
-///
-/// ```
-/// use std::time::Duration;
-/// use tower::{Service, ServiceBuilder, BoxError, util::BoxLayer};
-///
-/// fn common_layer<S, T>() -> BoxLayer<S, T, S::Response, BoxError>
-/// where
-/// S: Service<T> + Send + 'static,
-/// S::Future: Send + 'static,
-/// S::Error: Into<BoxError> + 'static,
-/// {
-/// let builder = ServiceBuilder::new()
-/// .concurrency_limit(100);
-///
-/// if std::env::var("SET_TIMEOUT").is_ok() {
-/// let layer = builder
-/// .timeout(Duration::from_secs(30))
-/// .into_inner();
-///
-/// BoxLayer::new(layer)
-/// } else {
-/// let layer = builder
-/// .map_err(Into::into)
-/// .into_inner();
-///
-/// BoxLayer::new(layer)
-/// }
-/// }
-/// ```
-///
-/// [`Layer`]: tower_layer::Layer
-/// [`Service`]: tower_service::Service
-/// [`BoxService`]: super::BoxService
-/// [`Timeout`]: crate::timeout
-pub struct BoxLayer<In, T, U, E> {
- boxed: Arc<dyn Layer<In, Service = BoxService<T, U, E>> + Send + Sync + 'static>,
-}
-
-impl<In, T, U, E> BoxLayer<In, T, U, E> {
- /// Create a new [`BoxLayer`].
- pub fn new<L>(inner_layer: L) -> Self
- where
- L: Layer<In> + Send + Sync + 'static,
- L::Service: Service<T, Response = U, Error = E> + Send + 'static,
- <L::Service as Service<T>>::Future: Send + 'static,
- {
- let layer = layer_fn(move |inner: In| {
- let out = inner_layer.layer(inner);
- BoxService::new(out)
- });
-
- Self {
- boxed: Arc::new(layer),
- }
- }
-}
-
-impl<In, T, U, E> Layer<In> for BoxLayer<In, T, U, E> {
- type Service = BoxService<T, U, E>;
-
- fn layer(&self, inner: In) -> Self::Service {
- self.boxed.layer(inner)
- }
-}
-
-impl<In, T, U, E> Clone for BoxLayer<In, T, U, E> {
- fn clone(&self) -> Self {
- Self {
- boxed: Arc::clone(&self.boxed),
- }
- }
-}
-
-impl<In, T, U, E> fmt::Debug for BoxLayer<In, T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxLayer").finish()
- }
-}
diff --git a/vendor/tower/src/util/boxed/layer_clone.rs b/vendor/tower/src/util/boxed/layer_clone.rs
deleted file mode 100644
index 1f626899..00000000
--- a/vendor/tower/src/util/boxed/layer_clone.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-use crate::util::BoxCloneService;
-use std::{fmt, sync::Arc};
-use tower_layer::{layer_fn, Layer};
-use tower_service::Service;
-
-/// A [`Clone`] + [`Send`] boxed [`Layer`].
-///
-/// [`BoxCloneServiceLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
-/// and the output [`Service`] to be dynamic, while having consistent types.
-///
-/// This [`Layer`] produces [`BoxCloneService`] instances erasing the type of the
-/// [`Service`] produced by the wrapped [`Layer`].
-///
-/// This is similar to [`BoxLayer`](super::BoxLayer) except the layer and resulting
-/// service implements [`Clone`].
-///
-/// # Example
-///
-/// `BoxCloneServiceLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
-/// the same types, when the underlying service must be clone (for example, when building a MakeService)
-/// In this example, we include a [`Timeout`] layer only if an environment variable is set. We can use
-/// `BoxCloneService` to return a consistent type regardless of runtime configuration:
-///
-/// ```
-/// use std::time::Duration;
-/// use tower::{Service, ServiceBuilder, BoxError};
-/// use tower::util::{BoxCloneServiceLayer, BoxCloneService};
-///
-/// #
-/// # struct Request;
-/// # struct Response;
-/// # impl Response {
-/// # fn new() -> Self { Self }
-/// # }
-///
-/// fn common_layer<S, T>() -> BoxCloneServiceLayer<S, T, S::Response, BoxError>
-/// where
-/// S: Service<T> + Clone + Send + 'static,
-/// S::Future: Send + 'static,
-/// S::Error: Into<BoxError> + 'static,
-/// {
-/// let builder = ServiceBuilder::new()
-/// .concurrency_limit(100);
-///
-/// if std::env::var("SET_TIMEOUT").is_ok() {
-/// let layer = builder
-/// .timeout(Duration::from_secs(30))
-/// .into_inner();
-///
-/// BoxCloneServiceLayer::new(layer)
-/// } else {
-/// let layer = builder
-/// .map_err(Into::into)
-/// .into_inner();
-///
-/// BoxCloneServiceLayer::new(layer)
-/// }
-/// }
-///
-/// // We can clone the layer (this is true of BoxLayer as well)
-/// let boxed_clone_layer = common_layer();
-///
-/// let cloned_layer = boxed_clone_layer.clone();
-///
-/// // Using the `BoxCloneServiceLayer` we can create a `BoxCloneService`
-/// let service: BoxCloneService<Request, Response, BoxError> = ServiceBuilder::new().layer(boxed_clone_layer)
-/// .service_fn(|req: Request| async {
-/// Ok::<_, BoxError>(Response::new())
-/// });
-///
-/// # let service = assert_service(service);
-///
-/// // And we can still clone the service
-/// let cloned_service = service.clone();
-/// #
-/// # fn assert_service<S, R>(svc: S) -> S
-/// # where S: Service<R> { svc }
-///
-/// ```
-///
-/// [`Layer`]: tower_layer::Layer
-/// [`Service`]: tower_service::Service
-/// [`BoxService`]: super::BoxService
-/// [`Timeout`]: crate::timeout
-pub struct BoxCloneServiceLayer<In, T, U, E> {
- boxed: Arc<dyn Layer<In, Service = BoxCloneService<T, U, E>> + Send + Sync + 'static>,
-}
-
-impl<In, T, U, E> BoxCloneServiceLayer<In, T, U, E> {
- /// Create a new [`BoxCloneServiceLayer`].
- pub fn new<L>(inner_layer: L) -> Self
- where
- L: Layer<In> + Send + Sync + 'static,
- L::Service: Service<T, Response = U, Error = E> + Send + Clone + 'static,
- <L::Service as Service<T>>::Future: Send + 'static,
- {
- let layer = layer_fn(move |inner: In| {
- let out = inner_layer.layer(inner);
- BoxCloneService::new(out)
- });
-
- Self {
- boxed: Arc::new(layer),
- }
- }
-}
-
-impl<In, T, U, E> Layer<In> for BoxCloneServiceLayer<In, T, U, E> {
- type Service = BoxCloneService<T, U, E>;
-
- fn layer(&self, inner: In) -> Self::Service {
- self.boxed.layer(inner)
- }
-}
-
-impl<In, T, U, E> Clone for BoxCloneServiceLayer<In, T, U, E> {
- fn clone(&self) -> Self {
- Self {
- boxed: Arc::clone(&self.boxed),
- }
- }
-}
-
-impl<In, T, U, E> fmt::Debug for BoxCloneServiceLayer<In, T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxCloneServiceLayer").finish()
- }
-}
diff --git a/vendor/tower/src/util/boxed/layer_clone_sync.rs b/vendor/tower/src/util/boxed/layer_clone_sync.rs
deleted file mode 100644
index 950e66be..00000000
--- a/vendor/tower/src/util/boxed/layer_clone_sync.rs
+++ /dev/null
@@ -1,129 +0,0 @@
-use std::{fmt, sync::Arc};
-use tower_layer::{layer_fn, Layer};
-use tower_service::Service;
-
-use crate::util::BoxCloneSyncService;
-
-/// A [`Clone`] + [`Send`] + [`Sync`] boxed [`Layer`].
-///
-/// [`BoxCloneSyncServiceLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
-/// and the output [`Service`] to be dynamic, while having consistent types.
-///
-/// This [`Layer`] produces [`BoxCloneSyncService`] instances erasing the type of the
-/// [`Service`] produced by the wrapped [`Layer`].
-///
-/// This is similar to [`BoxCloneServiceLayer`](super::BoxCloneServiceLayer) except the layer and resulting
-/// service implements [`Sync`].
-///
-/// # Example
-///
-/// `BoxCloneSyncServiceLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
-/// the same types, when the underlying service must be clone and sync (for example, when building a Hyper connector).
-/// In this example, we include a [`Timeout`] layer only if an environment variable is set. We can use
-/// `BoxCloneSyncServiceLayer` to return a consistent type regardless of runtime configuration:
-///
-/// ```
-/// use std::time::Duration;
-/// use tower::{Service, ServiceBuilder, BoxError};
-/// use tower::util::{BoxCloneSyncServiceLayer, BoxCloneSyncService};
-///
-/// #
-/// # struct Request;
-/// # struct Response;
-/// # impl Response {
-/// # fn new() -> Self { Self }
-/// # }
-///
-/// fn common_layer<S, T>() -> BoxCloneSyncServiceLayer<S, T, S::Response, BoxError>
-/// where
-/// S: Service<T> + Clone + Send + Sync + 'static,
-/// S::Future: Send + 'static,
-/// S::Error: Into<BoxError> + 'static,
-/// {
-/// let builder = ServiceBuilder::new()
-/// .concurrency_limit(100);
-///
-/// if std::env::var("SET_TIMEOUT").is_ok() {
-/// let layer = builder
-/// .timeout(Duration::from_secs(30))
-/// .into_inner();
-///
-/// BoxCloneSyncServiceLayer::new(layer)
-/// } else {
-/// let layer = builder
-/// .map_err(Into::into)
-/// .into_inner();
-///
-/// BoxCloneSyncServiceLayer::new(layer)
-/// }
-/// }
-///
-/// // We can clone the layer (this is true of BoxLayer as well)
-/// let boxed_clone_sync_layer = common_layer();
-///
-/// let cloned_sync_layer = boxed_clone_sync_layer.clone();
-///
-/// // Using the `BoxCloneSyncServiceLayer` we can create a `BoxCloneSyncService`
-/// let service: BoxCloneSyncService<Request, Response, BoxError> = ServiceBuilder::new().layer(cloned_sync_layer)
-/// .service_fn(|req: Request| async {
-/// Ok::<_, BoxError>(Response::new())
-/// });
-///
-/// # let service = assert_service(service);
-///
-/// // And we can still clone the service
-/// let cloned_service = service.clone();
-/// #
-/// # fn assert_service<S, R>(svc: S) -> S
-/// # where S: Service<R> { svc }
-///
-/// ```
-///
-/// [`Layer`]: tower_layer::Layer
-/// [`Service`]: tower_service::Service
-/// [`BoxService`]: super::BoxService
-/// [`Timeout`]: crate::timeout
-pub struct BoxCloneSyncServiceLayer<In, T, U, E> {
- boxed: Arc<dyn Layer<In, Service = BoxCloneSyncService<T, U, E>> + Send + Sync + 'static>,
-}
-
-impl<In, T, U, E> BoxCloneSyncServiceLayer<In, T, U, E> {
- /// Create a new [`BoxCloneSyncServiceLayer`].
- pub fn new<L>(inner_layer: L) -> Self
- where
- L: Layer<In> + Send + Sync + 'static,
- L::Service: Service<T, Response = U, Error = E> + Send + Sync + Clone + 'static,
- <L::Service as Service<T>>::Future: Send + 'static,
- {
- let layer = layer_fn(move |inner: In| {
- let out = inner_layer.layer(inner);
- BoxCloneSyncService::new(out)
- });
-
- Self {
- boxed: Arc::new(layer),
- }
- }
-}
-
-impl<In, T, U, E> Layer<In> for BoxCloneSyncServiceLayer<In, T, U, E> {
- type Service = BoxCloneSyncService<T, U, E>;
-
- fn layer(&self, inner: In) -> Self::Service {
- self.boxed.layer(inner)
- }
-}
-
-impl<In, T, U, E> Clone for BoxCloneSyncServiceLayer<In, T, U, E> {
- fn clone(&self) -> Self {
- Self {
- boxed: Arc::clone(&self.boxed),
- }
- }
-}
-
-impl<In, T, U, E> fmt::Debug for BoxCloneSyncServiceLayer<In, T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxCloneSyncServiceLayer").finish()
- }
-}
diff --git a/vendor/tower/src/util/boxed/mod.rs b/vendor/tower/src/util/boxed/mod.rs
deleted file mode 100644
index 7da5d63c..00000000
--- a/vendor/tower/src/util/boxed/mod.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-mod layer;
-mod layer_clone;
-mod layer_clone_sync;
-mod sync;
-mod unsync;
-
-#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
-pub use self::{
- layer::BoxLayer, layer_clone::BoxCloneServiceLayer, layer_clone_sync::BoxCloneSyncServiceLayer,
- sync::BoxService, unsync::UnsyncBoxService,
-};
diff --git a/vendor/tower/src/util/boxed/sync.rs b/vendor/tower/src/util/boxed/sync.rs
deleted file mode 100644
index 57dcfec7..00000000
--- a/vendor/tower/src/util/boxed/sync.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-use crate::ServiceExt;
-use tower_layer::{layer_fn, LayerFn};
-use tower_service::Service;
-
-use sync_wrapper::SyncWrapper;
-
-use std::fmt;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-/// A boxed `Service + Send` trait object.
-///
-/// [`BoxService`] turns a service into a trait object, allowing the response
-/// future type to be dynamic. This type requires both the service and the
-/// response future to be [`Send`].
-///
-/// If you need a boxed [`Service`] that implements [`Clone`] consider using
-/// [`BoxCloneService`](crate::util::BoxCloneService).
-///
-/// Dynamically dispatched [`Service`] objects allow for erasing the underlying
-/// [`Service`] type and using the `Service` instances as opaque handles. This can
-/// be useful when the service instance cannot be explicitly named for whatever
-/// reason.
-///
-/// # Examples
-///
-/// ```
-/// use futures_util::future::ready;
-/// # use tower_service::Service;
-/// # use tower::util::{BoxService, service_fn};
-/// // Respond to requests using a closure, but closures cannot be named...
-/// # pub fn main() {
-/// let svc = service_fn(|mut request: String| {
-/// request.push_str(" response");
-/// ready(Ok(request))
-/// });
-///
-/// let service: BoxService<String, String, ()> = BoxService::new(svc);
-/// # drop(service);
-/// }
-/// ```
-///
-/// [`Service`]: crate::Service
-/// [`Rc`]: std::rc::Rc
-pub struct BoxService<T, U, E> {
- inner:
- SyncWrapper<Box<dyn Service<T, Response = U, Error = E, Future = BoxFuture<U, E>> + Send>>,
-}
-
-/// A boxed `Future + Send` trait object.
-///
-/// This type alias represents a boxed future that is [`Send`] and can be moved
-/// across threads.
-type BoxFuture<T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send>>;
-
-impl<T, U, E> BoxService<T, U, E> {
- #[allow(missing_docs)]
- pub fn new<S>(inner: S) -> Self
- where
- S: Service<T, Response = U, Error = E> + Send + 'static,
- S::Future: Send + 'static,
- {
- // rust can't infer the type
- let inner: Box<dyn Service<T, Response = U, Error = E, Future = BoxFuture<U, E>> + Send> =
- Box::new(inner.map_future(|f: S::Future| Box::pin(f) as _));
- let inner = SyncWrapper::new(inner);
- BoxService { inner }
- }
-
- /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxService`]
- /// middleware.
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer<S>() -> LayerFn<fn(S) -> Self>
- where
- S: Service<T, Response = U, Error = E> + Send + 'static,
- S::Future: Send + 'static,
- {
- layer_fn(Self::new)
- }
-}
-
-impl<T, U, E> Service<T> for BoxService<T, U, E> {
- type Response = U;
- type Error = E;
- type Future = BoxFuture<U, E>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
- self.inner.get_mut().poll_ready(cx)
- }
-
- fn call(&mut self, request: T) -> BoxFuture<U, E> {
- self.inner.get_mut().call(request)
- }
-}
-
-impl<T, U, E> fmt::Debug for BoxService<T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxService").finish()
- }
-}
-
-#[test]
-fn is_sync() {
- fn assert_sync<T: Sync>() {}
-
- assert_sync::<BoxService<(), (), ()>>();
-}
diff --git a/vendor/tower/src/util/boxed/unsync.rs b/vendor/tower/src/util/boxed/unsync.rs
deleted file mode 100644
index f645f169..00000000
--- a/vendor/tower/src/util/boxed/unsync.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-use tower_layer::{layer_fn, LayerFn};
-use tower_service::Service;
-
-use std::fmt;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-/// A boxed [`Service`] trait object.
-pub struct UnsyncBoxService<T, U, E> {
- inner: Box<dyn Service<T, Response = U, Error = E, Future = UnsyncBoxFuture<U, E>>>,
-}
-
-/// A boxed [`Future`] trait object.
-///
-/// This type alias represents a boxed future that is *not* [`Send`] and must
-/// remain on the current thread.
-type UnsyncBoxFuture<T, E> = Pin<Box<dyn Future<Output = Result<T, E>>>>;
-
-#[derive(Debug)]
-struct UnsyncBoxed<S> {
- inner: S,
-}
-
-impl<T, U, E> UnsyncBoxService<T, U, E> {
- #[allow(missing_docs)]
- pub fn new<S>(inner: S) -> Self
- where
- S: Service<T, Response = U, Error = E> + 'static,
- S::Future: 'static,
- {
- let inner = Box::new(UnsyncBoxed { inner });
- UnsyncBoxService { inner }
- }
-
- /// Returns a [`Layer`] for wrapping a [`Service`] in an [`UnsyncBoxService`] middleware.
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer<S>() -> LayerFn<fn(S) -> Self>
- where
- S: Service<T, Response = U, Error = E> + 'static,
- S::Future: 'static,
- {
- layer_fn(Self::new)
- }
-}
-
-impl<T, U, E> Service<T> for UnsyncBoxService<T, U, E> {
- type Response = U;
- type Error = E;
- type Future = UnsyncBoxFuture<U, E>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
- self.inner.poll_ready(cx)
- }
-
- fn call(&mut self, request: T) -> UnsyncBoxFuture<U, E> {
- self.inner.call(request)
- }
-}
-
-impl<T, U, E> fmt::Debug for UnsyncBoxService<T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("UnsyncBoxService").finish()
- }
-}
-
-impl<S, Request> Service<Request> for UnsyncBoxed<S>
-where
- S: Service<Request> + 'static,
- S::Future: 'static,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = Pin<Box<dyn Future<Output = Result<S::Response, S::Error>>>>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx)
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- Box::pin(self.inner.call(request))
- }
-}
diff --git a/vendor/tower/src/util/boxed_clone.rs b/vendor/tower/src/util/boxed_clone.rs
deleted file mode 100644
index 1209fd2e..00000000
--- a/vendor/tower/src/util/boxed_clone.rs
+++ /dev/null
@@ -1,136 +0,0 @@
-use super::ServiceExt;
-use futures_util::future::BoxFuture;
-use std::{
- fmt,
- task::{Context, Poll},
-};
-use tower_layer::{layer_fn, LayerFn};
-use tower_service::Service;
-
-/// A [`Clone`] + [`Send`] boxed [`Service`].
-///
-/// [`BoxCloneService`] turns a service into a trait object, allowing the
-/// response future type to be dynamic, and allowing the service to be cloned.
-///
-/// This is similar to [`BoxService`](super::BoxService) except the resulting
-/// service implements [`Clone`].
-///
-/// # Example
-///
-/// ```
-/// use tower::{Service, ServiceBuilder, BoxError, util::BoxCloneService};
-/// use std::time::Duration;
-/// #
-/// # struct Request;
-/// # struct Response;
-/// # impl Response {
-/// # fn new() -> Self { Self }
-/// # }
-///
-/// // This service has a complex type that is hard to name
-/// let service = ServiceBuilder::new()
-/// .map_request(|req| {
-/// println!("received request");
-/// req
-/// })
-/// .map_response(|res| {
-/// println!("response produced");
-/// res
-/// })
-/// .load_shed()
-/// .concurrency_limit(64)
-/// .timeout(Duration::from_secs(10))
-/// .service_fn(|req: Request| async {
-/// Ok::<_, BoxError>(Response::new())
-/// });
-/// # let service = assert_service(service);
-///
-/// // `BoxCloneService` will erase the type so it's nameable
-/// let service: BoxCloneService<Request, Response, BoxError> = BoxCloneService::new(service);
-/// # let service = assert_service(service);
-///
-/// // And we can still clone the service
-/// let cloned_service = service.clone();
-/// #
-/// # fn assert_service<S, R>(svc: S) -> S
-/// # where S: Service<R> { svc }
-/// ```
-pub struct BoxCloneService<T, U, E>(
- Box<
- dyn CloneService<T, Response = U, Error = E, Future = BoxFuture<'static, Result<U, E>>>
- + Send,
- >,
-);
-
-impl<T, U, E> BoxCloneService<T, U, E> {
- /// Create a new `BoxCloneService`.
- pub fn new<S>(inner: S) -> Self
- where
- S: Service<T, Response = U, Error = E> + Clone + Send + 'static,
- S::Future: Send + 'static,
- {
- let inner = inner.map_future(|f| Box::pin(f) as _);
- BoxCloneService(Box::new(inner))
- }
-
- /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxCloneService`]
- /// middleware.
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer<S>() -> LayerFn<fn(S) -> Self>
- where
- S: Service<T, Response = U, Error = E> + Clone + Send + 'static,
- S::Future: Send + 'static,
- {
- layer_fn(Self::new)
- }
-}
-
-impl<T, U, E> Service<T> for BoxCloneService<T, U, E> {
- type Response = U;
- type Error = E;
- type Future = BoxFuture<'static, Result<U, E>>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
- self.0.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, request: T) -> Self::Future {
- self.0.call(request)
- }
-}
-
-impl<T, U, E> Clone for BoxCloneService<T, U, E> {
- fn clone(&self) -> Self {
- Self(self.0.clone_box())
- }
-}
-
-trait CloneService<R>: Service<R> {
- fn clone_box(
- &self,
- ) -> Box<
- dyn CloneService<R, Response = Self::Response, Error = Self::Error, Future = Self::Future>
- + Send,
- >;
-}
-
-impl<R, T> CloneService<R> for T
-where
- T: Service<R> + Send + Clone + 'static,
-{
- fn clone_box(
- &self,
- ) -> Box<dyn CloneService<R, Response = T::Response, Error = T::Error, Future = T::Future> + Send>
- {
- Box::new(self.clone())
- }
-}
-
-impl<T, U, E> fmt::Debug for BoxCloneService<T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxCloneService").finish()
- }
-}
diff --git a/vendor/tower/src/util/boxed_clone_sync.rs b/vendor/tower/src/util/boxed_clone_sync.rs
deleted file mode 100644
index d62e8ff2..00000000
--- a/vendor/tower/src/util/boxed_clone_sync.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-use super::ServiceExt;
-use futures_util::future::BoxFuture;
-use std::{
- fmt,
- task::{Context, Poll},
-};
-use tower_layer::{layer_fn, LayerFn};
-use tower_service::Service;
-
-/// A [`Clone`] + [`Send`] + [`Sync`] boxed [`Service`].
-///
-/// [`BoxCloneSyncService`] turns a service into a trait object, allowing the
-/// response future type to be dynamic, and allowing the service to be cloned and shared.
-///
-/// This is similar to [`BoxCloneService`](super::BoxCloneService) except the resulting
-/// service implements [`Sync`].
-/// ```
-pub struct BoxCloneSyncService<T, U, E>(
- Box<
- dyn CloneService<T, Response = U, Error = E, Future = BoxFuture<'static, Result<U, E>>>
- + Send
- + Sync,
- >,
-);
-
-impl<T, U, E> BoxCloneSyncService<T, U, E> {
- /// Create a new `BoxCloneSyncService`.
- pub fn new<S>(inner: S) -> Self
- where
- S: Service<T, Response = U, Error = E> + Clone + Send + Sync + 'static,
- S::Future: Send + 'static,
- {
- let inner = inner.map_future(|f| Box::pin(f) as _);
- BoxCloneSyncService(Box::new(inner))
- }
-
- /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxCloneSyncService`]
- /// middleware.
- ///
- /// [`Layer`]: crate::Layer
- pub fn layer<S>() -> LayerFn<fn(S) -> Self>
- where
- S: Service<T, Response = U, Error = E> + Clone + Send + Sync + 'static,
- S::Future: Send + 'static,
- {
- layer_fn(Self::new)
- }
-}
-
-impl<T, U, E> Service<T> for BoxCloneSyncService<T, U, E> {
- type Response = U;
- type Error = E;
- type Future = BoxFuture<'static, Result<U, E>>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
- self.0.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, request: T) -> Self::Future {
- self.0.call(request)
- }
-}
-
-impl<T, U, E> Clone for BoxCloneSyncService<T, U, E> {
- fn clone(&self) -> Self {
- Self(self.0.clone_box())
- }
-}
-
-trait CloneService<R>: Service<R> {
- fn clone_box(
- &self,
- ) -> Box<
- dyn CloneService<R, Response = Self::Response, Error = Self::Error, Future = Self::Future>
- + Send
- + Sync,
- >;
-}
-
-impl<R, T> CloneService<R> for T
-where
- T: Service<R> + Send + Sync + Clone + 'static,
-{
- fn clone_box(
- &self,
- ) -> Box<
- dyn CloneService<R, Response = T::Response, Error = T::Error, Future = T::Future>
- + Send
- + Sync,
- > {
- Box::new(self.clone())
- }
-}
-
-impl<T, U, E> fmt::Debug for BoxCloneSyncService<T, U, E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_struct("BoxCloneSyncService").finish()
- }
-}
diff --git a/vendor/tower/src/util/call_all/common.rs b/vendor/tower/src/util/call_all/common.rs
deleted file mode 100644
index 9f2490b6..00000000
--- a/vendor/tower/src/util/call_all/common.rs
+++ /dev/null
@@ -1,141 +0,0 @@
-use futures_core::{ready, Stream};
-use pin_project_lite::pin_project;
-use std::{
- fmt,
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// The [`Future`] returned by the [`ServiceExt::call_all`] combinator.
- pub(crate) struct CallAll<Svc, S, Q>
- where
- S: Stream,
- {
- service: Option<Svc>,
- #[pin]
- stream: S,
- queue: Q,
- eof: bool,
- curr_req: Option<S::Item>
- }
-}
-
-impl<Svc, S, Q> fmt::Debug for CallAll<Svc, S, Q>
-where
- Svc: fmt::Debug,
- S: Stream + fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("CallAll")
- .field("service", &self.service)
- .field("stream", &self.stream)
- .field("eof", &self.eof)
- .finish()
- }
-}
-
-pub(crate) trait Drive<F: Future> {
- fn is_empty(&self) -> bool;
-
- fn push(&mut self, future: F);
-
- fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>>;
-}
-
-impl<Svc, S, Q> CallAll<Svc, S, Q>
-where
- Svc: Service<S::Item>,
- S: Stream,
- Q: Drive<Svc::Future>,
-{
- pub(crate) const fn new(service: Svc, stream: S, queue: Q) -> CallAll<Svc, S, Q> {
- CallAll {
- service: Some(service),
- stream,
- queue,
- eof: false,
- curr_req: None,
- }
- }
-
- /// Extract the wrapped [`Service`].
- pub(crate) fn into_inner(mut self) -> Svc {
- self.service.take().expect("Service already taken")
- }
-
- /// Extract the wrapped [`Service`].
- pub(crate) fn take_service(self: Pin<&mut Self>) -> Svc {
- self.project()
- .service
- .take()
- .expect("Service already taken")
- }
-
- pub(crate) fn unordered(mut self) -> super::CallAllUnordered<Svc, S> {
- assert!(self.queue.is_empty() && !self.eof);
-
- super::CallAllUnordered::new(self.service.take().unwrap(), self.stream)
- }
-}
-
-impl<Svc, S, Q> Stream for CallAll<Svc, S, Q>
-where
- Svc: Service<S::Item>,
- S: Stream,
- Q: Drive<Svc::Future>,
-{
- type Item = Result<Svc::Response, Svc::Error>;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- let mut this = self.project();
-
- loop {
- // First, see if we have any responses to yield
- if let Poll::Ready(r) = this.queue.poll(cx) {
- if let Some(rsp) = r.transpose()? {
- return Poll::Ready(Some(Ok(rsp)));
- }
- }
-
- // If there are no more requests coming, check if we're done
- if *this.eof {
- if this.queue.is_empty() {
- return Poll::Ready(None);
- } else {
- return Poll::Pending;
- }
- }
-
- // If not done, and we don't have a stored request, gather the next request from the
- // stream (if there is one), or return `Pending` if the stream is not ready.
- if this.curr_req.is_none() {
- *this.curr_req = match ready!(this.stream.as_mut().poll_next(cx)) {
- Some(next_req) => Some(next_req),
- None => {
- // Mark that there will be no more requests.
- *this.eof = true;
- continue;
- }
- };
- }
-
- // Then, see that the service is ready for another request
- let svc = this
- .service
- .as_mut()
- .expect("Using CallAll after extracting inner Service");
-
- if let Err(e) = ready!(svc.poll_ready(cx)) {
- // Set eof to prevent the service from being called again after a `poll_ready` error
- *this.eof = true;
- return Poll::Ready(Some(Err(e)));
- }
-
- // Unwrap: The check above always sets `this.curr_req` if none.
- this.queue.push(svc.call(this.curr_req.take().unwrap()));
- }
- }
-}
diff --git a/vendor/tower/src/util/call_all/mod.rs b/vendor/tower/src/util/call_all/mod.rs
deleted file mode 100644
index 0cac72d1..00000000
--- a/vendor/tower/src/util/call_all/mod.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
-//!
-//! [`Service<Request>`]: crate::Service
-//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
-
-mod common;
-mod ordered;
-mod unordered;
-
-#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
-pub use self::{ordered::CallAll, unordered::CallAllUnordered};
diff --git a/vendor/tower/src/util/call_all/ordered.rs b/vendor/tower/src/util/call_all/ordered.rs
deleted file mode 100644
index 9a283916..00000000
--- a/vendor/tower/src/util/call_all/ordered.rs
+++ /dev/null
@@ -1,177 +0,0 @@
-//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
-//!
-//! [`Service<Request>`]: crate::Service
-//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
-
-use super::common;
-use futures_core::Stream;
-use futures_util::stream::FuturesOrdered;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// This is a [`Stream`] of responses resulting from calling the wrapped [`Service`] for each
- /// request received on the wrapped [`Stream`].
- ///
- /// ```rust
- /// # use std::task::{Poll, Context};
- /// # use std::cell::Cell;
- /// # use std::error::Error;
- /// # use std::rc::Rc;
- /// #
- /// use futures::future::{ready, Ready};
- /// use futures::StreamExt;
- /// use futures::channel::mpsc;
- /// use tower_service::Service;
- /// use tower::util::ServiceExt;
- ///
- /// // First, we need to have a Service to process our requests.
- /// #[derive(Debug, Eq, PartialEq)]
- /// struct FirstLetter;
- /// impl Service<&'static str> for FirstLetter {
- /// type Response = &'static str;
- /// type Error = Box<dyn Error + Send + Sync>;
- /// type Future = Ready<Result<Self::Response, Self::Error>>;
- ///
- /// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// Poll::Ready(Ok(()))
- /// }
- ///
- /// fn call(&mut self, req: &'static str) -> Self::Future {
- /// ready(Ok(&req[..1]))
- /// }
- /// }
- ///
- /// #[tokio::main]
- /// async fn main() {
- /// // Next, we need a Stream of requests.
- // TODO(eliza): when `tokio-util` has a nice way to convert MPSCs to streams,
- // tokio::sync::mpsc again?
- /// let (mut reqs, rx) = mpsc::unbounded();
- /// // Note that we have to help Rust out here by telling it what error type to use.
- /// // Specifically, it has to be From<Service::Error> + From<Stream::Error>.
- /// let mut rsps = FirstLetter.call_all(rx);
- ///
- /// // Now, let's send a few requests and then check that we get the corresponding responses.
- /// reqs.unbounded_send("one").unwrap();
- /// reqs.unbounded_send("two").unwrap();
- /// reqs.unbounded_send("three").unwrap();
- /// drop(reqs);
- ///
- /// // We then loop over the response `Stream` that we get back from call_all.
- /// let mut i = 0usize;
- /// while let Some(rsp) = rsps.next().await {
- /// // Each response is a Result (we could also have used TryStream::try_next)
- /// match (i + 1, rsp.unwrap()) {
- /// (1, "o") |
- /// (2, "t") |
- /// (3, "t") => {}
- /// (n, i) => {
- /// unreachable!("{}. response was '{}'", n, i);
- /// }
- /// }
- /// i += 1;
- /// }
- ///
- /// // And at the end, we can get the Service back when there are no more requests.
- /// assert_eq!(rsps.into_inner(), FirstLetter);
- /// }
- /// ```
- ///
- /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
- #[derive(Debug)]
- pub struct CallAll<Svc, S>
- where
- Svc: Service<S::Item>,
- S: Stream,
- {
- #[pin]
- inner: common::CallAll<Svc, S, FuturesOrdered<Svc::Future>>,
- }
-}
-
-impl<Svc, S> CallAll<Svc, S>
-where
- Svc: Service<S::Item>,
- S: Stream,
-{
- /// Create new [`CallAll`] combinator.
- ///
- /// Each request yielded by `stream` is passed to `svc`, and the resulting responses are
- /// yielded in the same order by the implementation of [`Stream`] for [`CallAll`].
- ///
- /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
- pub fn new(service: Svc, stream: S) -> CallAll<Svc, S> {
- CallAll {
- inner: common::CallAll::new(service, stream, FuturesOrdered::new()),
- }
- }
-
- /// Extract the wrapped [`Service`].
- ///
- /// # Panics
- ///
- /// Panics if [`take_service`] was already called.
- ///
- /// [`take_service`]: crate::util::CallAll::take_service
- pub fn into_inner(self) -> Svc {
- self.inner.into_inner()
- }
-
- /// Extract the wrapped [`Service`].
- ///
- /// This [`CallAll`] can no longer be used after this function has been called.
- ///
- /// # Panics
- ///
- /// Panics if [`take_service`] was already called.
- ///
- /// [`take_service`]: crate::util::CallAll::take_service
- pub fn take_service(self: Pin<&mut Self>) -> Svc {
- self.project().inner.take_service()
- }
-
- /// Return responses as they are ready, regardless of the initial order.
- ///
- /// This function must be called before the stream is polled.
- ///
- /// # Panics
- ///
- /// Panics if [`poll`] was called.
- ///
- /// [`poll`]: std::future::Future::poll
- pub fn unordered(self) -> super::CallAllUnordered<Svc, S> {
- self.inner.unordered()
- }
-}
-
-impl<Svc, S> Stream for CallAll<Svc, S>
-where
- Svc: Service<S::Item>,
- S: Stream,
-{
- type Item = Result<Svc::Response, Svc::Error>;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- self.project().inner.poll_next(cx)
- }
-}
-
-impl<F: Future> common::Drive<F> for FuturesOrdered<F> {
- fn is_empty(&self) -> bool {
- FuturesOrdered::is_empty(self)
- }
-
- fn push(&mut self, future: F) {
- FuturesOrdered::push_back(self, future)
- }
-
- fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>> {
- Stream::poll_next(Pin::new(self), cx)
- }
-}
diff --git a/vendor/tower/src/util/call_all/unordered.rs b/vendor/tower/src/util/call_all/unordered.rs
deleted file mode 100644
index 3038932f..00000000
--- a/vendor/tower/src/util/call_all/unordered.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
-//!
-//! [`Service<Request>`]: crate::Service
-//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
-
-use super::common;
-use futures_core::Stream;
-use futures_util::stream::FuturesUnordered;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// A stream of responses received from the inner service in received order.
- ///
- /// Similar to [`CallAll`] except, instead of yielding responses in request order,
- /// responses are returned as they are available.
- ///
- /// [`CallAll`]: crate::util::CallAll
- #[derive(Debug)]
- pub struct CallAllUnordered<Svc, S>
- where
- Svc: Service<S::Item>,
- S: Stream,
- {
- #[pin]
- inner: common::CallAll<Svc, S, FuturesUnordered<Svc::Future>>,
- }
-}
-
-impl<Svc, S> CallAllUnordered<Svc, S>
-where
- Svc: Service<S::Item>,
- S: Stream,
-{
- /// Create new [`CallAllUnordered`] combinator.
- ///
- /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
- pub fn new(service: Svc, stream: S) -> CallAllUnordered<Svc, S> {
- CallAllUnordered {
- inner: common::CallAll::new(service, stream, FuturesUnordered::new()),
- }
- }
-
- /// Extract the wrapped [`Service`].
- ///
- /// # Panics
- ///
- /// Panics if [`take_service`] was already called.
- ///
- /// [`take_service`]: crate::util::CallAllUnordered::take_service
- pub fn into_inner(self) -> Svc {
- self.inner.into_inner()
- }
-
- /// Extract the wrapped `Service`.
- ///
- /// This [`CallAllUnordered`] can no longer be used after this function has been called.
- ///
- /// # Panics
- ///
- /// Panics if [`take_service`] was already called.
- ///
- /// [`take_service`]: crate::util::CallAllUnordered::take_service
- pub fn take_service(self: Pin<&mut Self>) -> Svc {
- self.project().inner.take_service()
- }
-}
-
-impl<Svc, S> Stream for CallAllUnordered<Svc, S>
-where
- Svc: Service<S::Item>,
- S: Stream,
-{
- type Item = Result<Svc::Response, Svc::Error>;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- self.project().inner.poll_next(cx)
- }
-}
-
-impl<F: Future> common::Drive<F> for FuturesUnordered<F> {
- fn is_empty(&self) -> bool {
- FuturesUnordered::is_empty(self)
- }
-
- fn push(&mut self, future: F) {
- FuturesUnordered::push(self, future)
- }
-
- fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>> {
- Stream::poll_next(Pin::new(self), cx)
- }
-}
diff --git a/vendor/tower/src/util/either.rs b/vendor/tower/src/util/either.rs
deleted file mode 100644
index 371abb4d..00000000
--- a/vendor/tower/src/util/either.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-//! Contains [`Either`] and related types and functions.
-//!
-//! See [`Either`] documentation for more details.
-
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Combine two different service types into a single type.
-///
-/// Both services must be of the same request, response, and error types.
-/// [`Either`] is useful for handling conditional branching in service middleware
-/// to different inner service types.
-#[derive(Clone, Copy, Debug)]
-pub enum Either<A, B> {
- #[allow(missing_docs)]
- Left(A),
- #[allow(missing_docs)]
- Right(B),
-}
-
-impl<A, B, Request> Service<Request> for Either<A, B>
-where
- A: Service<Request>,
- B: Service<Request, Response = A::Response, Error = A::Error>,
-{
- type Response = A::Response;
- type Error = A::Error;
- type Future = EitherResponseFuture<A::Future, B::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self {
- Either::Left(service) => service.poll_ready(cx),
- Either::Right(service) => service.poll_ready(cx),
- }
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- match self {
- Either::Left(service) => EitherResponseFuture {
- kind: Kind::Left {
- inner: service.call(request),
- },
- },
- Either::Right(service) => EitherResponseFuture {
- kind: Kind::Right {
- inner: service.call(request),
- },
- },
- }
- }
-}
-
-pin_project! {
- /// Response future for [`Either`].
- pub struct EitherResponseFuture<A, B> {
- #[pin]
- kind: Kind<A, B>
- }
-}
-
-pin_project! {
- #[project = KindProj]
- enum Kind<A, B> {
- Left { #[pin] inner: A },
- Right { #[pin] inner: B },
- }
-}
-
-impl<A, B> Future for EitherResponseFuture<A, B>
-where
- A: Future,
- B: Future<Output = A::Output>,
-{
- type Output = A::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- match self.project().kind.project() {
- KindProj::Left { inner } => inner.poll(cx),
- KindProj::Right { inner } => inner.poll(cx),
- }
- }
-}
-
-impl<S, A, B> Layer<S> for Either<A, B>
-where
- A: Layer<S>,
- B: Layer<S>,
-{
- type Service = Either<A::Service, B::Service>;
-
- fn layer(&self, inner: S) -> Self::Service {
- match self {
- Either::Left(layer) => Either::Left(layer.layer(inner)),
- Either::Right(layer) => Either::Right(layer.layer(inner)),
- }
- }
-}
diff --git a/vendor/tower/src/util/future_service.rs b/vendor/tower/src/util/future_service.rs
deleted file mode 100644
index c0a36df2..00000000
--- a/vendor/tower/src/util/future_service.rs
+++ /dev/null
@@ -1,215 +0,0 @@
-use std::fmt;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-/// Returns a new [`FutureService`] for the given future.
-///
-/// A [`FutureService`] allows you to treat a future that resolves to a service as a service. This
-/// can be useful for services that are created asynchronously.
-///
-/// # Example
-/// ```
-/// use tower::{service_fn, Service, ServiceExt};
-/// use tower::util::future_service;
-/// use std::convert::Infallible;
-///
-/// # fn main() {
-/// # async {
-/// // A future which outputs a type implementing `Service`.
-/// let future_of_a_service = async {
-/// let svc = service_fn(|_req: ()| async { Ok::<_, Infallible>("ok") });
-/// Ok::<_, Infallible>(svc)
-/// };
-///
-/// // Wrap the future with a `FutureService`, allowing it to be used
-/// // as a service without awaiting the future's completion:
-/// let mut svc = future_service(Box::pin(future_of_a_service));
-///
-/// // Now, when we wait for the service to become ready, it will
-/// // drive the future to completion internally.
-/// let svc = svc.ready().await.unwrap();
-/// let res = svc.call(()).await.unwrap();
-/// # };
-/// # }
-/// ```
-///
-/// # Regarding the [`Unpin`] bound
-///
-/// The [`Unpin`] bound on `F` is necessary because the future will be polled in
-/// [`Service::poll_ready`] which doesn't have a pinned receiver (it takes `&mut self` and not `self:
-/// Pin<&mut Self>`). So we cannot put the future into a `Pin` without requiring `Unpin`.
-///
-/// This will most likely come up if you're calling `future_service` with an async block. In that
-/// case you can use `Box::pin(async { ... })` as shown in the example.
-pub fn future_service<F, S, R, E>(future: F) -> FutureService<F, S>
-where
- F: Future<Output = Result<S, E>> + Unpin,
- S: Service<R, Error = E>,
-{
- FutureService::new(future)
-}
-
-/// A type that implements [`Service`] for a [`Future`] that produces a [`Service`].
-///
-/// See [`future_service`] for more details.
-#[derive(Clone)]
-pub struct FutureService<F, S> {
- state: State<F, S>,
-}
-
-impl<F, S> FutureService<F, S> {
- /// Returns a new [`FutureService`] for the given future.
- ///
- /// A [`FutureService`] allows you to treat a future that resolves to a service as a service. This
- /// can be useful for services that are created asynchronously.
- ///
- /// # Example
- /// ```
- /// use tower::{service_fn, Service, ServiceExt};
- /// use tower::util::FutureService;
- /// use std::convert::Infallible;
- ///
- /// # fn main() {
- /// # async {
- /// // A future which outputs a type implementing `Service`.
- /// let future_of_a_service = async {
- /// let svc = service_fn(|_req: ()| async { Ok::<_, Infallible>("ok") });
- /// Ok::<_, Infallible>(svc)
- /// };
- ///
- /// // Wrap the future with a `FutureService`, allowing it to be used
- /// // as a service without awaiting the future's completion:
- /// let mut svc = FutureService::new(Box::pin(future_of_a_service));
- ///
- /// // Now, when we wait for the service to become ready, it will
- /// // drive the future to completion internally.
- /// let svc = svc.ready().await.unwrap();
- /// let res = svc.call(()).await.unwrap();
- /// # };
- /// # }
- /// ```
- ///
- /// # Regarding the [`Unpin`] bound
- ///
- /// The [`Unpin`] bound on `F` is necessary because the future will be polled in
- /// [`Service::poll_ready`] which doesn't have a pinned receiver (it takes `&mut self` and not `self:
- /// Pin<&mut Self>`). So we cannot put the future into a `Pin` without requiring `Unpin`.
- ///
- /// This will most likely come up if you're calling `future_service` with an async block. In that
- /// case you can use `Box::pin(async { ... })` as shown in the example.
- pub const fn new(future: F) -> Self {
- Self {
- state: State::Future(future),
- }
- }
-}
-
-impl<F, S> fmt::Debug for FutureService<F, S>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("FutureService")
- .field("state", &format_args!("{:?}", self.state))
- .finish()
- }
-}
-
-#[derive(Clone)]
-enum State<F, S> {
- Future(F),
- Service(S),
-}
-
-impl<F, S> fmt::Debug for State<F, S>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- State::Future(_) => f
- .debug_tuple("State::Future")
- .field(&format_args!("<{}>", std::any::type_name::<F>()))
- .finish(),
- State::Service(svc) => f.debug_tuple("State::Service").field(svc).finish(),
- }
- }
-}
-
-impl<F, S, R, E> Service<R> for FutureService<F, S>
-where
- F: Future<Output = Result<S, E>> + Unpin,
- S: Service<R, Error = E>,
-{
- type Response = S::Response;
- type Error = E;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- loop {
- self.state = match &mut self.state {
- State::Future(fut) => {
- let fut = Pin::new(fut);
- let svc = futures_core::ready!(fut.poll(cx)?);
- State::Service(svc)
- }
- State::Service(svc) => return svc.poll_ready(cx),
- };
- }
- }
-
- fn call(&mut self, req: R) -> Self::Future {
- if let State::Service(svc) = &mut self.state {
- svc.call(req)
- } else {
- panic!("FutureService::call was called before FutureService::poll_ready")
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::util::{future_service, ServiceExt};
- use crate::Service;
- use futures::future::{ready, Ready};
- use std::convert::Infallible;
-
- #[tokio::test]
- async fn pending_service_debug_impl() {
- let mut pending_svc = future_service(ready(Ok(DebugService)));
-
- assert_eq!(
- format!("{:?}", pending_svc),
- "FutureService { state: State::Future(<futures_util::future::ready::Ready<core::result::Result<tower::util::future_service::tests::DebugService, core::convert::Infallible>>>) }"
- );
-
- pending_svc.ready().await.unwrap();
-
- assert_eq!(
- format!("{:?}", pending_svc),
- "FutureService { state: State::Service(DebugService) }"
- );
- }
-
- #[derive(Debug)]
- struct DebugService;
-
- impl Service<()> for DebugService {
- type Response = ();
- type Error = Infallible;
- type Future = Ready<Result<Self::Response, Self::Error>>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Ok(()).into()
- }
-
- fn call(&mut self, _req: ()) -> Self::Future {
- ready(Ok(()))
- }
- }
-}
diff --git a/vendor/tower/src/util/map_err.rs b/vendor/tower/src/util/map_err.rs
deleted file mode 100644
index 1b936acb..00000000
--- a/vendor/tower/src/util/map_err.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-use futures_util::{future, TryFutureExt};
-use std::fmt;
-use std::task::{Context, Poll};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Service returned by the [`map_err`] combinator.
-///
-/// [`map_err`]: crate::util::ServiceExt::map_err
-#[derive(Clone)]
-pub struct MapErr<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for MapErr<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapErr")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-/// A [`Layer`] that produces [`MapErr`] services.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Clone, Debug)]
-pub struct MapErrLayer<F> {
- f: F,
-}
-
-opaque_future! {
- /// Response future from [`MapErr`] services.
- ///
- /// [`MapErr`]: crate::util::MapErr
- pub type MapErrFuture<F, N> = future::MapErr<F, N>;
-}
-
-impl<S, F> MapErr<S, F> {
- /// Creates a new [`MapErr`] service.
- pub const fn new(inner: S, f: F) -> Self {
- MapErr { f, inner }
- }
-
- /// Returns a new [`Layer`] that produces [`MapErr`] services.
- ///
- /// This is a convenience function that simply calls [`MapErrLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> MapErrLayer<F> {
- MapErrLayer { f }
- }
-}
-
-impl<S, F, Request, Error> Service<Request> for MapErr<S, F>
-where
- S: Service<Request>,
- F: FnOnce(S::Error) -> Error + Clone,
-{
- type Response = S::Response;
- type Error = Error;
- type Future = MapErrFuture<S::Future, F>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(self.f.clone())
- }
-
- #[inline]
- fn call(&mut self, request: Request) -> Self::Future {
- MapErrFuture::new(self.inner.call(request).map_err(self.f.clone()))
- }
-}
-
-impl<F> MapErrLayer<F> {
- /// Creates a new [`MapErrLayer`].
- pub const fn new(f: F) -> Self {
- MapErrLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for MapErrLayer<F>
-where
- F: Clone,
-{
- type Service = MapErr<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- MapErr {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/src/util/map_future.rs b/vendor/tower/src/util/map_future.rs
deleted file mode 100644
index 55bf96d0..00000000
--- a/vendor/tower/src/util/map_future.rs
+++ /dev/null
@@ -1,113 +0,0 @@
-use std::{
- fmt,
- future::Future,
- task::{Context, Poll},
-};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// [`Service`] returned by the [`map_future`] combinator.
-///
-/// [`map_future`]: crate::util::ServiceExt::map_future
-#[derive(Clone)]
-pub struct MapFuture<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> MapFuture<S, F> {
- /// Creates a new [`MapFuture`] service.
- pub const fn new(inner: S, f: F) -> Self {
- Self { inner, f }
- }
-
- /// Returns a new [`Layer`] that produces [`MapFuture`] services.
- ///
- /// This is a convenience function that simply calls [`MapFutureLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> MapFutureLayer<F> {
- MapFutureLayer::new(f)
- }
-
- /// Get a reference to the inner service
- pub fn get_ref(&self) -> &S {
- &self.inner
- }
-
- /// Get a mutable reference to the inner service
- pub fn get_mut(&mut self) -> &mut S {
- &mut self.inner
- }
-
- /// Consume `self`, returning the inner service
- pub fn into_inner(self) -> S {
- self.inner
- }
-}
-
-impl<R, S, F, T, E, Fut> Service<R> for MapFuture<S, F>
-where
- S: Service<R>,
- F: FnMut(S::Future) -> Fut,
- E: From<S::Error>,
- Fut: Future<Output = Result<T, E>>,
-{
- type Response = T;
- type Error = E;
- type Future = Fut;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(From::from)
- }
-
- fn call(&mut self, req: R) -> Self::Future {
- (self.f)(self.inner.call(req))
- }
-}
-
-impl<S, F> fmt::Debug for MapFuture<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapFuture")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-/// A [`Layer`] that produces a [`MapFuture`] service.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Clone)]
-pub struct MapFutureLayer<F> {
- f: F,
-}
-
-impl<F> MapFutureLayer<F> {
- /// Creates a new [`MapFutureLayer`] layer.
- pub const fn new(f: F) -> Self {
- Self { f }
- }
-}
-
-impl<S, F> Layer<S> for MapFutureLayer<F>
-where
- F: Clone,
-{
- type Service = MapFuture<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- MapFuture::new(inner, self.f.clone())
- }
-}
-
-impl<F> fmt::Debug for MapFutureLayer<F> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapFutureLayer")
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
diff --git a/vendor/tower/src/util/map_request.rs b/vendor/tower/src/util/map_request.rs
deleted file mode 100644
index 62f2de3c..00000000
--- a/vendor/tower/src/util/map_request.rs
+++ /dev/null
@@ -1,90 +0,0 @@
-use std::fmt;
-use std::task::{Context, Poll};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Service returned by the [`MapRequest`] combinator.
-///
-/// [`MapRequest`]: crate::util::ServiceExt::map_request
-#[derive(Clone)]
-pub struct MapRequest<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for MapRequest<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapRequest")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-impl<S, F> MapRequest<S, F> {
- /// Creates a new [`MapRequest`] service.
- pub const fn new(inner: S, f: F) -> Self {
- MapRequest { inner, f }
- }
-
- /// Returns a new [`Layer`] that produces [`MapRequest`] services.
- ///
- /// This is a convenience function that simply calls [`MapRequestLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> MapRequestLayer<F> {
- MapRequestLayer { f }
- }
-}
-
-impl<S, F, R1, R2> Service<R1> for MapRequest<S, F>
-where
- S: Service<R2>,
- F: FnMut(R1) -> R2,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
- self.inner.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, request: R1) -> S::Future {
- self.inner.call((self.f)(request))
- }
-}
-
-/// A [`Layer`] that produces [`MapRequest`] services.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Clone, Debug)]
-pub struct MapRequestLayer<F> {
- f: F,
-}
-
-impl<F> MapRequestLayer<F> {
- /// Creates a new [`MapRequestLayer`].
- pub const fn new(f: F) -> Self {
- MapRequestLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for MapRequestLayer<F>
-where
- F: Clone,
-{
- type Service = MapRequest<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- MapRequest {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/src/util/map_response.rs b/vendor/tower/src/util/map_response.rs
deleted file mode 100644
index 8edac10a..00000000
--- a/vendor/tower/src/util/map_response.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-use futures_util::{future::MapOk, TryFutureExt};
-use std::fmt;
-use std::task::{Context, Poll};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Service returned by the [`map_response`] combinator.
-///
-/// [`map_response`]: crate::util::ServiceExt::map_response
-#[derive(Clone)]
-pub struct MapResponse<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for MapResponse<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapResponse")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-/// A [`Layer`] that produces a [`MapResponse`] service.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Debug, Clone)]
-pub struct MapResponseLayer<F> {
- f: F,
-}
-
-opaque_future! {
- /// Response future from [`MapResponse`] services.
- ///
- /// [`MapResponse`]: crate::util::MapResponse
- pub type MapResponseFuture<F, N> = MapOk<F, N>;
-}
-
-impl<S, F> MapResponse<S, F> {
- /// Creates a new `MapResponse` service.
- pub const fn new(inner: S, f: F) -> Self {
- MapResponse { f, inner }
- }
-
- /// Returns a new [`Layer`] that produces [`MapResponse`] services.
- ///
- /// This is a convenience function that simply calls [`MapResponseLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> MapResponseLayer<F> {
- MapResponseLayer { f }
- }
-}
-
-impl<S, F, Request, Response> Service<Request> for MapResponse<S, F>
-where
- S: Service<Request>,
- F: FnOnce(S::Response) -> Response + Clone,
-{
- type Response = Response;
- type Error = S::Error;
- type Future = MapResponseFuture<S::Future, F>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx)
- }
-
- #[inline]
- fn call(&mut self, request: Request) -> Self::Future {
- MapResponseFuture::new(self.inner.call(request).map_ok(self.f.clone()))
- }
-}
-
-impl<F> MapResponseLayer<F> {
- /// Creates a new [`MapResponseLayer`] layer.
- pub const fn new(f: F) -> Self {
- MapResponseLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for MapResponseLayer<F>
-where
- F: Clone,
-{
- type Service = MapResponse<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- MapResponse {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/src/util/map_result.rs b/vendor/tower/src/util/map_result.rs
deleted file mode 100644
index 5a96af2d..00000000
--- a/vendor/tower/src/util/map_result.rs
+++ /dev/null
@@ -1,99 +0,0 @@
-use futures_util::{future::Map, FutureExt};
-use std::fmt;
-use std::task::{Context, Poll};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// Service returned by the [`map_result`] combinator.
-///
-/// [`map_result`]: crate::util::ServiceExt::map_result
-#[derive(Clone)]
-pub struct MapResult<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for MapResult<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MapResult")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-/// A [`Layer`] that produces a [`MapResult`] service.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Debug, Clone)]
-pub struct MapResultLayer<F> {
- f: F,
-}
-
-opaque_future! {
- /// Response future from [`MapResult`] services.
- ///
- /// [`MapResult`]: crate::util::MapResult
- pub type MapResultFuture<F, N> = Map<F, N>;
-}
-
-impl<S, F> MapResult<S, F> {
- /// Creates a new [`MapResult`] service.
- pub const fn new(inner: S, f: F) -> Self {
- MapResult { f, inner }
- }
-
- /// Returns a new [`Layer`] that produces [`MapResult`] services.
- ///
- /// This is a convenience function that simply calls [`MapResultLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> MapResultLayer<F> {
- MapResultLayer { f }
- }
-}
-
-impl<S, F, Request, Response, Error> Service<Request> for MapResult<S, F>
-where
- S: Service<Request>,
- Error: From<S::Error>,
- F: FnOnce(Result<S::Response, S::Error>) -> Result<Response, Error> + Clone,
-{
- type Response = Response;
- type Error = Error;
- type Future = MapResultFuture<S::Future, F>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(Into::into)
- }
-
- #[inline]
- fn call(&mut self, request: Request) -> Self::Future {
- MapResultFuture::new(self.inner.call(request).map(self.f.clone()))
- }
-}
-
-impl<F> MapResultLayer<F> {
- /// Creates a new [`MapResultLayer`] layer.
- pub const fn new(f: F) -> Self {
- MapResultLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for MapResultLayer<F>
-where
- F: Clone,
-{
- type Service = MapResult<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- MapResult {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/src/util/mod.rs b/vendor/tower/src/util/mod.rs
deleted file mode 100644
index 4c56de81..00000000
--- a/vendor/tower/src/util/mod.rs
+++ /dev/null
@@ -1,1073 +0,0 @@
-//! Various utility types and functions that are generally used with Tower.
-
-mod and_then;
-mod boxed;
-mod boxed_clone;
-mod boxed_clone_sync;
-mod call_all;
-mod either;
-
-mod future_service;
-mod map_err;
-mod map_request;
-mod map_response;
-mod map_result;
-
-mod map_future;
-mod oneshot;
-mod optional;
-mod ready;
-mod service_fn;
-mod then;
-
-pub mod rng;
-
-pub use self::{
- and_then::{AndThen, AndThenLayer},
- boxed::{
- BoxCloneServiceLayer, BoxCloneSyncServiceLayer, BoxLayer, BoxService, UnsyncBoxService,
- },
- boxed_clone::BoxCloneService,
- boxed_clone_sync::BoxCloneSyncService,
- either::Either,
- future_service::{future_service, FutureService},
- map_err::{MapErr, MapErrLayer},
- map_future::{MapFuture, MapFutureLayer},
- map_request::{MapRequest, MapRequestLayer},
- map_response::{MapResponse, MapResponseLayer},
- map_result::{MapResult, MapResultLayer},
- oneshot::Oneshot,
- optional::Optional,
- ready::{Ready, ReadyOneshot},
- service_fn::{service_fn, ServiceFn},
- then::{Then, ThenLayer},
-};
-
-pub use self::call_all::{CallAll, CallAllUnordered};
-use std::future::Future;
-
-use crate::layer::util::Identity;
-
-pub mod error {
- //! Error types
-
- pub use super::optional::error as optional;
-}
-
-pub mod future {
- //! Future types
-
- pub use super::and_then::AndThenFuture;
- pub use super::either::EitherResponseFuture;
- pub use super::map_err::MapErrFuture;
- pub use super::map_response::MapResponseFuture;
- pub use super::map_result::MapResultFuture;
- pub use super::optional::future as optional;
- pub use super::then::ThenFuture;
-}
-
-/// An extension trait for `Service`s that provides a variety of convenient
-/// adapters
-pub trait ServiceExt<Request>: tower_service::Service<Request> {
- /// Yields a mutable reference to the service when it is ready to accept a request.
- fn ready(&mut self) -> Ready<'_, Self, Request>
- where
- Self: Sized,
- {
- Ready::new(self)
- }
-
- /// Yields the service when it is ready to accept a request.
- fn ready_oneshot(self) -> ReadyOneshot<Self, Request>
- where
- Self: Sized,
- {
- ReadyOneshot::new(self)
- }
-
- /// Consume this `Service`, calling it with the provided request once it is ready.
- fn oneshot(self, req: Request) -> Oneshot<Self, Request>
- where
- Self: Sized,
- {
- Oneshot::new(self, req)
- }
-
- /// Process all requests from the given [`Stream`], and produce a [`Stream`] of their responses.
- ///
- /// This is essentially [`Stream<Item = Request>`][stream] + `Self` => [`Stream<Item =
- /// Response>`][stream]. See the documentation for [`CallAll`] for
- /// details.
- ///
- /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
- /// [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
- fn call_all<S>(self, reqs: S) -> CallAll<Self, S>
- where
- Self: Sized,
- S: futures_core::Stream<Item = Request>,
- {
- CallAll::new(self, reqs)
- }
-
- /// Executes a new future after this service's future resolves. This does
- /// not alter the behaviour of the [`poll_ready`] method.
- ///
- /// This method can be used to change the [`Response`] type of the service
- /// into a different type. You can use this method to chain along a computation once the
- /// service's response has been resolved.
- ///
- /// [`Response`]: crate::Service::Response
- /// [`poll_ready`]: crate::Service::poll_ready
- ///
- /// # Example
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # struct Record {
- /// # pub name: String,
- /// # pub age: u16
- /// # }
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Record;
- /// # type Error = u8;
- /// # type Future = futures_util::future::Ready<Result<Record, u8>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(Record { name: "Jack".into(), age: 32 }))
- /// # }
- /// # }
- /// #
- /// # async fn avatar_lookup(name: String) -> Result<Vec<u8>, u8> { Ok(vec![]) }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service returning Result<Record, _>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Map the response into a new response
- /// let mut new_service = service.and_then(|record: Record| async move {
- /// let name = record.name;
- /// avatar_lookup(name).await
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let avatar = new_service.call(id).await.unwrap();
- /// # };
- /// # }
- /// ```
- fn and_then<F>(self, f: F) -> AndThen<Self, F>
- where
- Self: Sized,
- F: Clone,
- {
- AndThen::new(self, f)
- }
-
- /// Maps this service's response value to a different value. This does not
- /// alter the behaviour of the [`poll_ready`] method.
- ///
- /// This method can be used to change the [`Response`] type of the service
- /// into a different type. It is similar to the [`Result::map`]
- /// method. You can use this method to chain along a computation once the
- /// service's response has been resolved.
- ///
- /// [`Response`]: crate::Service::Response
- /// [`poll_ready`]: crate::Service::poll_ready
- ///
- /// # Example
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # struct Record {
- /// # pub name: String,
- /// # pub age: u16
- /// # }
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Record;
- /// # type Error = u8;
- /// # type Future = futures_util::future::Ready<Result<Record, u8>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(Record { name: "Jack".into(), age: 32 }))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service returning Result<Record, _>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Map the response into a new response
- /// let mut new_service = service.map_response(|record| record.name);
- ///
- /// // Call the new service
- /// let id = 13;
- /// let name = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await?;
- /// # Ok::<(), u8>(())
- /// # };
- /// # }
- /// ```
- fn map_response<F, Response>(self, f: F) -> MapResponse<Self, F>
- where
- Self: Sized,
- F: FnOnce(Self::Response) -> Response + Clone,
- {
- MapResponse::new(self, f)
- }
-
- /// Maps this service's error value to a different value. This does not
- /// alter the behaviour of the [`poll_ready`] method.
- ///
- /// This method can be used to change the [`Error`] type of the service
- /// into a different type. It is similar to the [`Result::map_err`] method.
- ///
- /// [`Error`]: crate::Service::Error
- /// [`poll_ready`]: crate::Service::poll_ready
- ///
- /// # Example
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # struct Error {
- /// # pub code: u32,
- /// # pub message: String
- /// # }
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = String;
- /// # type Error = Error;
- /// # type Future = futures_util::future::Ready<Result<String, Error>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(String::new()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service returning Result<_, Error>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Map the error to a new error
- /// let mut new_service = service.map_err(|err| err.code);
- ///
- /// // Call the new service
- /// let id = 13;
- /// let code = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await
- /// .unwrap_err();
- /// # Ok::<(), u32>(())
- /// # };
- /// # }
- /// ```
- fn map_err<F, Error>(self, f: F) -> MapErr<Self, F>
- where
- Self: Sized,
- F: FnOnce(Self::Error) -> Error + Clone,
- {
- MapErr::new(self, f)
- }
-
- /// Maps this service's result type (`Result<Self::Response, Self::Error>`)
- /// to a different value, regardless of whether the future succeeds or
- /// fails.
- ///
- /// This is similar to the [`map_response`] and [`map_err`] combinators,
- /// except that the *same* function is invoked when the service's future
- /// completes, whether it completes successfully or fails. This function
- /// takes the [`Result`] returned by the service's future, and returns a
- /// [`Result`].
- ///
- /// Like the standard library's [`Result::and_then`], this method can be
- /// used to implement control flow based on `Result` values. For example, it
- /// may be used to implement error recovery, by turning some [`Err`]
- /// responses from the service into [`Ok`] responses. Similarly, some
- /// successful responses from the service could be rejected, by returning an
- /// [`Err`] conditionally, depending on the value inside the [`Ok`.] Finally,
- /// this method can also be used to implement behaviors that must run when a
- /// service's future completes, regardless of whether it succeeded or failed.
- ///
- /// This method can be used to change the [`Response`] type of the service
- /// into a different type. It can also be used to change the [`Error`] type
- /// of the service. However, because the [`map_result`] function is not applied
- /// to the errors returned by the service's [`poll_ready`] method, it must
- /// be possible to convert the service's [`Error`] type into the error type
- /// returned by the [`map_result`] function. This is trivial when the function
- /// returns the same error type as the service, but in other cases, it can
- /// be useful to use [`BoxError`] to erase differing error types.
- ///
- /// # Examples
- ///
- /// Recovering from certain errors:
- ///
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # struct Record {
- /// # pub name: String,
- /// # pub age: u16
- /// # }
- /// # #[derive(Debug)]
- /// # enum DbError {
- /// # Parse(std::num::ParseIntError),
- /// # NoRecordsFound,
- /// # }
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Vec<Record>;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<Vec<Record>, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(vec![Record { name: "Jack".into(), age: 32 }]))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service returning Result<Vec<Record>, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // If the database returns no records for the query, we just want an empty `Vec`.
- /// let mut new_service = service.map_result(|result| match result {
- /// // If the error indicates that no records matched the query, return an empty
- /// // `Vec` instead.
- /// Err(DbError::NoRecordsFound) => Ok(Vec::new()),
- /// // Propagate all other responses (`Ok` and `Err`) unchanged
- /// x => x,
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let name = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await?;
- /// # Ok::<(), DbError>(())
- /// # };
- /// # }
- /// ```
- ///
- /// Rejecting some `Ok` responses:
- ///
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # struct Record {
- /// # pub name: String,
- /// # pub age: u16
- /// # }
- /// # type DbError = String;
- /// # type AppError = String;
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Record;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<Record, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(Record { name: "Jack".into(), age: 32 }))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// use tower::BoxError;
- ///
- /// // A service returning Result<Record, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // If the user is zero years old, return an error.
- /// let mut new_service = service.map_result(|result| {
- /// let record = result?;
- ///
- /// if record.age == 0 {
- /// // Users must have been born to use our app!
- /// let app_error = AppError::from("users cannot be 0 years old!");
- ///
- /// // Box the error to erase its type (as it can be an `AppError`
- /// // *or* the inner service's `DbError`).
- /// return Err(BoxError::from(app_error));
- /// }
- ///
- /// // Otherwise, return the record.
- /// Ok(record)
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let record = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await?;
- /// # Ok::<(), BoxError>(())
- /// # };
- /// # }
- /// ```
- ///
- /// Performing an action that must be run for both successes and failures:
- ///
- /// ```
- /// # use std::convert::TryFrom;
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = String;
- /// # type Error = u8;
- /// # type Future = futures_util::future::Ready<Result<String, u8>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(String::new()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service returning Result<Record, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Print a message whenever a query completes.
- /// let mut new_service = service.map_result(|result| {
- /// println!("query completed; success={}", result.is_ok());
- /// result
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let response = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await;
- /// # response
- /// # };
- /// # }
- /// ```
- ///
- /// [`map_response`]: ServiceExt::map_response
- /// [`map_err`]: ServiceExt::map_err
- /// [`map_result`]: ServiceExt::map_result
- /// [`Error`]: crate::Service::Error
- /// [`Response`]: crate::Service::Response
- /// [`poll_ready`]: crate::Service::poll_ready
- /// [`BoxError`]: crate::BoxError
- fn map_result<F, Response, Error>(self, f: F) -> MapResult<Self, F>
- where
- Self: Sized,
- Error: From<Self::Error>,
- F: FnOnce(Result<Self::Response, Self::Error>) -> Result<Response, Error> + Clone,
- {
- MapResult::new(self, f)
- }
-
- /// Composes a function *in front of* the service.
- ///
- /// This adapter produces a new service that passes each value through the
- /// given function `f` before sending it to `self`.
- ///
- /// # Example
- /// ```
- /// # use std::convert::TryFrom;
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # impl Service<String> for DatabaseService {
- /// # type Response = String;
- /// # type Error = u8;
- /// # type Future = futures_util::future::Ready<Result<String, u8>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: String) -> Self::Future {
- /// # futures_util::future::ready(Ok(String::new()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service taking a String as a request
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Map the request to a new request
- /// let mut new_service = service.map_request(|id: u32| id.to_string());
- ///
- /// // Call the new service
- /// let id = 13;
- /// let response = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await;
- /// # response
- /// # };
- /// # }
- /// ```
- fn map_request<F, NewRequest>(self, f: F) -> MapRequest<Self, F>
- where
- Self: Sized,
- F: FnMut(NewRequest) -> Request,
- {
- MapRequest::new(self, f)
- }
-
- /// Composes this service with a [`Filter`] that conditionally accepts or
- /// rejects requests based on a [predicate].
- ///
- /// This adapter produces a new service that passes each value through the
- /// given function `predicate` before sending it to `self`.
- ///
- /// # Example
- /// ```
- /// # use std::convert::TryFrom;
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # #[derive(Debug)] enum DbError {
- /// # Parse(std::num::ParseIntError)
- /// # }
- /// #
- /// # impl std::fmt::Display for DbError {
- /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Debug::fmt(self, f) }
- /// # }
- /// # impl std::error::Error for DbError {}
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = String;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<String, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(String::new()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service taking a u32 as a request and returning Result<_, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // Fallibly map the request to a new request
- /// let mut new_service = service
- /// .filter(|id_str: &str| id_str.parse().map_err(DbError::Parse));
- ///
- /// // Call the new service
- /// let id = "13";
- /// let response = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await;
- /// # response
- /// # };
- /// # }
- /// ```
- ///
- /// [`Filter`]: crate::filter::Filter
- /// [predicate]: crate::filter::Predicate
- #[cfg(feature = "filter")]
- fn filter<F, NewRequest>(self, filter: F) -> crate::filter::Filter<Self, F>
- where
- Self: Sized,
- F: crate::filter::Predicate<NewRequest>,
- {
- crate::filter::Filter::new(self, filter)
- }
-
- /// Composes this service with an [`AsyncFilter`] that conditionally accepts or
- /// rejects requests based on an [async predicate].
- ///
- /// This adapter produces a new service that passes each value through the
- /// given function `predicate` before sending it to `self`.
- ///
- /// # Example
- /// ```
- /// # use std::convert::TryFrom;
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # #[derive(Clone)] struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// # #[derive(Debug)]
- /// # enum DbError {
- /// # Rejected
- /// # }
- /// # impl std::fmt::Display for DbError {
- /// # fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { std::fmt::Debug::fmt(self, f) }
- /// # }
- /// # impl std::error::Error for DbError {}
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = String;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<String, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(String::new()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// # async {
- /// // A service taking a u32 as a request and returning Result<_, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// /// Returns `true` if we should query the database for an ID.
- /// async fn should_query(id: u32) -> bool {
- /// // ...
- /// # true
- /// }
- ///
- /// // Filter requests based on `should_query`.
- /// let mut new_service = service
- /// .filter_async(|id: u32| async move {
- /// if should_query(id).await {
- /// return Ok(id);
- /// }
- ///
- /// Err(DbError::Rejected)
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// # let id: u32 = id;
- /// let response = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await;
- /// # response
- /// # };
- /// # }
- /// ```
- ///
- /// [`AsyncFilter`]: crate::filter::AsyncFilter
- /// [asynchronous predicate]: crate::filter::AsyncPredicate
- #[cfg(feature = "filter")]
- fn filter_async<F, NewRequest>(self, filter: F) -> crate::filter::AsyncFilter<Self, F>
- where
- Self: Sized,
- F: crate::filter::AsyncPredicate<NewRequest>,
- {
- crate::filter::AsyncFilter::new(self, filter)
- }
-
- /// Composes an asynchronous function *after* this service.
- ///
- /// This takes a function or closure returning a future, and returns a new
- /// `Service` that chains that function after this service's [`Future`]. The
- /// new `Service`'s future will consist of this service's future, followed
- /// by the future returned by calling the chained function with the future's
- /// [`Output`] type. The chained function is called regardless of whether
- /// this service's future completes with a successful response or with an
- /// error.
- ///
- /// This method can be thought of as an equivalent to the [`futures`
- /// crate]'s [`FutureExt::then`] combinator, but acting on `Service`s that
- /// _return_ futures, rather than on an individual future. Similarly to that
- /// combinator, [`ServiceExt::then`] can be used to implement asynchronous
- /// error recovery, by calling some asynchronous function with errors
- /// returned by this service. Alternatively, it may also be used to call a
- /// fallible async function with the successful response of this service.
- ///
- /// This method can be used to change the [`Response`] type of the service
- /// into a different type. It can also be used to change the [`Error`] type
- /// of the service. However, because the `then` function is not applied
- /// to the errors returned by the service's [`poll_ready`] method, it must
- /// be possible to convert the service's [`Error`] type into the error type
- /// returned by the `then` future. This is trivial when the function
- /// returns the same error type as the service, but in other cases, it can
- /// be useful to use [`BoxError`] to erase differing error types.
- ///
- /// # Examples
- ///
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # type Record = ();
- /// # type DbError = ();
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Record;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<Record, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// // A service returning Result<Record, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- ///
- /// // An async function that attempts to recover from errors returned by the
- /// // database.
- /// async fn recover_from_error(error: DbError) -> Result<Record, DbError> {
- /// // ...
- /// # Ok(())
- /// }
- /// # async {
- ///
- /// // If the database service returns an error, attempt to recover by
- /// // calling `recover_from_error`. Otherwise, return the successful response.
- /// let mut new_service = service.then(|result| async move {
- /// match result {
- /// Ok(record) => Ok(record),
- /// Err(e) => recover_from_error(e).await,
- /// }
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let record = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await?;
- /// # Ok::<(), DbError>(())
- /// # };
- /// # }
- /// ```
- ///
- /// [`Future`]: crate::Service::Future
- /// [`Output`]: std::future::Future::Output
- /// [`futures` crate]: https://docs.rs/futures
- /// [`FutureExt::then`]: https://docs.rs/futures/latest/futures/future/trait.FutureExt.html#method.then
- /// [`Error`]: crate::Service::Error
- /// [`Response`]: crate::Service::Response
- /// [`poll_ready`]: crate::Service::poll_ready
- /// [`BoxError`]: crate::BoxError
- fn then<F, Response, Error, Fut>(self, f: F) -> Then<Self, F>
- where
- Self: Sized,
- Error: From<Self::Error>,
- F: FnOnce(Result<Self::Response, Self::Error>) -> Fut + Clone,
- Fut: Future<Output = Result<Response, Error>>,
- {
- Then::new(self, f)
- }
-
- /// Composes a function that transforms futures produced by the service.
- ///
- /// This takes a function or closure returning a future computed from the future returned by
- /// the service's [`call`] method, as opposed to the responses produced by the future.
- ///
- /// # Examples
- ///
- /// ```
- /// # use std::task::{Poll, Context};
- /// # use tower::{Service, ServiceExt, BoxError};
- /// #
- /// # struct DatabaseService;
- /// # impl DatabaseService {
- /// # fn new(address: &str) -> Self {
- /// # DatabaseService
- /// # }
- /// # }
- /// #
- /// # type Record = ();
- /// # type DbError = crate::BoxError;
- /// #
- /// # impl Service<u32> for DatabaseService {
- /// # type Response = Record;
- /// # type Error = DbError;
- /// # type Future = futures_util::future::Ready<Result<Record, DbError>>;
- /// #
- /// # fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- /// # Poll::Ready(Ok(()))
- /// # }
- /// #
- /// # fn call(&mut self, request: u32) -> Self::Future {
- /// # futures_util::future::ready(Ok(()))
- /// # }
- /// # }
- /// #
- /// # fn main() {
- /// use std::time::Duration;
- /// use tokio::time::timeout;
- ///
- /// // A service returning Result<Record, DbError>
- /// let service = DatabaseService::new("127.0.0.1:8080");
- /// # async {
- ///
- /// let mut new_service = service.map_future(|future| async move {
- /// let res = timeout(Duration::from_secs(1), future).await?;
- /// Ok::<_, BoxError>(res)
- /// });
- ///
- /// // Call the new service
- /// let id = 13;
- /// let record = new_service
- /// .ready()
- /// .await?
- /// .call(id)
- /// .await?;
- /// # Ok::<(), BoxError>(())
- /// # };
- /// # }
- /// ```
- ///
- /// Note that normally you wouldn't implement timeouts like this and instead use [`Timeout`].
- ///
- /// [`call`]: crate::Service::call
- /// [`Timeout`]: crate::timeout::Timeout
- fn map_future<F, Fut, Response, Error>(self, f: F) -> MapFuture<Self, F>
- where
- Self: Sized,
- F: FnMut(Self::Future) -> Fut,
- Error: From<Self::Error>,
- Fut: Future<Output = Result<Response, Error>>,
- {
- MapFuture::new(self, f)
- }
-
- /// Convert the service into a [`Service`] + [`Send`] trait object.
- ///
- /// See [`BoxService`] for more details.
- ///
- /// If `Self` implements the [`Clone`] trait, the [`boxed_clone`] method
- /// can be used instead, to produce a boxed service which will also
- /// implement [`Clone`].
- ///
- /// # Example
- ///
- /// ```
- /// use tower::{Service, ServiceExt, BoxError, service_fn, util::BoxService};
- /// #
- /// # struct Request;
- /// # struct Response;
- /// # impl Response {
- /// # fn new() -> Self { Self }
- /// # }
- ///
- /// let service = service_fn(|req: Request| async {
- /// Ok::<_, BoxError>(Response::new())
- /// });
- ///
- /// let service: BoxService<Request, Response, BoxError> = service
- /// .map_request(|req| {
- /// println!("received request");
- /// req
- /// })
- /// .map_response(|res| {
- /// println!("response produced");
- /// res
- /// })
- /// .boxed();
- /// # let service = assert_service(service);
- /// # fn assert_service<S, R>(svc: S) -> S
- /// # where S: Service<R> { svc }
- /// ```
- ///
- /// [`Service`]: crate::Service
- /// [`boxed_clone`]: Self::boxed_clone
- fn boxed(self) -> BoxService<Request, Self::Response, Self::Error>
- where
- Self: Sized + Send + 'static,
- Self::Future: Send + 'static,
- {
- BoxService::new(self)
- }
-
- /// Convert the service into a [`Service`] + [`Clone`] + [`Send`] trait object.
- ///
- /// This is similar to the [`boxed`] method, but it requires that `Self` implement
- /// [`Clone`], and the returned boxed service implements [`Clone`].
- /// See [`BoxCloneService`] for more details.
- ///
- /// # Example
- ///
- /// ```
- /// use tower::{Service, ServiceExt, BoxError, service_fn, util::BoxCloneService};
- /// #
- /// # struct Request;
- /// # struct Response;
- /// # impl Response {
- /// # fn new() -> Self { Self }
- /// # }
- ///
- /// let service = service_fn(|req: Request| async {
- /// Ok::<_, BoxError>(Response::new())
- /// });
- ///
- /// let service: BoxCloneService<Request, Response, BoxError> = service
- /// .map_request(|req| {
- /// println!("received request");
- /// req
- /// })
- /// .map_response(|res| {
- /// println!("response produced");
- /// res
- /// })
- /// .boxed_clone();
- ///
- /// // The boxed service can still be cloned.
- /// service.clone();
- /// # let service = assert_service(service);
- /// # fn assert_service<S, R>(svc: S) -> S
- /// # where S: Service<R> { svc }
- /// ```
- ///
- /// [`Service`]: crate::Service
- /// [`boxed`]: Self::boxed
- fn boxed_clone(self) -> BoxCloneService<Request, Self::Response, Self::Error>
- where
- Self: Clone + Sized + Send + 'static,
- Self::Future: Send + 'static,
- {
- BoxCloneService::new(self)
- }
-}
-
-impl<T: ?Sized, Request> ServiceExt<Request> for T where T: tower_service::Service<Request> {}
-
-/// Convert an `Option<Layer>` into a [`Layer`].
-///
-/// ```
-/// # use std::time::Duration;
-/// # use tower::Service;
-/// # use tower::builder::ServiceBuilder;
-/// use tower::util::option_layer;
-/// # use tower::timeout::TimeoutLayer;
-/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
-/// # let timeout = Some(Duration::new(10, 0));
-/// // Layer to apply a timeout if configured
-/// let maybe_timeout = option_layer(timeout.map(TimeoutLayer::new));
-///
-/// ServiceBuilder::new()
-/// .layer(maybe_timeout)
-/// .service(svc);
-/// # }
-/// ```
-///
-/// [`Layer`]: crate::layer::Layer
-pub fn option_layer<L>(layer: Option<L>) -> Either<L, Identity> {
- if let Some(layer) = layer {
- Either::Left(layer)
- } else {
- Either::Right(Identity::new())
- }
-}
diff --git a/vendor/tower/src/util/oneshot.rs b/vendor/tower/src/util/oneshot.rs
deleted file mode 100644
index 114b2f82..00000000
--- a/vendor/tower/src/util/oneshot.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- fmt,
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-pin_project! {
- /// A [`Future`] consuming a [`Service`] and request, waiting until the [`Service`]
- /// is ready, and then calling [`Service::call`] with the request, and
- /// waiting for that [`Future`].
- #[derive(Debug)]
- pub struct Oneshot<S: Service<Req>, Req> {
- #[pin]
- state: State<S, Req>,
- }
-}
-
-pin_project! {
- #[project = StateProj]
- enum State<S: Service<Req>, Req> {
- NotReady {
- svc: S,
- req: Option<Req>,
- },
- Called {
- #[pin]
- fut: S::Future,
- },
- Done,
- }
-}
-
-impl<S: Service<Req>, Req> State<S, Req> {
- const fn not_ready(svc: S, req: Option<Req>) -> Self {
- Self::NotReady { svc, req }
- }
-
- const fn called(fut: S::Future) -> Self {
- Self::Called { fut }
- }
-}
-
-impl<S, Req> fmt::Debug for State<S, Req>
-where
- S: Service<Req> + fmt::Debug,
- Req: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- State::NotReady {
- svc,
- req: Some(req),
- } => f
- .debug_tuple("State::NotReady")
- .field(svc)
- .field(req)
- .finish(),
- State::NotReady { req: None, .. } => unreachable!(),
- State::Called { .. } => f.debug_tuple("State::Called").field(&"S::Future").finish(),
- State::Done => f.debug_tuple("State::Done").finish(),
- }
- }
-}
-
-impl<S, Req> Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- #[allow(missing_docs)]
- pub const fn new(svc: S, req: Req) -> Self {
- Oneshot {
- state: State::not_ready(svc, Some(req)),
- }
- }
-}
-
-impl<S, Req> Future for Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- type Output = Result<S::Response, S::Error>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
- loop {
- match this.state.as_mut().project() {
- StateProj::NotReady { svc, req } => {
- let _ = ready!(svc.poll_ready(cx))?;
- let f = svc.call(req.take().expect("already called"));
- this.state.set(State::called(f));
- }
- StateProj::Called { fut } => {
- let res = ready!(fut.poll(cx))?;
- this.state.set(State::Done);
- return Poll::Ready(Ok(res));
- }
- StateProj::Done => panic!("polled after complete"),
- }
- }
- }
-}
diff --git a/vendor/tower/src/util/optional/error.rs b/vendor/tower/src/util/optional/error.rs
deleted file mode 100644
index 78061335..00000000
--- a/vendor/tower/src/util/optional/error.rs
+++ /dev/null
@@ -1,21 +0,0 @@
-use std::{error, fmt};
-
-/// Error returned if the inner [`Service`] has not been set.
-///
-/// [`Service`]: crate::Service
-#[derive(Debug)]
-pub struct None(());
-
-impl None {
- pub(crate) fn new() -> None {
- None(())
- }
-}
-
-impl fmt::Display for None {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- write!(fmt, "None")
- }
-}
-
-impl error::Error for None {}
diff --git a/vendor/tower/src/util/optional/future.rs b/vendor/tower/src/util/optional/future.rs
deleted file mode 100644
index 7d289b7b..00000000
--- a/vendor/tower/src/util/optional/future.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use super::error;
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-pin_project! {
- /// Response future returned by [`Optional`].
- ///
- /// [`Optional`]: crate::util::Optional
- #[derive(Debug)]
- pub struct ResponseFuture<T> {
- #[pin]
- inner: Option<T>,
- }
-}
-
-impl<T> ResponseFuture<T> {
- pub(crate) fn new(inner: Option<T>) -> ResponseFuture<T> {
- ResponseFuture { inner }
- }
-}
-
-impl<F, T, E> Future for ResponseFuture<F>
-where
- F: Future<Output = Result<T, E>>,
- E: Into<crate::BoxError>,
-{
- type Output = Result<T, crate::BoxError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- match self.project().inner.as_pin_mut() {
- Some(inner) => Poll::Ready(Ok(ready!(inner.poll(cx)).map_err(Into::into)?)),
- None => Poll::Ready(Err(error::None::new().into())),
- }
- }
-}
diff --git a/vendor/tower/src/util/optional/mod.rs b/vendor/tower/src/util/optional/mod.rs
deleted file mode 100644
index 4d020709..00000000
--- a/vendor/tower/src/util/optional/mod.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-//! Contains [`Optional`] and related types and functions.
-//!
-//! See [`Optional`] documentation for more details.
-
-/// Error types for [`Optional`].
-pub mod error;
-/// Future types for [`Optional`].
-pub mod future;
-
-use self::future::ResponseFuture;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-/// Optionally forwards requests to an inner service.
-///
-/// If the inner service is [`None`], [`optional::None`] is returned as the response.
-///
-/// [`optional::None`]: crate::util::error::optional::None
-#[derive(Debug)]
-pub struct Optional<T> {
- inner: Option<T>,
-}
-
-impl<T> Optional<T> {
- /// Create a new [`Optional`].
- pub const fn new<Request>(inner: Option<T>) -> Optional<T>
- where
- T: Service<Request>,
- T::Error: Into<crate::BoxError>,
- {
- Optional { inner }
- }
-}
-
-impl<T, Request> Service<Request> for Optional<T>
-where
- T: Service<Request>,
- T::Error: Into<crate::BoxError>,
-{
- type Response = T::Response;
- type Error = crate::BoxError;
- type Future = ResponseFuture<T::Future>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self.inner {
- Some(ref mut inner) => match inner.poll_ready(cx) {
- Poll::Ready(r) => Poll::Ready(r.map_err(Into::into)),
- Poll::Pending => Poll::Pending,
- },
- // None services are always ready
- None => Poll::Ready(Ok(())),
- }
- }
-
- fn call(&mut self, request: Request) -> Self::Future {
- let inner = self.inner.as_mut().map(|i| i.call(request));
- ResponseFuture::new(inner)
- }
-}
diff --git a/vendor/tower/src/util/ready.rs b/vendor/tower/src/util/ready.rs
deleted file mode 100644
index 750db872..00000000
--- a/vendor/tower/src/util/ready.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-use std::{fmt, marker::PhantomData};
-
-use futures_core::ready;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-use tower_service::Service;
-
-/// A [`Future`] that yields the service when it is ready to accept a request.
-///
-/// [`ReadyOneshot`] values are produced by [`ServiceExt::ready_oneshot`].
-///
-/// [`ServiceExt::ready_oneshot`]: crate::util::ServiceExt::ready_oneshot
-pub struct ReadyOneshot<T, Request> {
- inner: Option<T>,
- _p: PhantomData<fn() -> Request>,
-}
-
-// Safety: This is safe because `Services`'s are always `Unpin`.
-impl<T, Request> Unpin for ReadyOneshot<T, Request> {}
-
-impl<T, Request> ReadyOneshot<T, Request>
-where
- T: Service<Request>,
-{
- #[allow(missing_docs)]
- pub const fn new(service: T) -> Self {
- Self {
- inner: Some(service),
- _p: PhantomData,
- }
- }
-}
-
-impl<T, Request> Future for ReadyOneshot<T, Request>
-where
- T: Service<Request>,
-{
- type Output = Result<T, T::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- ready!(self
- .inner
- .as_mut()
- .expect("poll after Poll::Ready")
- .poll_ready(cx))?;
-
- Poll::Ready(Ok(self.inner.take().expect("poll after Poll::Ready")))
- }
-}
-
-impl<T, Request> fmt::Debug for ReadyOneshot<T, Request>
-where
- T: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("ReadyOneshot")
- .field("inner", &self.inner)
- .finish()
- }
-}
-
-/// A future that yields a mutable reference to the service when it is ready to accept a request.
-///
-/// [`Ready`] values are produced by [`ServiceExt::ready`].
-///
-/// [`ServiceExt::ready`]: crate::util::ServiceExt::ready
-pub struct Ready<'a, T, Request>(ReadyOneshot<&'a mut T, Request>);
-
-// Safety: This is safe for the same reason that the impl for ReadyOneshot is safe.
-impl<'a, T, Request> Unpin for Ready<'a, T, Request> {}
-
-impl<'a, T, Request> Ready<'a, T, Request>
-where
- T: Service<Request>,
-{
- #[allow(missing_docs)]
- pub fn new(service: &'a mut T) -> Self {
- Self(ReadyOneshot::new(service))
- }
-}
-
-impl<'a, T, Request> Future for Ready<'a, T, Request>
-where
- T: Service<Request>,
-{
- type Output = Result<&'a mut T, T::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- Pin::new(&mut self.0).poll(cx)
- }
-}
-
-impl<'a, T, Request> fmt::Debug for Ready<'a, T, Request>
-where
- T: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple("Ready").field(&self.0).finish()
- }
-}
diff --git a/vendor/tower/src/util/rng.rs b/vendor/tower/src/util/rng.rs
deleted file mode 100644
index 5b2f9fce..00000000
--- a/vendor/tower/src/util/rng.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-//! [PRNG] utilities for tower middleware.
-//!
-//! This module provides a generic [`Rng`] trait and a [`HasherRng`] that
-//! implements the trait based on [`RandomState`] or any other [`Hasher`].
-//!
-//! These utilities replace tower's internal usage of `rand` with these smaller,
-//! more lightweight methods. Most of the implementations are extracted from
-//! their corresponding `rand` implementations.
-//!
-//! [PRNG]: https://en.wikipedia.org/wiki/Pseudorandom_number_generator
-
-use std::{
- collections::hash_map::RandomState,
- hash::{BuildHasher, Hasher},
- ops::Range,
-};
-
-/// A simple [PRNG] trait for use within tower middleware.
-///
-/// [PRNG]: https://en.wikipedia.org/wiki/Pseudorandom_number_generator
-pub trait Rng {
- /// Generate a random [`u64`].
- fn next_u64(&mut self) -> u64;
-
- /// Generate a random [`f64`] between `[0, 1)`.
- fn next_f64(&mut self) -> f64 {
- // Borrowed from:
- // https://github.com/rust-random/rand/blob/master/src/distributions/float.rs#L106
- let float_size = std::mem::size_of::<f64>() as u32 * 8;
- let precision = 52 + 1;
- let scale = 1.0 / ((1u64 << precision) as f64);
-
- let value = self.next_u64();
- let value = value >> (float_size - precision);
-
- scale * value as f64
- }
-
- /// Randomly pick a value within the range.
- ///
- /// # Panic
- ///
- /// - If start < end this will panic in debug mode.
- fn next_range(&mut self, range: Range<u64>) -> u64 {
- debug_assert!(
- range.start < range.end,
- "The range start must be smaller than the end"
- );
- let start = range.start;
- let end = range.end;
-
- let range = end - start;
-
- let n = self.next_u64();
-
- (n % range) + start
- }
-}
-
-impl<R: Rng + ?Sized> Rng for Box<R> {
- fn next_u64(&mut self) -> u64 {
- (**self).next_u64()
- }
-}
-
-/// A [`Rng`] implementation that uses a [`Hasher`] to generate the random
-/// values. The implementation uses an internal counter to pass to the hasher
-/// for each iteration of [`Rng::next_u64`].
-///
-/// # Default
-///
-/// This hasher has a default type of [`RandomState`] which just uses the
-/// libstd method of getting a random u64.
-#[derive(Clone, Debug)]
-pub struct HasherRng<H = RandomState> {
- hasher: H,
- counter: u64,
-}
-
-impl HasherRng {
- /// Create a new default [`HasherRng`].
- pub fn new() -> Self {
- HasherRng::default()
- }
-}
-
-impl Default for HasherRng {
- fn default() -> Self {
- HasherRng::with_hasher(RandomState::default())
- }
-}
-
-impl<H> HasherRng<H> {
- /// Create a new [`HasherRng`] with the provided hasher.
- pub fn with_hasher(hasher: H) -> Self {
- HasherRng { hasher, counter: 0 }
- }
-}
-
-impl<H> Rng for HasherRng<H>
-where
- H: BuildHasher,
-{
- fn next_u64(&mut self) -> u64 {
- let mut hasher = self.hasher.build_hasher();
- hasher.write_u64(self.counter);
- self.counter = self.counter.wrapping_add(1);
- hasher.finish()
- }
-}
-
-/// A sampler modified from the Rand implementation for use internally for the balance middleware.
-///
-/// It's an implementation of Floyd's combination algorithm with amount fixed at 2. This uses no allocated
-/// memory and finishes in constant time (only 2 random calls).
-///
-/// ref: This was borrowed and modified from the following Rand implementation
-/// https://github.com/rust-random/rand/blob/b73640705d6714509f8ceccc49e8df996fa19f51/src/seq/index.rs#L375-L411
-#[cfg(feature = "balance")]
-pub(crate) fn sample_floyd2<R: Rng>(rng: &mut R, length: u64) -> [u64; 2] {
- debug_assert!(2 <= length);
- let aidx = rng.next_range(0..length - 1);
- let bidx = rng.next_range(0..length);
- let aidx = if aidx == bidx { length - 1 } else { aidx };
- [aidx, bidx]
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use quickcheck::*;
-
- quickcheck! {
- fn next_f64(counter: u64) -> TestResult {
- let mut rng = HasherRng::default();
- rng.counter = counter;
- let n = rng.next_f64();
-
- TestResult::from_bool(n < 1.0 && n >= 0.0)
- }
-
- fn next_range(counter: u64, range: Range<u64>) -> TestResult {
- if range.start >= range.end{
- return TestResult::discard();
- }
-
- let mut rng = HasherRng::default();
- rng.counter = counter;
-
- let n = rng.next_range(range.clone());
-
- TestResult::from_bool(n >= range.start && (n < range.end || range.start == range.end))
- }
-
- fn sample_floyd2(counter: u64, length: u64) -> TestResult {
- if length < 2 || length > 256 {
- return TestResult::discard();
- }
-
- let mut rng = HasherRng::default();
- rng.counter = counter;
-
- let [a, b] = super::sample_floyd2(&mut rng, length);
-
- if a >= length || b >= length || a == b {
- return TestResult::failed();
- }
-
- TestResult::passed()
- }
- }
-
- #[test]
- fn sample_inplace_boundaries() {
- let mut r = HasherRng::default();
- match super::sample_floyd2(&mut r, 2) {
- [0, 1] | [1, 0] => (),
- array => panic!("unexpected inplace boundaries: {:?}", array),
- }
- }
-}
diff --git a/vendor/tower/src/util/service_fn.rs b/vendor/tower/src/util/service_fn.rs
deleted file mode 100644
index d6e6be87..00000000
--- a/vendor/tower/src/util/service_fn.rs
+++ /dev/null
@@ -1,82 +0,0 @@
-use std::fmt;
-use std::future::Future;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-/// Returns a new [`ServiceFn`] with the given closure.
-///
-/// This lets you build a [`Service`] from an async function that returns a [`Result`].
-///
-/// # Example
-///
-/// ```
-/// use tower::{service_fn, Service, ServiceExt, BoxError};
-/// # struct Request;
-/// # impl Request {
-/// # fn new() -> Self { Self }
-/// # }
-/// # struct Response(&'static str);
-/// # impl Response {
-/// # fn new(body: &'static str) -> Self {
-/// # Self(body)
-/// # }
-/// # fn into_body(self) -> &'static str { self.0 }
-/// # }
-///
-/// # #[tokio::main]
-/// # async fn main() -> Result<(), BoxError> {
-/// async fn handle(request: Request) -> Result<Response, BoxError> {
-/// let response = Response::new("Hello, World!");
-/// Ok(response)
-/// }
-///
-/// let mut service = service_fn(handle);
-///
-/// let response = service
-/// .ready()
-/// .await?
-/// .call(Request::new())
-/// .await?;
-///
-/// assert_eq!("Hello, World!", response.into_body());
-/// #
-/// # Ok(())
-/// # }
-/// ```
-pub fn service_fn<T>(f: T) -> ServiceFn<T> {
- ServiceFn { f }
-}
-
-/// A [`Service`] implemented by a closure.
-///
-/// See [`service_fn`] for more details.
-#[derive(Copy, Clone)]
-pub struct ServiceFn<T> {
- f: T,
-}
-
-impl<T> fmt::Debug for ServiceFn<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("ServiceFn")
- .field("f", &format_args!("{}", std::any::type_name::<T>()))
- .finish()
- }
-}
-
-impl<T, F, Request, R, E> Service<Request> for ServiceFn<T>
-where
- T: FnMut(Request) -> F,
- F: Future<Output = Result<R, E>>,
-{
- type Response = R;
- type Error = E;
- type Future = F;
-
- fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), E>> {
- Ok(()).into()
- }
-
- fn call(&mut self, req: Request) -> Self::Future {
- (self.f)(req)
- }
-}
diff --git a/vendor/tower/src/util/then.rs b/vendor/tower/src/util/then.rs
deleted file mode 100644
index 5e934506..00000000
--- a/vendor/tower/src/util/then.rs
+++ /dev/null
@@ -1,103 +0,0 @@
-use futures_util::{future, FutureExt};
-use std::{
- fmt,
- future::Future,
- task::{Context, Poll},
-};
-use tower_layer::Layer;
-use tower_service::Service;
-
-/// [`Service`] returned by the [`then`] combinator.
-///
-/// [`then`]: crate::util::ServiceExt::then
-#[derive(Clone)]
-pub struct Then<S, F> {
- inner: S,
- f: F,
-}
-
-impl<S, F> fmt::Debug for Then<S, F>
-where
- S: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Then")
- .field("inner", &self.inner)
- .field("f", &format_args!("{}", std::any::type_name::<F>()))
- .finish()
- }
-}
-
-/// A [`Layer`] that produces a [`Then`] service.
-///
-/// [`Layer`]: tower_layer::Layer
-#[derive(Debug, Clone)]
-pub struct ThenLayer<F> {
- f: F,
-}
-
-impl<S, F> Then<S, F> {
- /// Creates a new `Then` service.
- pub const fn new(inner: S, f: F) -> Self {
- Then { f, inner }
- }
-
- /// Returns a new [`Layer`] that produces [`Then`] services.
- ///
- /// This is a convenience function that simply calls [`ThenLayer::new`].
- ///
- /// [`Layer`]: tower_layer::Layer
- pub fn layer(f: F) -> ThenLayer<F> {
- ThenLayer { f }
- }
-}
-
-opaque_future! {
- /// Response future from [`Then`] services.
- ///
- /// [`Then`]: crate::util::Then
- pub type ThenFuture<F1, F2, N> = future::Then<F1, F2, N>;
-}
-
-impl<S, F, Request, Response, Error, Fut> Service<Request> for Then<S, F>
-where
- S: Service<Request>,
- S::Error: Into<Error>,
- F: FnOnce(Result<S::Response, S::Error>) -> Fut + Clone,
- Fut: Future<Output = Result<Response, Error>>,
-{
- type Response = Response;
- type Error = Error;
- type Future = ThenFuture<S::Future, Fut, F>;
-
- #[inline]
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(Into::into)
- }
-
- #[inline]
- fn call(&mut self, request: Request) -> Self::Future {
- ThenFuture::new(self.inner.call(request).then(self.f.clone()))
- }
-}
-
-impl<F> ThenLayer<F> {
- /// Creates a new [`ThenLayer`] layer.
- pub const fn new(f: F) -> Self {
- ThenLayer { f }
- }
-}
-
-impl<S, F> Layer<S> for ThenLayer<F>
-where
- F: Clone,
-{
- type Service = Then<S, F>;
-
- fn layer(&self, inner: S) -> Self::Service {
- Then {
- f: self.f.clone(),
- inner,
- }
- }
-}
diff --git a/vendor/tower/tests/balance/main.rs b/vendor/tower/tests/balance/main.rs
deleted file mode 100644
index aed51203..00000000
--- a/vendor/tower/tests/balance/main.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-#![cfg(feature = "balance")]
-#[path = "../support.rs"]
-mod support;
-
-use std::future::Future;
-use std::task::{Context, Poll};
-use tokio_test::{assert_pending, assert_ready, task};
-use tower::balance::p2c::Balance;
-use tower::discover::Change;
-use tower_service::Service;
-use tower_test::mock;
-
-type Req = &'static str;
-struct Mock(mock::Mock<Req, Req>);
-
-impl Service<Req> for Mock {
- type Response = <mock::Mock<Req, Req> as Service<Req>>::Response;
- type Error = <mock::Mock<Req, Req> as Service<Req>>::Error;
- type Future = <mock::Mock<Req, Req> as Service<Req>>::Future;
- fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
- self.0.poll_ready(cx)
- }
- fn call(&mut self, req: Req) -> Self::Future {
- self.0.call(req)
- }
-}
-
-impl tower::load::Load for Mock {
- type Metric = usize;
- fn load(&self) -> Self::Metric {
- rand::random()
- }
-}
-
-#[test]
-fn stress() {
- let _t = support::trace_init();
- let mut task = task::spawn(());
- let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<Result<_, &'static str>>();
- let mut cache = Balance::<_, Req>::new(support::IntoStream::new(rx));
-
- let mut nready = 0;
- let mut services = slab::Slab::<(mock::Handle<Req, Req>, bool)>::new();
- let mut retired = Vec::<mock::Handle<Req, Req>>::new();
- for _ in 0..100_000 {
- for _ in 0..(rand::random::<u8>() % 8) {
- if !services.is_empty() && rand::random() {
- if nready == 0 || rand::random::<u8>() > u8::max_value() / 4 {
- // ready a service
- // TODO: sometimes ready a removed service?
- for (_, (handle, ready)) in &mut services {
- if !*ready {
- handle.allow(1);
- *ready = true;
- nready += 1;
- break;
- }
- }
- } else {
- // use a service
- use std::task::Poll;
- match task.enter(|cx, _| cache.poll_ready(cx)) {
- Poll::Ready(Ok(())) => {
- assert_ne!(nready, 0, "got ready when no service is ready");
- let mut fut = cache.call("hello");
- let mut fut = std::pin::Pin::new(&mut fut);
- assert_pending!(task.enter(|cx, _| fut.as_mut().poll(cx)));
- let mut found = false;
- for (_, (handle, ready)) in &mut services {
- if *ready {
- if let Poll::Ready(Some((req, res))) = handle.poll_request() {
- assert_eq!(req, "hello");
- res.send_response("world");
- *ready = false;
- nready -= 1;
- found = true;
- break;
- }
- }
- }
- if !found {
- // we must have been given a retired service
- let mut at = None;
- for (i, handle) in retired.iter_mut().enumerate() {
- if let Poll::Ready(Some((req, res))) = handle.poll_request() {
- assert_eq!(req, "hello");
- res.send_response("world");
- at = Some(i);
- break;
- }
- }
- let _ = retired.swap_remove(
- at.expect("request was not sent to a ready service"),
- );
- nready -= 1;
- }
- assert_ready!(task.enter(|cx, _| fut.as_mut().poll(cx))).unwrap();
- }
- Poll::Ready(_) => unreachable!("discover stream has not failed"),
- Poll::Pending => {
- // assert_eq!(nready, 0, "got pending when a service is ready");
- }
- }
- }
- } else if services.is_empty() || rand::random() {
- if services.is_empty() || nready == 0 || rand::random() {
- // add
- let (svc, mut handle) = mock::pair::<Req, Req>();
- let svc = Mock(svc);
- handle.allow(0);
- let k = services.insert((handle, false));
- let ok = tx.send(Ok(Change::Insert(k, svc)));
- assert!(ok.is_ok());
- } else {
- // remove
- while !services.is_empty() {
- let k = rand::random::<usize>() % (services.iter().last().unwrap().0 + 1);
- if services.contains(k) {
- let (handle, ready) = services.remove(k);
- if ready {
- retired.push(handle);
- }
- let ok = tx.send(Ok(Change::Remove(k)));
- assert!(ok.is_ok());
- break;
- }
- }
- }
- } else {
- // fail a service
- while !services.is_empty() {
- let k = rand::random::<usize>() % (services.iter().last().unwrap().0 + 1);
- if services.contains(k) {
- let (mut handle, ready) = services.remove(k);
- if ready {
- nready -= 1;
- }
- handle.send_error("doom");
- break;
- }
- }
- }
- }
-
- let r = task.enter(|cx, _| cache.poll_ready(cx));
-
- // drop any retired services that the p2c has gotten rid of
- let mut removed = Vec::new();
- for (i, handle) in retired.iter_mut().enumerate() {
- if let Poll::Ready(None) = handle.poll_request() {
- removed.push(i);
- }
- }
- for i in removed.into_iter().rev() {
- retired.swap_remove(i);
- nready -= 1;
- }
-
- use std::task::Poll;
- match r {
- Poll::Ready(Ok(())) => {
- assert_ne!(nready, 0, "got ready when no service is ready");
- }
- Poll::Ready(_) => unreachable!("discover stream has not failed"),
- Poll::Pending => {
- assert_eq!(nready, 0, "got pending when a service is ready");
- }
- }
- }
-}
diff --git a/vendor/tower/tests/buffer/main.rs b/vendor/tower/tests/buffer/main.rs
deleted file mode 100644
index ee238f11..00000000
--- a/vendor/tower/tests/buffer/main.rs
+++ /dev/null
@@ -1,459 +0,0 @@
-#![cfg(feature = "buffer")]
-#[path = "../support.rs"]
-mod support;
-use std::thread;
-use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok, task};
-use tower::buffer::{error, Buffer};
-use tower::{util::ServiceExt, Service};
-use tower_test::{assert_request_eq, mock};
-
-fn let_worker_work() {
- // Allow the Buffer's executor to do work
- thread::sleep(::std::time::Duration::from_millis(100));
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn req_and_res() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service();
-
- assert_ready_ok!(service.poll_ready());
- let mut response = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_response("world");
-
- let_worker_work();
- assert_eq!(assert_ready_ok!(response.poll()), "world");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn clears_canceled_requests() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service();
-
- handle.allow(1);
-
- assert_ready_ok!(service.poll_ready());
- let mut res1 = task::spawn(service.call("hello"));
-
- let send_response1 = assert_request_eq!(handle, "hello");
-
- // don't respond yet, new requests will get buffered
- assert_ready_ok!(service.poll_ready());
- let res2 = task::spawn(service.call("hello2"));
-
- assert_pending!(handle.poll_request());
-
- assert_ready_ok!(service.poll_ready());
- let mut res3 = task::spawn(service.call("hello3"));
-
- drop(res2);
-
- send_response1.send_response("world");
-
- let_worker_work();
- assert_eq!(assert_ready_ok!(res1.poll()), "world");
-
- // res2 was dropped, so it should have been canceled in the buffer
- handle.allow(1);
-
- assert_request_eq!(handle, "hello3").send_response("world3");
-
- let_worker_work();
- assert_eq!(assert_ready_ok!(res3.poll()), "world3");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_inner_is_not_ready() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service();
-
- // Make the service NotReady
- handle.allow(0);
-
- assert_ready_ok!(service.poll_ready());
- let mut res1 = task::spawn(service.call("hello"));
-
- let_worker_work();
- assert_pending!(res1.poll());
- assert_pending!(handle.poll_request());
-
- handle.allow(1);
-
- assert_request_eq!(handle, "hello").send_response("world");
-
- let_worker_work();
- assert_eq!(assert_ready_ok!(res1.poll()), "world");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_inner_fails() {
- use std::error::Error as StdError;
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service();
-
- // Make the service NotReady
- handle.allow(0);
- handle.send_error("foobar");
-
- assert_ready_ok!(service.poll_ready());
- let mut res1 = task::spawn(service.call("hello"));
-
- let_worker_work();
- let e = assert_ready_err!(res1.poll());
- if let Some(e) = e.downcast_ref::<error::ServiceError>() {
- let e = e.source().unwrap();
-
- assert_eq!(e.to_string(), "foobar");
- } else {
- panic!("unexpected error type: {:?}", e);
- }
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn poll_ready_when_worker_is_dropped_early() {
- let _t = support::trace_init();
-
- let (service, _handle) = mock::pair::<(), ()>();
-
- let (service, worker) = Buffer::pair(service, 1);
-
- let mut service = mock::Spawn::new(service);
-
- drop(worker);
-
- let err = assert_ready_err!(service.poll_ready());
-
- assert!(err.is::<error::Closed>(), "should be a Closed: {:?}", err);
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn response_future_when_worker_is_dropped_early() {
- let _t = support::trace_init();
-
- let (service, mut handle) = mock::pair::<_, ()>();
-
- let (service, worker) = Buffer::pair(service, 1);
-
- let mut service = mock::Spawn::new(service);
-
- // keep the request in the worker
- handle.allow(0);
- assert_ready_ok!(service.poll_ready());
- let mut response = task::spawn(service.call("hello"));
-
- drop(worker);
-
- let_worker_work();
- let err = assert_ready_err!(response.poll());
- assert!(err.is::<error::Closed>(), "should be a Closed: {:?}", err);
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn waits_for_channel_capacity() {
- let _t = support::trace_init();
-
- let (service, mut handle) = mock::pair::<&'static str, &'static str>();
-
- let (service, worker) = Buffer::pair(service, 2);
-
- let mut service = mock::Spawn::new(service);
- let mut worker = task::spawn(worker);
-
- // keep requests in the worker
- handle.allow(0);
- assert_ready_ok!(service.poll_ready());
- let mut response1 = task::spawn(service.call("hello"));
- assert_pending!(worker.poll());
-
- assert_ready_ok!(service.poll_ready());
- let mut response2 = task::spawn(service.call("hello"));
- assert_pending!(worker.poll());
-
- assert_ready_ok!(service.poll_ready());
- let mut response3 = task::spawn(service.call("hello"));
- assert_pending!(service.poll_ready());
- assert_pending!(worker.poll());
-
- handle.allow(1);
- assert_pending!(worker.poll());
-
- handle
- .next_request()
- .await
- .unwrap()
- .1
- .send_response("world");
- assert_pending!(worker.poll());
- assert_ready_ok!(response1.poll());
-
- assert_ready_ok!(service.poll_ready());
- let mut response4 = task::spawn(service.call("hello"));
- assert_pending!(worker.poll());
-
- handle.allow(3);
- assert_pending!(worker.poll());
-
- handle
- .next_request()
- .await
- .unwrap()
- .1
- .send_response("world");
- assert_pending!(worker.poll());
- assert_ready_ok!(response2.poll());
-
- assert_pending!(worker.poll());
- handle
- .next_request()
- .await
- .unwrap()
- .1
- .send_response("world");
- assert_pending!(worker.poll());
- assert_ready_ok!(response3.poll());
-
- assert_pending!(worker.poll());
- handle
- .next_request()
- .await
- .unwrap()
- .1
- .send_response("world");
- assert_pending!(worker.poll());
- assert_ready_ok!(response4.poll());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn wakes_pending_waiters_on_close() {
- let _t = support::trace_init();
-
- let (service, mut handle) = mock::pair::<_, ()>();
-
- let (mut service, worker) = Buffer::pair(service, 1);
- let mut worker = task::spawn(worker);
-
- // keep the request in the worker
- handle.allow(0);
- let service1 = service.ready().await.unwrap();
- assert_pending!(worker.poll());
- let mut response = task::spawn(service1.call("hello"));
-
- assert!(worker.is_woken(), "worker task should be woken by request");
- assert_pending!(worker.poll());
-
- // fill the channel so all subsequent requests will wait for capacity
- let service1 = assert_ready_ok!(task::spawn(service.ready()).poll());
- assert_pending!(worker.poll());
- let mut response2 = task::spawn(service1.call("world"));
-
- let mut service1 = service.clone();
- let mut ready1 = task::spawn(service1.ready());
- assert_pending!(worker.poll());
- assert_pending!(ready1.poll(), "no capacity");
-
- let mut service1 = service.clone();
- let mut ready2 = task::spawn(service1.ready());
- assert_pending!(worker.poll());
- assert_pending!(ready2.poll(), "no capacity");
-
- // kill the worker task
- drop(worker);
-
- let err = assert_ready_err!(response.poll());
- assert!(
- err.is::<error::Closed>(),
- "response should fail with a Closed, got: {:?}",
- err
- );
-
- let err = assert_ready_err!(response2.poll());
- assert!(
- err.is::<error::Closed>(),
- "response should fail with a Closed, got: {:?}",
- err
- );
-
- assert!(
- ready1.is_woken(),
- "dropping worker should wake ready task 1"
- );
- let err = assert_ready_err!(ready1.poll());
- assert!(
- err.is::<error::Closed>(),
- "ready 1 should fail with a Closed, got: {:?}",
- err
- );
-
- assert!(
- ready2.is_woken(),
- "dropping worker should wake ready task 2"
- );
- let err = assert_ready_err!(ready1.poll());
- assert!(
- err.is::<error::Closed>(),
- "ready 2 should fail with a Closed, got: {:?}",
- err
- );
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn wakes_pending_waiters_on_failure() {
- let _t = support::trace_init();
-
- let (service, mut handle) = mock::pair::<_, ()>();
-
- let (mut service, worker) = Buffer::pair(service, 1);
- let mut worker = task::spawn(worker);
-
- // keep the request in the worker
- handle.allow(0);
- let service1 = service.ready().await.unwrap();
- assert_pending!(worker.poll());
- let mut response = task::spawn(service1.call("hello"));
-
- assert!(worker.is_woken(), "worker task should be woken by request");
- assert_pending!(worker.poll());
-
- // fill the channel so all subsequent requests will wait for capacity
- let service1 = assert_ready_ok!(task::spawn(service.ready()).poll());
- assert_pending!(worker.poll());
- let mut response2 = task::spawn(service1.call("world"));
-
- let mut service1 = service.clone();
- let mut ready1 = task::spawn(service1.ready());
- assert_pending!(worker.poll());
- assert_pending!(ready1.poll(), "no capacity");
-
- let mut service1 = service.clone();
- let mut ready2 = task::spawn(service1.ready());
- assert_pending!(worker.poll());
- assert_pending!(ready2.poll(), "no capacity");
-
- // fail the inner service
- handle.send_error("foobar");
- // worker task terminates
- assert_ready!(worker.poll());
-
- let err = assert_ready_err!(response.poll());
- assert!(
- err.is::<error::ServiceError>(),
- "response should fail with a ServiceError, got: {:?}",
- err
- );
- let err = assert_ready_err!(response2.poll());
- assert!(
- err.is::<error::ServiceError>(),
- "response should fail with a ServiceError, got: {:?}",
- err
- );
-
- assert!(
- ready1.is_woken(),
- "dropping worker should wake ready task 1"
- );
- let err = assert_ready_err!(ready1.poll());
- assert!(
- err.is::<error::ServiceError>(),
- "ready 1 should fail with a ServiceError, got: {:?}",
- err
- );
-
- assert!(
- ready2.is_woken(),
- "dropping worker should wake ready task 2"
- );
- let err = assert_ready_err!(ready1.poll());
- assert!(
- err.is::<error::ServiceError>(),
- "ready 2 should fail with a ServiceError, got: {:?}",
- err
- );
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn propagates_trace_spans() {
- use tower::util::ServiceExt;
- use tracing::Instrument;
-
- let _t = support::trace_init();
-
- let span = tracing::info_span!("my_span");
-
- let service = support::AssertSpanSvc::new(span.clone());
- let (service, worker) = Buffer::pair(service, 5);
- let worker = tokio::spawn(worker);
-
- let result = tokio::spawn(service.oneshot(()).instrument(span));
-
- result.await.expect("service panicked").expect("failed");
- worker.await.expect("worker panicked");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn doesnt_leak_permits() {
- let _t = support::trace_init();
-
- let (service, mut handle) = mock::pair::<_, ()>();
-
- let (mut service1, worker) = Buffer::pair(service, 2);
- let mut worker = task::spawn(worker);
- let mut service2 = service1.clone();
- let mut service3 = service1.clone();
-
- // Attempt to poll the first clone of the buffer to readiness multiple
- // times. These should all succeed, because the readiness is never
- // *consumed* --- no request is sent.
- assert_ready_ok!(task::spawn(service1.ready()).poll());
- assert_ready_ok!(task::spawn(service1.ready()).poll());
- assert_ready_ok!(task::spawn(service1.ready()).poll());
-
- // It should also be possible to drive the second clone of the service to
- // readiness --- it should only acquire one permit, as well.
- assert_ready_ok!(task::spawn(service2.ready()).poll());
- assert_ready_ok!(task::spawn(service2.ready()).poll());
- assert_ready_ok!(task::spawn(service2.ready()).poll());
-
- // The third clone *doesn't* poll ready, because the first two clones have
- // each acquired one permit.
- let mut ready3 = task::spawn(service3.ready());
- assert_pending!(ready3.poll());
-
- // Consume the first service's readiness.
- let mut response = task::spawn(service1.call(()));
- handle.allow(1);
- assert_pending!(worker.poll());
-
- handle.next_request().await.unwrap().1.send_response(());
- assert_pending!(worker.poll());
- assert_ready_ok!(response.poll());
-
- // Now, the third service should acquire a permit...
- assert!(ready3.is_woken());
- assert_ready_ok!(ready3.poll());
-}
-
-type Handle = mock::Handle<&'static str, &'static str>;
-type MockBuffer = Buffer<&'static str, mock::future::ResponseFuture<&'static str>>;
-
-fn new_service() -> (mock::Spawn<MockBuffer>, Handle) {
- // bound is >0 here because clears_canceled_requests needs multiple outstanding requests
- new_service_with_bound(10)
-}
-
-fn new_service_with_bound(bound: usize) -> (mock::Spawn<MockBuffer>, Handle) {
- mock::spawn_with(|s| {
- let (svc, worker) = Buffer::pair(s, bound);
-
- thread::spawn(move || {
- let mut fut = tokio_test::task::spawn(worker);
- while fut.poll().is_pending() {}
- });
-
- svc
- })
-}
diff --git a/vendor/tower/tests/builder.rs b/vendor/tower/tests/builder.rs
deleted file mode 100644
index 574766b8..00000000
--- a/vendor/tower/tests/builder.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-#![cfg(all(feature = "buffer", feature = "limit", feature = "retry"))]
-mod support;
-use futures_util::{future::Ready, pin_mut};
-use std::time::Duration;
-use tower::builder::ServiceBuilder;
-use tower::retry::Policy;
-use tower::util::ServiceExt;
-use tower_service::*;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn builder_service() {
- let _t = support::trace_init();
-
- let (service, handle) = mock::pair();
- pin_mut!(handle);
-
- let policy = MockPolicy::<&'static str, bool>::default();
- let mut client = ServiceBuilder::new()
- .buffer(5)
- .concurrency_limit(5)
- .rate_limit(5, Duration::from_secs(5))
- .retry(policy)
- .map_response(|r: &'static str| r == "world")
- .map_request(|r: &'static str| r == "hello")
- .service(service);
-
- // allow a request through
- handle.allow(1);
-
- let fut = client.ready().await.unwrap().call("hello");
- assert_request_eq!(handle, true).send_response("world");
- assert!(fut.await.unwrap());
-}
-
-#[derive(Debug, Clone, Default)]
-struct MockPolicy<Req, Res> {
- _pd: std::marker::PhantomData<(Req, Res)>,
-}
-
-impl<Req, Res, E> Policy<Req, Res, E> for MockPolicy<Req, Res>
-where
- Req: Clone,
- E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
-{
- type Future = Ready<()>;
-
- fn retry(&mut self, _req: &mut Req, _result: &mut Result<Res, E>) -> Option<Self::Future> {
- None
- }
-
- fn clone_request(&mut self, req: &Req) -> Option<Req> {
- Some(req.clone())
- }
-}
diff --git a/vendor/tower/tests/filter/async_filter.rs b/vendor/tower/tests/filter/async_filter.rs
deleted file mode 100644
index 4a0a5a91..00000000
--- a/vendor/tower/tests/filter/async_filter.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-#![cfg(feature = "filter")]
-#[path = "../support.rs"]
-mod support;
-use futures_util::{future::poll_fn, pin_mut};
-use std::future::Future;
-use tower::filter::{error::Error, AsyncFilter};
-use tower_service::Service;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn passthrough_sync() {
- let _t = support::trace_init();
-
- let (mut service, handle) = new_service(|_| async { Ok(()) });
-
- let th = tokio::spawn(async move {
- // Receive the requests and respond
- pin_mut!(handle);
- for i in 0..10usize {
- assert_request_eq!(handle, format!("ping-{}", i)).send_response(format!("pong-{}", i));
- }
- });
-
- let mut responses = vec![];
-
- for i in 0usize..10 {
- let request = format!("ping-{}", i);
- poll_fn(|cx| service.poll_ready(cx)).await.unwrap();
- let exchange = service.call(request);
- let exchange = async move {
- let response = exchange.await.unwrap();
- let expect = format!("pong-{}", i);
- assert_eq!(response.as_str(), expect.as_str());
- };
-
- responses.push(exchange);
- }
-
- futures_util::future::join_all(responses).await;
- th.await.unwrap();
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn rejected_sync() {
- let _t = support::trace_init();
-
- let (mut service, _handle) = new_service(|_| async { Err(Error::rejected()) });
-
- service.call("hello".into()).await.unwrap_err();
-}
-
-type Mock = mock::Mock<String, String>;
-type Handle = mock::Handle<String, String>;
-
-fn new_service<F, U>(f: F) -> (AsyncFilter<Mock, F>, Handle)
-where
- F: Fn(&String) -> U,
- U: Future<Output = Result<(), Error>>,
-{
- let (service, handle) = mock::pair();
- let service = AsyncFilter::new(service, f);
- (service, handle)
-}
diff --git a/vendor/tower/tests/hedge/main.rs b/vendor/tower/tests/hedge/main.rs
deleted file mode 100644
index d4082ca2..00000000
--- a/vendor/tower/tests/hedge/main.rs
+++ /dev/null
@@ -1,184 +0,0 @@
-#![cfg(feature = "hedge")]
-#[path = "../support.rs"]
-mod support;
-
-use std::time::Duration;
-use tokio::time;
-use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
-use tower::hedge::{Hedge, Policy};
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn hedge_orig_completes_first() {
- let _t = support::trace_init();
- time::pause();
-
- let (mut service, mut handle) = new_service(TestPolicy);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("orig"));
-
- // Check that orig request has been issued.
- let req = assert_request_eq!(handle, "orig");
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
- time::advance(Duration::from_millis(11)).await;
- // Check fut is not ready.
- assert_pending!(fut.poll());
- // Check that the hedge has been issued.
- let _hedge_req = assert_request_eq!(handle, "orig");
-
- req.send_response("orig-done");
- // Check that fut gets orig response.
- assert_eq!(assert_ready_ok!(fut.poll()), "orig-done");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn hedge_hedge_completes_first() {
- let _t = support::trace_init();
- time::pause();
-
- let (mut service, mut handle) = new_service(TestPolicy);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("orig"));
-
- // Check that orig request has been issued.
- let _req = assert_request_eq!(handle, "orig");
-
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
- time::advance(Duration::from_millis(11)).await;
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- // Check that the hedge has been issued.
- let hedge_req = assert_request_eq!(handle, "orig");
- hedge_req.send_response("hedge-done");
- // Check that fut gets hedge response.
- assert_eq!(assert_ready_ok!(fut.poll()), "hedge-done");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn completes_before_hedge() {
- let _t = support::trace_init();
- let (mut service, mut handle) = new_service(TestPolicy);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("orig"));
-
- // Check that orig request has been issued.
- let req = assert_request_eq!(handle, "orig");
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- req.send_response("orig-done");
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
- // Check that fut gets orig response.
- assert_eq!(assert_ready_ok!(fut.poll()), "orig-done");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn request_not_retyable() {
- let _t = support::trace_init();
- time::pause();
-
- let (mut service, mut handle) = new_service(TestPolicy);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call(NOT_RETRYABLE));
-
- // Check that orig request has been issued.
- let req = assert_request_eq!(handle, NOT_RETRYABLE);
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
- time::advance(Duration::from_millis(10)).await;
- // Check fut is not ready.
- assert_pending!(fut.poll());
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
-
- req.send_response("orig-done");
- // Check that fut gets orig response.
- assert_eq!(assert_ready_ok!(fut.poll()), "orig-done");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn request_not_clonable() {
- let _t = support::trace_init();
- time::pause();
-
- let (mut service, mut handle) = new_service(TestPolicy);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call(NOT_CLONABLE));
-
- // Check that orig request has been issued.
- let req = assert_request_eq!(handle, NOT_CLONABLE);
- // Check fut is not ready.
- assert_pending!(fut.poll());
-
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
- time::advance(Duration::from_millis(10)).await;
- // Check fut is not ready.
- assert_pending!(fut.poll());
- // Check hedge has not been issued.
- assert_pending!(handle.poll_request());
-
- req.send_response("orig-done");
- // Check that fut gets orig response.
- assert_eq!(assert_ready_ok!(fut.poll()), "orig-done");
-}
-
-type Req = &'static str;
-type Res = &'static str;
-type Mock = tower_test::mock::Mock<Req, Res>;
-type Handle = tower_test::mock::Handle<Req, Res>;
-
-static NOT_RETRYABLE: &str = "NOT_RETRYABLE";
-static NOT_CLONABLE: &str = "NOT_CLONABLE";
-
-#[derive(Clone)]
-struct TestPolicy;
-
-impl tower::hedge::Policy<Req> for TestPolicy {
- fn can_retry(&self, req: &Req) -> bool {
- *req != NOT_RETRYABLE
- }
-
- fn clone_request(&self, req: &Req) -> Option<Req> {
- if *req == NOT_CLONABLE {
- None
- } else {
- Some(req)
- }
- }
-}
-
-fn new_service<P: Policy<Req> + Clone>(policy: P) -> (mock::Spawn<Hedge<Mock, P>>, Handle) {
- let (service, handle) = tower_test::mock::pair();
-
- let mock_latencies: [u64; 10] = [1, 1, 1, 1, 1, 1, 1, 1, 10, 10];
-
- let service = Hedge::new_with_mock_latencies(
- service,
- policy,
- 10,
- 0.9,
- Duration::from_secs(60),
- &mock_latencies,
- );
-
- (mock::Spawn::new(service), handle)
-}
diff --git a/vendor/tower/tests/limit/concurrency.rs b/vendor/tower/tests/limit/concurrency.rs
deleted file mode 100644
index a471c52f..00000000
--- a/vendor/tower/tests/limit/concurrency.rs
+++ /dev/null
@@ -1,217 +0,0 @@
-#[path = "../support.rs"]
-mod support;
-use tokio_test::{assert_pending, assert_ready, assert_ready_ok};
-use tower::limit::concurrency::ConcurrencyLimitLayer;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn basic_service_limit_functionality_with_poll_ready() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(2);
- let (mut service, mut handle) = mock::spawn_layer(limit);
-
- assert_ready_ok!(service.poll_ready());
- let r1 = service.call("hello 1");
-
- assert_ready_ok!(service.poll_ready());
- let r2 = service.call("hello 2");
-
- assert_pending!(service.poll_ready());
-
- assert!(!service.is_woken());
-
- // The request gets passed through
- assert_request_eq!(handle, "hello 1").send_response("world 1");
-
- // The next request gets passed through
- assert_request_eq!(handle, "hello 2").send_response("world 2");
-
- // There are no more requests
- assert_pending!(handle.poll_request());
-
- assert_eq!(r1.await.unwrap(), "world 1");
-
- assert!(service.is_woken());
-
- // Another request can be sent
- assert_ready_ok!(service.poll_ready());
-
- let r3 = service.call("hello 3");
-
- assert_pending!(service.poll_ready());
-
- assert_eq!(r2.await.unwrap(), "world 2");
-
- // The request gets passed through
- assert_request_eq!(handle, "hello 3").send_response("world 3");
-
- assert_eq!(r3.await.unwrap(), "world 3");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn basic_service_limit_functionality_without_poll_ready() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(2);
- let (mut service, mut handle) = mock::spawn_layer(limit);
-
- assert_ready_ok!(service.poll_ready());
- let r1 = service.call("hello 1");
-
- assert_ready_ok!(service.poll_ready());
- let r2 = service.call("hello 2");
-
- assert_pending!(service.poll_ready());
-
- // The request gets passed through
- assert_request_eq!(handle, "hello 1").send_response("world 1");
-
- assert!(!service.is_woken());
-
- // The next request gets passed through
- assert_request_eq!(handle, "hello 2").send_response("world 2");
-
- assert!(!service.is_woken());
-
- // There are no more requests
- assert_pending!(handle.poll_request());
-
- assert_eq!(r1.await.unwrap(), "world 1");
-
- assert!(service.is_woken());
-
- // One more request can be sent
- assert_ready_ok!(service.poll_ready());
- let r4 = service.call("hello 4");
-
- assert_pending!(service.poll_ready());
-
- assert_eq!(r2.await.unwrap(), "world 2");
- assert!(service.is_woken());
-
- // The request gets passed through
- assert_request_eq!(handle, "hello 4").send_response("world 4");
-
- assert_eq!(r4.await.unwrap(), "world 4");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn request_without_capacity() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(0);
- let (mut service, _) = mock::spawn_layer::<(), (), _>(limit);
-
- assert_pending!(service.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn reserve_capacity_without_sending_request() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(1);
- let (mut s1, mut handle) = mock::spawn_layer(limit);
-
- let mut s2 = s1.clone();
-
- // Reserve capacity in s1
- assert_ready_ok!(s1.poll_ready());
-
- // Service 2 cannot get capacity
- assert_pending!(s2.poll_ready());
-
- // s1 sends the request, then s2 is able to get capacity
- let r1 = s1.call("hello");
-
- assert_request_eq!(handle, "hello").send_response("world");
-
- assert_pending!(s2.poll_ready());
-
- r1.await.unwrap();
-
- assert_ready_ok!(s2.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn service_drop_frees_capacity() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(1);
- let (mut s1, _handle) = mock::spawn_layer::<(), (), _>(limit);
-
- let mut s2 = s1.clone();
-
- // Reserve capacity in s1
- assert_ready_ok!(s1.poll_ready());
-
- // Service 2 cannot get capacity
- assert_pending!(s2.poll_ready());
-
- drop(s1);
-
- assert!(s2.is_woken());
- assert_ready_ok!(s2.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn response_error_releases_capacity() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(1);
- let (mut s1, mut handle) = mock::spawn_layer::<_, (), _>(limit);
-
- let mut s2 = s1.clone();
-
- // Reserve capacity in s1
- assert_ready_ok!(s1.poll_ready());
-
- // s1 sends the request, then s2 is able to get capacity
- let r1 = s1.call("hello");
-
- assert_request_eq!(handle, "hello").send_error("boom");
-
- r1.await.unwrap_err();
-
- assert_ready_ok!(s2.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn response_future_drop_releases_capacity() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(1);
- let (mut s1, _handle) = mock::spawn_layer::<_, (), _>(limit);
-
- let mut s2 = s1.clone();
-
- // Reserve capacity in s1
- assert_ready_ok!(s1.poll_ready());
-
- // s1 sends the request, then s2 is able to get capacity
- let r1 = s1.call("hello");
-
- assert_pending!(s2.poll_ready());
-
- drop(r1);
-
- assert_ready_ok!(s2.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn multi_waiters() {
- let _t = support::trace_init();
- let limit = ConcurrencyLimitLayer::new(1);
- let (mut s1, _handle) = mock::spawn_layer::<(), (), _>(limit);
- let mut s2 = s1.clone();
- let mut s3 = s1.clone();
-
- // Reserve capacity in s1
- assert_ready_ok!(s1.poll_ready());
-
- // s2 and s3 are not ready
- assert_pending!(s2.poll_ready());
- assert_pending!(s3.poll_ready());
-
- drop(s1);
-
- assert!(s2.is_woken());
- assert!(!s3.is_woken());
-
- drop(s2);
-
- assert!(s3.is_woken());
-}
diff --git a/vendor/tower/tests/limit/main.rs b/vendor/tower/tests/limit/main.rs
deleted file mode 100644
index 12744547..00000000
--- a/vendor/tower/tests/limit/main.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#![cfg(feature = "limit")]
-mod concurrency;
-mod rate;
-#[path = "../support.rs"]
-pub(crate) mod support;
diff --git a/vendor/tower/tests/limit/rate.rs b/vendor/tower/tests/limit/rate.rs
deleted file mode 100644
index 9c49e4ba..00000000
--- a/vendor/tower/tests/limit/rate.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-use super::support;
-use std::time::Duration;
-use tokio::time;
-use tokio_test::{assert_pending, assert_ready, assert_ready_ok};
-use tower::limit::rate::RateLimitLayer;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn reaching_capacity() {
- let _t = support::trace_init();
- time::pause();
-
- let rate_limit = RateLimitLayer::new(1, Duration::from_millis(100));
- let (mut service, mut handle) = mock::spawn_layer(rate_limit);
-
- assert_ready_ok!(service.poll_ready());
-
- let response = service.call("hello");
-
- assert_request_eq!(handle, "hello").send_response("world");
-
- assert_eq!(response.await.unwrap(), "world");
- assert_pending!(service.poll_ready());
-
- assert_pending!(handle.poll_request());
-
- time::advance(Duration::from_millis(101)).await;
-
- assert_ready_ok!(service.poll_ready());
-
- let response = service.call("two");
-
- assert_request_eq!(handle, "two").send_response("done");
-
- assert_eq!(response.await.unwrap(), "done");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn remaining_gets_reset() {
- // This test checks for the case where the `until` state gets reset
- // but the `rem` does not. This was a bug found `cd7dd12315706fc0860a35646b1eb7b60c50a5c1`.
- //
- // The main premise here is that we can make one request which should initialize the state
- // as ready. Then we can advance the clock to put us beyond the current period. When we make
- // subsequent requests the `rem` for the next window is continued from the previous when
- // it should be totally reset.
- let _t = support::trace_init();
- time::pause();
-
- let rate_limit = RateLimitLayer::new(3, Duration::from_millis(100));
- let (mut service, mut handle) = mock::spawn_layer(rate_limit);
-
- assert_ready_ok!(service.poll_ready());
- let response = service.call("hello");
- assert_request_eq!(handle, "hello").send_response("world");
- assert_eq!(response.await.unwrap(), "world");
-
- time::advance(Duration::from_millis(100)).await;
-
- assert_ready_ok!(service.poll_ready());
- let response = service.call("hello");
- assert_request_eq!(handle, "hello").send_response("world");
- assert_eq!(response.await.unwrap(), "world");
-
- assert_ready_ok!(service.poll_ready());
- let response = service.call("hello");
- assert_request_eq!(handle, "hello").send_response("world");
- assert_eq!(response.await.unwrap(), "world");
-
- assert_ready_ok!(service.poll_ready());
-}
diff --git a/vendor/tower/tests/load_shed/main.rs b/vendor/tower/tests/load_shed/main.rs
deleted file mode 100644
index 98eba860..00000000
--- a/vendor/tower/tests/load_shed/main.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-#![cfg(feature = "load-shed")]
-#[path = "../support.rs"]
-mod support;
-
-use tokio_test::{assert_ready_err, assert_ready_ok, task};
-use tower::load_shed::LoadShedLayer;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_ready() {
- let _t = support::trace_init();
-
- let layer = LoadShedLayer::new();
- let (mut service, mut handle) = mock::spawn_layer(layer);
-
- assert_ready_ok!(service.poll_ready(), "overload always reports ready");
-
- let mut response = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_response("world");
- assert_eq!(assert_ready_ok!(response.poll()), "world");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_not_ready() {
- let _t = support::trace_init();
-
- let layer = LoadShedLayer::new();
- let (mut service, mut handle) = mock::spawn_layer::<_, (), _>(layer);
-
- handle.allow(0);
-
- assert_ready_ok!(service.poll_ready(), "overload always reports ready");
-
- let mut fut = task::spawn(service.call("hello"));
-
- let err = assert_ready_err!(fut.poll());
- assert!(err.is::<tower::load_shed::error::Overloaded>());
-}
diff --git a/vendor/tower/tests/ready_cache/main.rs b/vendor/tower/tests/ready_cache/main.rs
deleted file mode 100644
index cdcfcbbf..00000000
--- a/vendor/tower/tests/ready_cache/main.rs
+++ /dev/null
@@ -1,223 +0,0 @@
-#![cfg(feature = "ready-cache")]
-#[path = "../support.rs"]
-mod support;
-
-use std::pin::Pin;
-use tokio_test::{assert_pending, assert_ready, task};
-use tower::ready_cache::{error, ReadyCache};
-use tower_test::mock;
-
-type Req = &'static str;
-type Mock = mock::Mock<Req, Req>;
-
-#[test]
-fn poll_ready_inner_failure() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service0, mut handle0) = mock::pair::<Req, Req>();
- handle0.send_error("doom");
- cache.push(0, service0);
-
- let (service1, mut handle1) = mock::pair::<Req, Req>();
- handle1.allow(1);
- cache.push(1, service1);
-
- let failed = assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap_err();
-
- assert_eq!(failed.0, 0);
- assert_eq!(format!("{}", failed.1), "doom");
-
- assert_eq!(cache.len(), 1);
-}
-
-#[test]
-fn poll_ready_not_ready() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service0, mut handle0) = mock::pair::<Req, Req>();
- handle0.allow(0);
- cache.push(0, service0);
-
- let (service1, mut handle1) = mock::pair::<Req, Req>();
- handle1.allow(0);
- cache.push(1, service1);
-
- assert_pending!(task.enter(|cx, _| cache.poll_pending(cx)));
-
- assert_eq!(cache.ready_len(), 0);
- assert_eq!(cache.pending_len(), 2);
- assert_eq!(cache.len(), 2);
-}
-
-#[test]
-fn poll_ready_promotes_inner() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service0, mut handle0) = mock::pair::<Req, Req>();
- handle0.allow(1);
- cache.push(0, service0);
-
- let (service1, mut handle1) = mock::pair::<Req, Req>();
- handle1.allow(1);
- cache.push(1, service1);
-
- assert_eq!(cache.ready_len(), 0);
- assert_eq!(cache.pending_len(), 2);
- assert_eq!(cache.len(), 2);
-
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-
- assert_eq!(cache.ready_len(), 2);
- assert_eq!(cache.pending_len(), 0);
- assert_eq!(cache.len(), 2);
-}
-
-#[test]
-fn evict_ready_then_error() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service, mut handle) = mock::pair::<Req, Req>();
- handle.allow(0);
- cache.push(0, service);
-
- assert_pending!(task.enter(|cx, _| cache.poll_pending(cx)));
-
- handle.allow(1);
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-
- handle.send_error("doom");
- assert!(cache.evict(&0));
-
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-}
-
-#[test]
-fn evict_pending_then_error() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service, mut handle) = mock::pair::<Req, Req>();
- handle.allow(0);
- cache.push(0, service);
-
- assert_pending!(task.enter(|cx, _| cache.poll_pending(cx)));
-
- handle.send_error("doom");
- assert!(cache.evict(&0));
-
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-}
-
-#[test]
-fn push_then_evict() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service, mut handle) = mock::pair::<Req, Req>();
- handle.allow(0);
- cache.push(0, service);
- handle.send_error("doom");
- assert!(cache.evict(&0));
-
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-}
-
-#[test]
-fn error_after_promote() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service, mut handle) = mock::pair::<Req, Req>();
- handle.allow(0);
- cache.push(0, service);
-
- assert_pending!(task.enter(|cx, _| cache.poll_pending(cx)));
-
- handle.allow(1);
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-
- handle.send_error("doom");
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
-}
-
-#[test]
-fn duplicate_key_by_index() {
- let _t = support::trace_init();
-
- let mut task = task::spawn(());
- let mut cache = ReadyCache::<usize, Mock, Req>::default();
-
- let (service0, mut handle0) = mock::pair::<Req, Req>();
- handle0.allow(1);
- cache.push(0, service0);
-
- let (service1, mut handle1) = mock::pair::<Req, Req>();
- handle1.allow(1);
- // this push should replace the old service (service0)
- cache.push(0, service1);
-
- // this evict should evict service1
- cache.evict(&0);
-
- // poll_pending should complete (there are no remaining pending services)
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
- // but service 0 should not be ready (1 replaced, 1 evicted)
- assert!(!task.enter(|cx, _| cache.check_ready(cx, &0)).unwrap());
-
- let (service2, mut handle2) = mock::pair::<Req, Req>();
- handle2.allow(1);
- // this push should ensure replace the evicted service1
- cache.push(0, service2);
-
- // there should be no more pending
- assert_ready!(task.enter(|cx, _| cache.poll_pending(cx))).unwrap();
- // _and_ service 0 should now be callable
- assert!(task.enter(|cx, _| cache.check_ready(cx, &0)).unwrap());
-}
-
-// Tests https://github.com/tower-rs/tower/issues/415
-#[tokio::test(flavor = "current_thread")]
-async fn cancelation_observed() {
- let mut cache = ReadyCache::default();
- let mut handles = vec![];
-
- // NOTE This test passes at 129 items, but fails at 130 items (if coop
- // scheduling interferes with cancelation).
- for _ in 0..130 {
- let (svc, mut handle) = tower_test::mock::pair::<(), ()>();
- handle.allow(1);
- cache.push("ep0", svc);
- handles.push(handle);
- }
-
- struct Ready(ReadyCache<&'static str, tower_test::mock::Mock<(), ()>, ()>);
- impl Unpin for Ready {}
- impl std::future::Future for Ready {
- type Output = Result<(), error::Failed<&'static str>>;
- fn poll(
- self: Pin<&mut Self>,
- cx: &mut std::task::Context<'_>,
- ) -> std::task::Poll<Self::Output> {
- self.get_mut().0.poll_pending(cx)
- }
- }
- Ready(cache).await.unwrap();
-}
diff --git a/vendor/tower/tests/retry/main.rs b/vendor/tower/tests/retry/main.rs
deleted file mode 100644
index 7ce220b4..00000000
--- a/vendor/tower/tests/retry/main.rs
+++ /dev/null
@@ -1,226 +0,0 @@
-#![cfg(feature = "retry")]
-#[path = "../support.rs"]
-mod support;
-
-use futures_util::future;
-use tokio_test::{assert_pending, assert_ready_err, assert_ready_ok, task};
-use tower::retry::Policy;
-use tower_test::{assert_request_eq, mock};
-
-#[tokio::test(flavor = "current_thread")]
-async fn retry_errors() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service(RetryErrors);
-
- assert_ready_ok!(service.poll_ready());
-
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_error("retry me");
-
- assert_pending!(fut.poll());
-
- assert_request_eq!(handle, "hello").send_response("world");
-
- assert_eq!(fut.into_inner().await.unwrap(), "world");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn retry_limit() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service(Limit(2));
-
- assert_ready_ok!(service.poll_ready());
-
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_error("retry 1");
- assert_pending!(fut.poll());
-
- assert_request_eq!(handle, "hello").send_error("retry 2");
- assert_pending!(fut.poll());
-
- assert_request_eq!(handle, "hello").send_error("retry 3");
- assert_eq!(assert_ready_err!(fut.poll()).to_string(), "retry 3");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn retry_error_inspection() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service(UnlessErr("reject"));
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_error("retry 1");
- assert_pending!(fut.poll());
-
- assert_request_eq!(handle, "hello").send_error("reject");
- assert_eq!(assert_ready_err!(fut.poll()).to_string(), "reject");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn retry_cannot_clone_request() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service(CannotClone);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_error("retry 1");
- assert_eq!(assert_ready_err!(fut.poll()).to_string(), "retry 1");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn success_with_cannot_clone() {
- let _t = support::trace_init();
-
- // Even though the request couldn't be cloned, if the first request succeeds,
- // it should succeed overall.
- let (mut service, mut handle) = new_service(CannotClone);
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_response("world");
- assert_ready_ok!(fut.poll(), "world");
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn retry_mutating_policy() {
- let _t = support::trace_init();
-
- let (mut service, mut handle) = new_service(MutatingPolicy { remaining: 2 });
-
- assert_ready_ok!(service.poll_ready());
- let mut fut = task::spawn(service.call("hello"));
-
- assert_request_eq!(handle, "hello").send_response("world");
- assert_pending!(fut.poll());
- // the policy alters the request. in real life, this might be setting a header
- assert_request_eq!(handle, "retrying").send_response("world");
-
- assert_pending!(fut.poll());
-
- assert_request_eq!(handle, "retrying").send_response("world");
-
- assert_ready_err!(fut.poll(), "out of retries");
-}
-
-type Req = &'static str;
-type Res = &'static str;
-type InnerError = &'static str;
-type Error = Box<dyn std::error::Error + Send + Sync>;
-type Mock = mock::Mock<Req, Res>;
-type Handle = mock::Handle<Req, Res>;
-
-#[derive(Clone)]
-struct RetryErrors;
-
-impl Policy<Req, Res, Error> for RetryErrors {
- type Future = future::Ready<()>;
- fn retry(&mut self, _: &mut Req, result: &mut Result<Res, Error>) -> Option<Self::Future> {
- if result.is_err() {
- Some(future::ready(()))
- } else {
- None
- }
- }
-
- fn clone_request(&mut self, req: &Req) -> Option<Req> {
- Some(*req)
- }
-}
-
-#[derive(Clone)]
-struct Limit(usize);
-
-impl Policy<Req, Res, Error> for Limit {
- type Future = future::Ready<()>;
- fn retry(&mut self, _: &mut Req, result: &mut Result<Res, Error>) -> Option<Self::Future> {
- if result.is_err() && self.0 > 0 {
- self.0 -= 1;
- Some(future::ready(()))
- } else {
- None
- }
- }
-
- fn clone_request(&mut self, req: &Req) -> Option<Req> {
- Some(*req)
- }
-}
-
-#[derive(Clone)]
-struct UnlessErr(InnerError);
-
-impl Policy<Req, Res, Error> for UnlessErr {
- type Future = future::Ready<()>;
- fn retry(&mut self, _: &mut Req, result: &mut Result<Res, Error>) -> Option<Self::Future> {
- result.as_ref().err().and_then(|err| {
- if err.to_string() != self.0 {
- Some(future::ready(()))
- } else {
- None
- }
- })
- }
-
- fn clone_request(&mut self, req: &Req) -> Option<Req> {
- Some(*req)
- }
-}
-
-#[derive(Clone)]
-struct CannotClone;
-
-impl Policy<Req, Res, Error> for CannotClone {
- type Future = future::Ready<()>;
- fn retry(&mut self, _: &mut Req, _: &mut Result<Res, Error>) -> Option<Self::Future> {
- unreachable!("retry cannot be called since request isn't cloned");
- }
-
- fn clone_request(&mut self, _req: &Req) -> Option<Req> {
- None
- }
-}
-
-/// Test policy that changes the request to `retrying` during retries and the result to `"out of retries"`
-/// when retries are exhausted.
-#[derive(Clone)]
-struct MutatingPolicy {
- remaining: usize,
-}
-
-impl Policy<Req, Res, Error> for MutatingPolicy
-where
- Error: From<&'static str>,
-{
- type Future = future::Ready<()>;
-
- fn retry(&mut self, req: &mut Req, result: &mut Result<Res, Error>) -> Option<Self::Future> {
- if self.remaining == 0 {
- *result = Err("out of retries".into());
- None
- } else {
- *req = "retrying";
- self.remaining -= 1;
- Some(future::ready(()))
- }
- }
-
- fn clone_request(&mut self, req: &Req) -> Option<Req> {
- Some(*req)
- }
-}
-
-fn new_service<P: Policy<Req, Res, Error> + Clone>(
- policy: P,
-) -> (mock::Spawn<tower::retry::Retry<P, Mock>>, Handle) {
- let retry = tower::retry::RetryLayer::new(policy);
- mock::spawn_layer(retry)
-}
diff --git a/vendor/tower/tests/spawn_ready/main.rs b/vendor/tower/tests/spawn_ready/main.rs
deleted file mode 100644
index d1890159..00000000
--- a/vendor/tower/tests/spawn_ready/main.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-#![cfg(feature = "spawn-ready")]
-#[path = "../support.rs"]
-mod support;
-
-use tokio::time;
-use tokio_test::{assert_pending, assert_ready, assert_ready_err, assert_ready_ok};
-use tower::spawn_ready::{SpawnReady, SpawnReadyLayer};
-use tower::util::ServiceExt;
-use tower_test::mock;
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_inner_is_not_ready() {
- time::pause();
-
- let _t = support::trace_init();
-
- let layer = SpawnReadyLayer::new();
- let (mut service, mut handle) = mock::spawn_layer::<(), (), _>(layer);
-
- // Make the service NotReady
- handle.allow(0);
-
- assert_pending!(service.poll_ready());
-
- // Make the service is Ready
- handle.allow(1);
- time::sleep(time::Duration::from_millis(100)).await;
- assert_ready_ok!(service.poll_ready());
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn when_inner_fails() {
- let _t = support::trace_init();
-
- let layer = SpawnReadyLayer::new();
- let (mut service, mut handle) = mock::spawn_layer::<(), (), _>(layer);
-
- // Make the service NotReady
- handle.allow(0);
- handle.send_error("foobar");
-
- assert_eq!(
- assert_ready_err!(service.poll_ready()).to_string(),
- "foobar"
- );
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn propagates_trace_spans() {
- use tracing::Instrument;
-
- let _t = support::trace_init();
-
- let span = tracing::info_span!("my_span");
-
- let service = support::AssertSpanSvc::new(span.clone());
- let service = SpawnReady::new(service);
- let result = tokio::spawn(service.oneshot(()).instrument(span));
-
- result.await.expect("service panicked").expect("failed");
-}
-
-#[cfg(test)]
-#[tokio::test(flavor = "current_thread")]
-async fn abort_on_drop() {
- let (mock, mut handle) = mock::pair::<(), ()>();
- let mut svc = SpawnReady::new(mock);
- handle.allow(0);
-
- // Drive the service to readiness until we signal a drop.
- let (drop_tx, drop_rx) = tokio::sync::oneshot::channel();
- let mut task = tokio_test::task::spawn(async move {
- tokio::select! {
- _ = drop_rx => {}
- _ = svc.ready() => unreachable!("Service must not become ready"),
- }
- });
- assert_pending!(task.poll());
- assert_pending!(handle.poll_request());
-
- // End the task and ensure that the inner service has been dropped.
- assert!(drop_tx.send(()).is_ok());
- tokio_test::assert_ready!(task.poll());
- tokio::task::yield_now().await;
- assert!(tokio_test::assert_ready!(handle.poll_request()).is_none());
-}
diff --git a/vendor/tower/tests/steer/main.rs b/vendor/tower/tests/steer/main.rs
deleted file mode 100644
index 1ff08d32..00000000
--- a/vendor/tower/tests/steer/main.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-#![cfg(feature = "steer")]
-#[path = "../support.rs"]
-mod support;
-
-use futures_util::future::{ready, Ready};
-use std::task::{Context, Poll};
-use tower::steer::Steer;
-use tower_service::Service;
-
-type StdError = Box<dyn std::error::Error + Send + Sync + 'static>;
-
-struct MyService(u8, bool);
-
-impl Service<String> for MyService {
- type Response = u8;
- type Error = StdError;
- type Future = Ready<Result<u8, Self::Error>>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- if !self.1 {
- Poll::Pending
- } else {
- Poll::Ready(Ok(()))
- }
- }
-
- fn call(&mut self, _req: String) -> Self::Future {
- ready(Ok(self.0))
- }
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn pick_correctly() {
- let _t = support::trace_init();
- let srvs = vec![MyService(42, true), MyService(57, true)];
- let mut st = Steer::new(srvs, |_: &_, _: &[_]| 1);
-
- futures_util::future::poll_fn(|cx| st.poll_ready(cx))
- .await
- .unwrap();
- let r = st.call(String::from("foo")).await.unwrap();
- assert_eq!(r, 57);
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn pending_all_ready() {
- let _t = support::trace_init();
-
- let srvs = vec![MyService(42, true), MyService(57, false)];
- let mut st = Steer::new(srvs, |_: &_, _: &[_]| 0);
-
- let p = futures_util::poll!(futures_util::future::poll_fn(|cx| st.poll_ready(cx)));
- match p {
- Poll::Pending => (),
- _ => panic!(
- "Steer should not return poll_ready if at least one component service is not ready"
- ),
- }
-}
diff --git a/vendor/tower/tests/support.rs b/vendor/tower/tests/support.rs
deleted file mode 100644
index b5470822..00000000
--- a/vendor/tower/tests/support.rs
+++ /dev/null
@@ -1,112 +0,0 @@
-#![allow(dead_code)]
-
-use futures::future;
-use std::fmt;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use tokio::sync::mpsc;
-use tokio_stream::Stream;
-use tower::Service;
-
-pub(crate) fn trace_init() -> tracing::subscriber::DefaultGuard {
- let subscriber = tracing_subscriber::fmt()
- .with_test_writer()
- .with_max_level(tracing::Level::TRACE)
- .with_thread_names(true)
- .finish();
- tracing::subscriber::set_default(subscriber)
-}
-
-pin_project_lite::pin_project! {
- #[derive(Clone, Debug)]
- pub struct IntoStream<S> {
- #[pin]
- inner: S
- }
-}
-
-impl<S> IntoStream<S> {
- pub fn new(inner: S) -> Self {
- Self { inner }
- }
-}
-
-impl<I> Stream for IntoStream<mpsc::Receiver<I>> {
- type Item = I;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- self.project().inner.poll_recv(cx)
- }
-}
-
-impl<I> Stream for IntoStream<mpsc::UnboundedReceiver<I>> {
- type Item = I;
-
- fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
- self.project().inner.poll_recv(cx)
- }
-}
-
-#[derive(Clone, Debug)]
-pub struct AssertSpanSvc {
- span: tracing::Span,
- polled: bool,
-}
-
-pub struct AssertSpanError(String);
-
-impl fmt::Debug for AssertSpanError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.0, f)
- }
-}
-
-impl fmt::Display for AssertSpanError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.0, f)
- }
-}
-
-impl std::error::Error for AssertSpanError {}
-
-impl AssertSpanSvc {
- pub fn new(span: tracing::Span) -> Self {
- Self {
- span,
- polled: false,
- }
- }
-
- fn check(&self, func: &str) -> Result<(), AssertSpanError> {
- let current_span = tracing::Span::current();
- tracing::debug!(?current_span, ?self.span, %func);
- if current_span == self.span {
- return Ok(());
- }
-
- Err(AssertSpanError(format!(
- "{} called outside expected span\n expected: {:?}\n current: {:?}",
- func, self.span, current_span
- )))
- }
-}
-
-impl Service<()> for AssertSpanSvc {
- type Response = ();
- type Error = AssertSpanError;
- type Future = future::Ready<Result<Self::Response, Self::Error>>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- if self.polled {
- return Poll::Ready(self.check("poll_ready"));
- }
-
- cx.waker().wake_by_ref();
- self.polled = true;
- Poll::Pending
- }
-
- fn call(&mut self, _: ()) -> Self::Future {
- future::ready(self.check("call"))
- }
-}
diff --git a/vendor/tower/tests/util/call_all.rs b/vendor/tower/tests/util/call_all.rs
deleted file mode 100644
index 6c3b1950..00000000
--- a/vendor/tower/tests/util/call_all.rs
+++ /dev/null
@@ -1,253 +0,0 @@
-use super::support;
-use futures_core::Stream;
-use futures_util::{
- future::{ready, Ready},
- pin_mut,
-};
-use std::fmt;
-use std::future::Future;
-use std::task::{Context, Poll};
-use std::{cell::Cell, rc::Rc};
-use tokio_test::{assert_pending, assert_ready, task};
-use tower::util::ServiceExt;
-use tower_service::*;
-use tower_test::{assert_request_eq, mock, mock::Mock};
-
-type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
-
-#[derive(Debug, Eq, PartialEq)]
-struct Srv {
- admit: Rc<Cell<bool>>,
- count: Rc<Cell<usize>>,
-}
-impl Service<&'static str> for Srv {
- type Response = &'static str;
- type Error = Error;
- type Future = Ready<Result<Self::Response, Error>>;
-
- fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- if !self.admit.get() {
- return Poll::Pending;
- }
-
- self.admit.set(false);
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: &'static str) -> Self::Future {
- self.count.set(self.count.get() + 1);
- ready(Ok(req))
- }
-}
-
-#[test]
-fn ordered() {
- let _t = support::trace_init();
-
- let mut mock = task::spawn(());
-
- let admit = Rc::new(Cell::new(false));
- let count = Rc::new(Cell::new(0));
- let srv = Srv {
- count: count.clone(),
- admit: admit.clone(),
- };
- let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
- let ca = srv.call_all(support::IntoStream::new(rx));
- pin_mut!(ca);
-
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
- tx.send("one").unwrap();
- mock.is_woken();
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
- admit.set(true);
- let v = assert_ready!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("one"));
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
- admit.set(true);
- tx.send("two").unwrap();
- mock.is_woken();
- tx.send("three").unwrap();
- let v = assert_ready!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("two"));
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
- admit.set(true);
- let v = assert_ready!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("three"));
- admit.set(true);
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
- admit.set(true);
- tx.send("four").unwrap();
- mock.is_woken();
- let v = assert_ready!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("four"));
- assert_pending!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)));
-
- // need to be ready since impl doesn't know it'll get EOF
- admit.set(true);
-
- // When we drop the request stream, CallAll should return None.
- drop(tx);
- mock.is_woken();
- let v = assert_ready!(mock.enter(|cx, _| ca.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert!(v.is_none());
- assert_eq!(count.get(), 4);
-
- // We should also be able to recover the wrapped Service.
- assert_eq!(ca.take_service(), Srv { count, admit });
-}
-
-#[tokio::test(flavor = "current_thread")]
-async fn unordered() {
- let _t = support::trace_init();
-
- let (mock, handle) = mock::pair::<_, &'static str>();
- pin_mut!(handle);
-
- let mut task = task::spawn(());
- let requests = futures_util::stream::iter(&["one", "two"]);
-
- let svc = mock.call_all(requests).unordered();
- pin_mut!(svc);
-
- assert_pending!(task.enter(|cx, _| svc.as_mut().poll_next(cx)));
-
- let resp1 = assert_request_eq!(handle, &"one");
- let resp2 = assert_request_eq!(handle, &"two");
-
- resp2.send_response("resp 1");
-
- let v = assert_ready!(task.enter(|cx, _| svc.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("resp 1"));
- assert_pending!(task.enter(|cx, _| svc.as_mut().poll_next(cx)));
-
- resp1.send_response("resp 2");
-
- let v = assert_ready!(task.enter(|cx, _| svc.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert_eq!(v, Some("resp 2"));
-
- let v = assert_ready!(task.enter(|cx, _| svc.as_mut().poll_next(cx)))
- .transpose()
- .unwrap();
- assert!(v.is_none());
-}
-
-#[tokio::test]
-async fn pending() {
- let _t = support::trace_init();
-
- let (mock, mut handle) = mock::pair::<_, &'static str>();
-
- let mut task = task::spawn(());
-
- let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
- let ca = mock.call_all(support::IntoStream::new(rx));
- pin_mut!(ca);
-
- assert_pending!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- tx.send("req").unwrap();
- assert_pending!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- assert_request_eq!(handle, "req").send_response("res");
- let res = assert_ready!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- assert_eq!(res.transpose().unwrap(), Some("res"));
- assert_pending!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
-}
-
-#[tokio::test]
-async fn poll_ready_error() {
- struct ReadyOnceThenErr {
- polled: bool,
- inner: Mock<&'static str, &'static str>,
- }
-
- #[derive(Debug)]
- pub struct StringErr(String);
-
- impl fmt::Display for StringErr {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.0, f)
- }
- }
-
- impl std::error::Error for StringErr {}
-
- impl Service<&'static str> for ReadyOnceThenErr {
- type Response = &'static str;
- type Error = Error;
- type Future = <Mock<&'static str, &'static str> as Service<&'static str>>::Future;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- match self.polled {
- false => {
- self.polled = true;
- self.inner.poll_ready(cx)
- }
- true => Poll::Ready(Err(Box::new(StringErr("poll_ready error".to_string())))),
- }
- }
-
- fn call(&mut self, req: &'static str) -> Self::Future {
- self.inner.call(req)
- }
- }
-
- let _t = support::trace_init();
-
- let (mock, mut handle) = mock::pair::<_, &'static str>();
- let svc = ReadyOnceThenErr {
- polled: false,
- inner: mock,
- };
- let mut task = task::spawn(());
-
- // "req0" is called, then "req1" receives a poll_ready error so "req2" will never be called.
- // Still the response from "req0" is waited on before ending the `call_all` stream.
- let requests = futures_util::stream::iter(vec!["req0", "req1", "req2"]);
- let ca = svc.call_all(requests);
- pin_mut!(ca);
- let err = assert_ready!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- assert_eq!(err.unwrap().unwrap_err().to_string(), "poll_ready error");
- assert_request_eq!(handle, "req0").send_response("res0");
- let res = assert_ready!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- assert_eq!(res.transpose().unwrap(), Some("res0"));
- let res = assert_ready!(task.enter(|cx, _| ca.as_mut().poll_next(cx)));
- assert_eq!(res.transpose().unwrap(), None);
-}
-
-#[tokio::test]
-async fn stream_does_not_block_service() {
- use tower::buffer::Buffer;
- use tower::limit::ConcurrencyLimit;
-
- let _t = support::trace_init();
- let (mock, mut handle) = mock::pair::<_, &'static str>();
- let mut task = task::spawn(());
-
- let svc = Buffer::new(ConcurrencyLimit::new(mock, 1), 1);
-
- // Always pending, but should not occupy a concurrency slot.
- let pending = svc.clone().call_all(futures_util::stream::pending());
- pin_mut!(pending);
- assert_pending!(task.enter(|cx, _| pending.as_mut().poll_next(cx)));
-
- let call = svc.oneshot("req");
- pin_mut!(call);
- assert_pending!(task.enter(|cx, _| call.as_mut().poll(cx)));
- assert_request_eq!(handle, "req").send_response("res");
- let res = assert_ready!(task.enter(|cx, _| call.as_mut().poll(cx)));
- assert_eq!(res.unwrap(), "res");
-}
diff --git a/vendor/tower/tests/util/main.rs b/vendor/tower/tests/util/main.rs
deleted file mode 100644
index 18b7813f..00000000
--- a/vendor/tower/tests/util/main.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-#![cfg(feature = "util")]
-#![allow(clippy::type_complexity)]
-
-mod call_all;
-mod oneshot;
-mod service_fn;
-#[path = "../support.rs"]
-pub(crate) mod support;
diff --git a/vendor/tower/tests/util/oneshot.rs b/vendor/tower/tests/util/oneshot.rs
deleted file mode 100644
index 54120e72..00000000
--- a/vendor/tower/tests/util/oneshot.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use std::task::{Context, Poll};
-use std::{future::Future, pin::Pin};
-use tower::util::ServiceExt;
-use tower_service::Service;
-
-#[tokio::test(flavor = "current_thread")]
-async fn service_driven_to_readiness() {
- // This test ensures that `oneshot` will repeatedly call `poll_ready` until
- // the service is ready.
- let _t = super::support::trace_init();
-
- struct PollMeTwice {
- ready: bool,
- }
- impl Service<()> for PollMeTwice {
- type Error = ();
- type Response = ();
- type Future = Pin<
- Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + Sync + 'static>,
- >;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), ()>> {
- if self.ready {
- Poll::Ready(Ok(()))
- } else {
- self.ready = true;
- cx.waker().wake_by_ref();
- Poll::Pending
- }
- }
-
- fn call(&mut self, _: ()) -> Self::Future {
- assert!(self.ready, "service not driven to readiness!");
- Box::pin(async { Ok(()) })
- }
- }
-
- let svc = PollMeTwice { ready: false };
- svc.oneshot(()).await.unwrap();
-}
diff --git a/vendor/tower/tests/util/service_fn.rs b/vendor/tower/tests/util/service_fn.rs
deleted file mode 100644
index ac6bf06f..00000000
--- a/vendor/tower/tests/util/service_fn.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use futures_util::future::ready;
-use tower::util::service_fn;
-use tower_service::Service;
-
-#[tokio::test(flavor = "current_thread")]
-async fn simple() {
- let _t = super::support::trace_init();
-
- let mut add_one = service_fn(|req| ready(Ok::<_, ()>(req + 1)));
- let answer = add_one.call(1).await.unwrap();
- assert_eq!(answer, 2);
-}