summaryrefslogtreecommitdiff
path: root/vendor/hyper-util
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/hyper-util')
-rw-r--r--vendor/hyper-util/.cargo-checksum.json1
-rw-r--r--vendor/hyper-util/CHANGELOG.md116
-rw-r--r--vendor/hyper-util/Cargo.lock892
-rw-r--r--vendor/hyper-util/Cargo.toml247
-rw-r--r--vendor/hyper-util/LICENSE19
-rw-r--r--vendor/hyper-util/README.md11
-rw-r--r--vendor/hyper-util/examples/client.rs37
-rw-r--r--vendor/hyper-util/examples/server.rs75
-rw-r--r--vendor/hyper-util/examples/server_graceful.rs64
-rw-r--r--vendor/hyper-util/src/client/client.rs132
-rw-r--r--vendor/hyper-util/src/client/legacy/client.rs1690
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/capture.rs187
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/dns.rs360
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/http.rs1468
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/mod.rs444
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/mod.rs6
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/mod.rs121
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/errors.rs22
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/messages.rs131
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/mod.rs183
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/errors.rs47
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/messages.rs347
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/mod.rs313
-rw-r--r--vendor/hyper-util/src/client/legacy/connect/proxy/tunnel.rs258
-rw-r--r--vendor/hyper-util/src/client/legacy/mod.rs10
-rw-r--r--vendor/hyper-util/src/client/legacy/pool.rs1093
-rw-r--r--vendor/hyper-util/src/client/mod.rs8
-rw-r--r--vendor/hyper-util/src/client/proxy/matcher.rs848
-rw-r--r--vendor/hyper-util/src/client/proxy/mod.rs3
-rw-r--r--vendor/hyper-util/src/client/service.rs8
-rw-r--r--vendor/hyper-util/src/common/exec.rs53
-rw-r--r--vendor/hyper-util/src/common/future.rs30
-rw-r--r--vendor/hyper-util/src/common/lazy.rs78
-rw-r--r--vendor/hyper-util/src/common/mod.rs19
-rw-r--r--vendor/hyper-util/src/common/rewind.rs137
-rw-r--r--vendor/hyper-util/src/common/sync.rs67
-rw-r--r--vendor/hyper-util/src/common/timer.rs38
-rw-r--r--vendor/hyper-util/src/error.rs14
-rw-r--r--vendor/hyper-util/src/lib.rs18
-rw-r--r--vendor/hyper-util/src/rt/io.rs34
-rw-r--r--vendor/hyper-util/src/rt/mod.rs12
-rw-r--r--vendor/hyper-util/src/rt/tokio.rs339
-rw-r--r--vendor/hyper-util/src/rt/tokio/with_hyper_io.rs170
-rw-r--r--vendor/hyper-util/src/rt/tokio/with_tokio_io.rs178
-rw-r--r--vendor/hyper-util/src/server/conn/auto/mod.rs1304
-rw-r--r--vendor/hyper-util/src/server/conn/auto/upgrade.rs68
-rw-r--r--vendor/hyper-util/src/server/conn/mod.rs4
-rw-r--r--vendor/hyper-util/src/server/graceful.rs488
-rw-r--r--vendor/hyper-util/src/server/mod.rs6
-rw-r--r--vendor/hyper-util/src/service/glue.rs72
-rw-r--r--vendor/hyper-util/src/service/mod.rs32
-rw-r--r--vendor/hyper-util/src/service/oneshot.rs63
-rw-r--r--vendor/hyper-util/tests/legacy_client.rs1488
-rw-r--r--vendor/hyper-util/tests/proxy.rs478
-rw-r--r--vendor/hyper-util/tests/test_utils/mod.rs175
55 files changed, 0 insertions, 14506 deletions
diff --git a/vendor/hyper-util/.cargo-checksum.json b/vendor/hyper-util/.cargo-checksum.json
deleted file mode 100644
index 419986c6..00000000
--- a/vendor/hyper-util/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{"CHANGELOG.md":"3d94063e7ba72aab4cba9db55938042b5d3e88d37d9bf00157096c914399b5c1","Cargo.lock":"fff7d1ee9c58689a12f56b324703b348e8e41e0b3c8fd82a0d1891382bd28877","Cargo.toml":"9e2b97be50fcb6347a6442cc5abebc2fd382138fd01fad80b3e3aa8a99b4cc33","LICENSE":"9e0a97848ea543aef745c98e84fde696a9a3e0735538f6daefdd3cb1942effc1","README.md":"b47cc5fcb41e5d802bd175f0014f5636eefb897419720ab125b446e0b7f6f666","examples/client.rs":"8153fa0d74b3653f9cee113e2d42448febe20562f9bccc7a32b2fc151fc47dd0","examples/server.rs":"2e04d3f86a76810bb170d8c1633b44b4c5989822b17dbed0f3b63c221cc23f1a","examples/server_graceful.rs":"e4d65a46c7d2f44266b5c540596da1e753eeb74c580c9265296352d6e8bde83c","src/client/client.rs":"fc61cc85756232e33addf964ed95ad4f221fefa00be1f1a341a93a2a90af041b","src/client/legacy/client.rs":"3b7bf77b0a181e5cc437313f9b85a23467efffc0409ad8c99d16b63401bffc40","src/client/legacy/connect/capture.rs":"d14b19b2a944a7ef33e637f4494a7e2449e2cdc1200a977841c73b85c6b2e59f","src/client/legacy/connect/dns.rs":"611f8808b28c532325f43906da11cf6b9e3116a39de360c2defa9dd587e9c917","src/client/legacy/connect/http.rs":"fbab36a1be0cddabeec521ebbe91b400fa3a5efd5849bcb59d0204e25c29eea0","src/client/legacy/connect/mod.rs":"4ac3ed01b01b309f1e0aba913b8c5ca354ac84e576c9ee0f5ae5579a45fb0503","src/client/legacy/connect/proxy/mod.rs":"2dc7d98c83f3303d9f275fe3517a251ed7b0bc78ff101ab8f641158ee6c98abf","src/client/legacy/connect/proxy/socks/mod.rs":"6bdbb4c61f9abe36149456c17988e12e3d5cb801f3bb05c2ee0e8de2ebe7788b","src/client/legacy/connect/proxy/socks/v4/errors.rs":"ae6df7f24ea4c81aaa955255ed61885f1fd5ffde597a050f6b08218f7c5ee134","src/client/legacy/connect/proxy/socks/v4/messages.rs":"b51d7fbd219621c517532533181bd0a6fc7db208fb6186ed8b6600f84919c755","src/client/legacy/connect/proxy/socks/v4/mod.rs":"06054448509d65a246f20db938fba19bb522a82fb89f9cc5193e7a2e37485c50","src/client/legacy/connect/proxy/socks/v5/errors.rs":"e28013c08b05fc5743d9a475d4ab89bd563d22f0b2009b504f19b4974fde8880","src/client/legac
y/connect/proxy/socks/v5/messages.rs":"9f25709c2b40dff0fc8cd05991a4ae8dea1f1f2df23e33bce77605e013f5537c","src/client/legacy/connect/proxy/socks/v5/mod.rs":"1c2d8094435b653e20967c9c365cd39b038c1890b6696addb8bc071602c5f706","src/client/legacy/connect/proxy/tunnel.rs":"873bc32f7dfe98749f2bbb0322276fc0e0f3672efd9720681f5435d503e191aa","src/client/legacy/mod.rs":"a893df119a025ce5e8226868f76130b50b15dd0a4c8ae007556b14f29cd3520a","src/client/legacy/pool.rs":"bcfaa802f8845d96cc7e53039a7e5fea68f30fdb8d87e637665bba42df29582d","src/client/mod.rs":"6e67909d22cb1aca076ae1705017d904c13b6e99f8f589489a00ef80be8e2f11","src/client/proxy/matcher.rs":"7c4ec6270389b28bc3b786a16d4e3d96a725b1982281a04abc77d383603c6e10","src/client/proxy/mod.rs":"1a6b7fdc17aa57aa356a5b8cd44c8a8401aa0a87258334ff8da62444a1b0885a","src/client/service.rs":"506c31d99d94f2ca26e31c2e5546e1d3cae040ed966bc51581fc53f3f7e2c1e6","src/common/exec.rs":"74f899c6a3972fcbecde525c1601ba16945db863e4451660cc6da6008c5d3738","src/common/future.rs":"88109d6f40f1a6ac6783c3cb09497d86356eb6f23af808e7109e8b6831c22cc0","src/common/lazy.rs":"fe7cfc2b88a15f7ac4ee2459f525857d8324d250c51ce6a3047c3fc4ef559bab","src/common/mod.rs":"3158f594f254ea730f4c6e5e87d1443f9e0e00e162083ebb9fea557024b7e826","src/common/rewind.rs":"333cfde1eefa3419de76ff65367d30a48e03560cc18f4b5770d883b98e7ac864","src/common/sync.rs":"047d79590bbbf59aae35664b1b38f4d0e6478aa10116ab33a9f231e59b3fa32d","src/common/timer.rs":"7dd2aa8256e5588248fdfa9965152c4ed944dbdebff5cf377fd09b579229cfb5","src/error.rs":"c10d50e4fd57184f9c0529c0f42852845d24455c3563ef445becbec67df67d79","src/lib.rs":"02fc3365227e2123bc8bbbd9d4f4e11f669f5e2e5ab8b0f5f9ea8b4a78612e13","src/rt/io.rs":"cf1780e8611b29977596423ac97eb77165ecfc66a24229f9ab659cde88c5431d","src/rt/mod.rs":"484f478be32e25714fa7a24d12626e5e828ac36960a54a7ec1650948e1088159","src/rt/tokio.rs":"f2e59972a29c5acafbcee7b15590db42db5699dc581daf30a55805861b835568","src/rt/tokio/with_hyper_io.rs":"67b91e27248354ae138507a78dd62bbbe845ef16bcf6f
59e00923c5877541f7e","src/rt/tokio/with_tokio_io.rs":"70f30a1475ed64047800da86b4e24a4a74e340be5046bc043f2b0b1ac8000fa5","src/server/conn/auto/mod.rs":"2bbaa7cb863ba61b5cef61b68cef4a556fcc6a99ee4dd1e32fa679169e86f969","src/server/conn/auto/upgrade.rs":"293d823c1fb34d4bb505f835b8b26255a7bafc10c56bc9235c3cb151ebb1e72c","src/server/conn/mod.rs":"f192ca3fb030351ad22dba11f58edf02503598e3f9c18652a445f3973d16b723","src/server/graceful.rs":"651ff44015798b7b837d27e11f8c48f5ec4878463caa989e2e49ef0bb36a3db5","src/server/mod.rs":"c30d25551b75ec6c2915ba9da0a5638488c2760a27c589c02415dec1441fb746","src/service/glue.rs":"dffde87ea652504e6260c52b7ddf28ba4c3df84e82acaf2ddcbd562abeb799f6","src/service/mod.rs":"e70a66aa974af3500cd0ab1e68397079f8b271af69276b6df90216df18420e0c","src/service/oneshot.rs":"38bd544920c6c9af5c1eb825d43b601ebaba703c84c6543cb7ac8a4273dac3c4","tests/legacy_client.rs":"ab7e889aad9e8f2d54440d61e57fcfc982b9913718765fb9167b63d7f19a6267","tests/proxy.rs":"a17d630e0a65a876c1718cc564424e7a8071686103dc63f70b681229305b8d76","tests/test_utils/mod.rs":"dd39bb194b214b51b225aa94b746d1168f79c04cde4935ab8ceb157e2f8addfe"},"package":"dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb"} \ No newline at end of file
diff --git a/vendor/hyper-util/CHANGELOG.md b/vendor/hyper-util/CHANGELOG.md
deleted file mode 100644
index 434d234c..00000000
--- a/vendor/hyper-util/CHANGELOG.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# 0.1.14 (2025-06-04)
-
-- Fix `HttpConnector` to defer address family order to resolver sort order.
-- Fix `proxy::Matcher` to find HTTPS system proxies on Windows.
-
-# 0.1.13 (2025-05-27)
-
-- Fix `HttpConnector` to always prefer IPv6 addresses first, if happy eyeballs is enabled.
-- Fix `legacy::Client` to return better errors if available on the connection.
-
-# 0.1.12 (2025-05-19)
-
-- Add `client::legacy::proxy::Tunnel` connector that wraps another connector with HTTP tunneling.
-- Add `client::legacy::proxy::{SocksV4, SocksV5}` connectors that wraps another connector with SOCKS.
-- Add `client::proxy::matcher::Matcher` type that can use environment variables to match proxy rules.
-- Add `server::graceful::Watcher` type that can be sent to watch a connection in another task.
-- Add `GracefulShutdown::count()` method to get number of currently watched connections.
-- Fix missing `must_use` attributes on `Connection` futures.
-- Fix tracing span in GAI resolver that can cause panics.
-
-
-# 0.1.11 (2025-03-31)
-
-- Add `tracing` crate feature with support in `TokioExecutor`.
-- Add `HttpConnector::interface()` support for macOS and Solarish systems.
-- Add `rt::WithHyperIo` and `rt::WithTokioIo` combinators.
-- Add `auto_date_header()` for auto server builder.
-- Add `max_local_error_reset_streams()` for auto server builder.
-- Add `ignore_invalid_headers()` for auto server builder.
-- Add methods to determine if auto server is configured for HTTP/1 or HTTP/2.
-- Implement `Connection` for `UnixStream` and `NamedPipeClient`.
-- Fix HTTP/2 websocket requests sent through `legacy::Client`.
-
-# 0.1.10 (2024-10-28)
-
-- Add `http2_max_header_list_size(num)` option to legacy client builder.
-- Add `set_tcp_user_timeout(dur)` option to legacy `HttpConnector`.
-
-# 0.1.9 (2024-09-24)
-
-- Add support for `client::legacy` DNS resolvers to set non-zero ports on returned addresses.
-- Fix `client::legacy` wrongly retrying pooled connections that were created successfully but failed immediately after, resulting in a retry loop.
-
-
-# 0.1.8 (2024-09-09)
-
-- Add `server::conn::auto::upgrade::downcast()` for use with auto connection upgrades.
-
-# 0.1.7 (2024-08-06)
-
-- Add `Connected::poison()` to `legacy` client, a port from hyper v0.14.x.
-- Add `Error::connect_info()` to `legacy` client, a port from hyper v0.14.x.
-
-# 0.1.6 (2024-07-01)
-
-- Add support for AIX operating system to `legacy` client.
-- Fix `legacy` client to better use dying pooled connections.
-
-# 0.1.5 (2024-05-28)
-
-- Add `server::graceful::GracefulShutdown` helper to coordinate over many connections.
-- Add `server::conn::auto::Connection::into_owned()` to unlink lifetime from `Builder`.
-- Allow `service` module to be available with only `service` feature enabled.
-
-# 0.1.4 (2024-05-24)
-
-- Add `initial_max_send_streams()` to `legacy` client builder
-- Add `max_pending_accept_reset_streams()` to `legacy` client builder
-- Add `max_headers(usize)` to `auto` server builder
-- Add `http1_onl()` and `http2_only()` to `auto` server builder
-- Add connection capturing API to `legacy` client
-- Add `impl Connection for TokioIo`
-- Fix graceful shutdown hanging on reading the HTTP version
-
-# 0.1.3 (2024-01-31)
-
-### Added
-
-- Add `Error::is_connect()` which returns true if error came from client `Connect`.
-- Add timer support to `legacy` pool.
-- Add support to enable http1/http2 parts of `auto::Builder` individually.
-
-### Fixed
-
-- Fix `auto` connection so it can handle requests shorter than the h2 preface.
-- Fix `legacy::Client` to no longer error when keep-alive is diabled.
-
-# 0.1.2 (2023-12-20)
-
-### Added
-
-- Add `graceful_shutdown()` method to `auto` connections.
-- Add `rt::TokioTimer` type that implements `hyper::rt::Timer`.
-- Add `service::TowerToHyperService` adapter, allowing using `tower::Service`s as a `hyper::service::Service`.
-- Implement `Clone` for `auto::Builder`.
-- Exports `legacy::{Builder, ResponseFuture}`.
-
-### Fixed
-
-- Enable HTTP/1 upgrades on the `legacy::Client`.
-- Prevent divide by zero if DNS returns 0 addresses.
-
-# 0.1.1 (2023-11-17)
-
-### Added
-
-- Make `server-auto` enable the `server` feature.
-
-### Fixed
-
-- Reduce `Send` bounds requirements for `auto` connections.
-- Docs: enable all features when generating.
-
-# 0.1.0 (2023-11-16)
-
-Initial release.
diff --git a/vendor/hyper-util/Cargo.lock b/vendor/hyper-util/Cargo.lock
deleted file mode 100644
index f971e413..00000000
--- a/vendor/hyper-util/Cargo.lock
+++ /dev/null
@@ -1,892 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "addr2line"
-version = "0.24.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
-dependencies = [
- "gimli",
-]
-
-[[package]]
-name = "adler2"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
-
-[[package]]
-name = "aho-corasick"
-version = "1.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
-dependencies = [
- "memchr",
-]
-
-[[package]]
-name = "async-stream"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
-dependencies = [
- "async-stream-impl",
- "futures-core",
- "pin-project-lite",
-]
-
-[[package]]
-name = "async-stream-impl"
-version = "0.3.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "atomic-waker"
-version = "1.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
-
-[[package]]
-name = "autocfg"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
-
-[[package]]
-name = "backtrace"
-version = "0.3.74"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
-dependencies = [
- "addr2line",
- "cfg-if",
- "libc",
- "miniz_oxide",
- "object",
- "rustc-demangle",
- "windows-targets",
-]
-
-[[package]]
-name = "base64"
-version = "0.22.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
-
-[[package]]
-name = "bitflags"
-version = "2.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"
-
-[[package]]
-name = "bytes"
-version = "1.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "core-foundation"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
-[[package]]
-name = "core-foundation-sys"
-version = "0.8.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
-
-[[package]]
-name = "env_logger"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
-dependencies = [
- "humantime",
- "is-terminal",
- "log",
- "regex",
- "termcolor",
-]
-
-[[package]]
-name = "equivalent"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
-
-[[package]]
-name = "fnv"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
-
-[[package]]
-name = "futures-channel"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
-dependencies = [
- "futures-core",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
-
-[[package]]
-name = "futures-sink"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
-
-[[package]]
-name = "futures-task"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
-
-[[package]]
-name = "futures-util"
-version = "0.3.31"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
-dependencies = [
- "futures-core",
- "futures-task",
- "pin-project-lite",
- "pin-utils",
-]
-
-[[package]]
-name = "gimli"
-version = "0.31.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
-
-[[package]]
-name = "h2"
-version = "0.4.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2"
-dependencies = [
- "atomic-waker",
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "http",
- "indexmap",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
-[[package]]
-name = "hashbrown"
-version = "0.15.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
-
-[[package]]
-name = "hermit-abi"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e"
-
-[[package]]
-name = "http"
-version = "1.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
-[[package]]
-name = "http-body"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
-dependencies = [
- "bytes",
- "http",
-]
-
-[[package]]
-name = "http-body-util"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
-dependencies = [
- "bytes",
- "futures-core",
- "http",
- "http-body",
- "pin-project-lite",
-]
-
-[[package]]
-name = "httparse"
-version = "1.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87"
-
-[[package]]
-name = "httpdate"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
-
-[[package]]
-name = "humantime"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f"
-
-[[package]]
-name = "hyper"
-version = "1.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80"
-dependencies = [
- "bytes",
- "futures-channel",
- "futures-util",
- "h2",
- "http",
- "http-body",
- "httparse",
- "httpdate",
- "itoa",
- "pin-project-lite",
- "smallvec",
- "tokio",
- "want",
-]
-
-[[package]]
-name = "hyper-util"
-version = "0.1.14"
-dependencies = [
- "base64",
- "bytes",
- "futures-channel",
- "futures-core",
- "futures-util",
- "http",
- "http-body",
- "http-body-util",
- "hyper",
- "ipnet",
- "libc",
- "percent-encoding",
- "pin-project-lite",
- "pnet_datalink",
- "pretty_env_logger",
- "socket2",
- "system-configuration",
- "tokio",
- "tokio-test",
- "tower-service",
- "tracing",
- "windows-registry",
-]
-
-[[package]]
-name = "indexmap"
-version = "2.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058"
-dependencies = [
- "equivalent",
- "hashbrown",
-]
-
-[[package]]
-name = "ipnet"
-version = "2.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
-
-[[package]]
-name = "ipnetwork"
-version = "0.20.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "is-terminal"
-version = "0.4.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9"
-dependencies = [
- "hermit-abi",
- "libc",
- "windows-sys 0.59.0",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
-
-[[package]]
-name = "libc"
-version = "0.2.171"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
-
-[[package]]
-name = "log"
-version = "0.4.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
-
-[[package]]
-name = "memchr"
-version = "2.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
-
-[[package]]
-name = "miniz_oxide"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5"
-dependencies = [
- "adler2",
-]
-
-[[package]]
-name = "mio"
-version = "1.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd"
-dependencies = [
- "libc",
- "wasi",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "no-std-net"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65"
-
-[[package]]
-name = "object"
-version = "0.36.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
-dependencies = [
- "memchr",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.21.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
-
-[[package]]
-name = "percent-encoding"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "pnet_base"
-version = "0.35.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7"
-dependencies = [
- "no-std-net",
-]
-
-[[package]]
-name = "pnet_datalink"
-version = "0.35.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7"
-dependencies = [
- "ipnetwork",
- "libc",
- "pnet_base",
- "pnet_sys",
- "winapi",
-]
-
-[[package]]
-name = "pnet_sys"
-version = "0.35.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b"
-dependencies = [
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "pretty_env_logger"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c"
-dependencies = [
- "env_logger",
- "log",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.94"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.40"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "regex"
-version = "1.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-automata",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
-dependencies = [
- "aho-corasick",
- "memchr",
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-syntax"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
-
-[[package]]
-name = "rustc-demangle"
-version = "0.1.24"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
-
-[[package]]
-name = "serde"
-version = "1.0.219"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.219"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "signal-hook-registry"
-version = "1.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "slab"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "smallvec"
-version = "1.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd"
-
-[[package]]
-name = "socket2"
-version = "0.5.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef"
-dependencies = [
- "libc",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "syn"
-version = "2.0.100"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "system-configuration"
-version = "0.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
-dependencies = [
- "bitflags",
- "core-foundation",
- "system-configuration-sys",
-]
-
-[[package]]
-name = "system-configuration-sys"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
-[[package]]
-name = "termcolor"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "tokio"
-version = "1.44.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a"
-dependencies = [
- "backtrace",
- "bytes",
- "libc",
- "mio",
- "pin-project-lite",
- "signal-hook-registry",
- "socket2",
- "tokio-macros",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "tokio-macros"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "tokio-stream"
-version = "0.1.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
-dependencies = [
- "futures-core",
- "pin-project-lite",
- "tokio",
-]
-
-[[package]]
-name = "tokio-test"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
-dependencies = [
- "async-stream",
- "bytes",
- "futures-core",
- "tokio",
- "tokio-stream",
-]
-
-[[package]]
-name = "tokio-util"
-version = "0.7.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034"
-dependencies = [
- "bytes",
- "futures-core",
- "futures-sink",
- "pin-project-lite",
- "tokio",
-]
-
-[[package]]
-name = "tower-service"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
-
-[[package]]
-name = "tracing"
-version = "0.1.41"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
-dependencies = [
- "pin-project-lite",
- "tracing-core",
-]
-
-[[package]]
-name = "tracing-core"
-version = "0.1.33"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
-dependencies = [
- "once_cell",
-]
-
-[[package]]
-name = "try-lock"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
-
-[[package]]
-name = "want"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
-dependencies = [
- "try-lock",
-]
-
-[[package]]
-name = "wasi"
-version = "0.11.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-util"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
-dependencies = [
- "windows-sys 0.59.0",
-]
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "windows-link"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
-
-[[package]]
-name = "windows-registry"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820"
-dependencies = [
- "windows-link",
- "windows-result",
- "windows-strings",
-]
-
-[[package]]
-name = "windows-result"
-version = "0.3.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
-dependencies = [
- "windows-link",
-]
-
-[[package]]
-name = "windows-strings"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
-dependencies = [
- "windows-link",
-]
-
-[[package]]
-name = "windows-sys"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
-dependencies = [
- "windows-targets",
-]
-
-[[package]]
-name = "windows-sys"
-version = "0.59.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
-dependencies = [
- "windows-targets",
-]
-
-[[package]]
-name = "windows-targets"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
-dependencies = [
- "windows_aarch64_gnullvm",
- "windows_aarch64_msvc",
- "windows_i686_gnu",
- "windows_i686_gnullvm",
- "windows_i686_msvc",
- "windows_x86_64_gnu",
- "windows_x86_64_gnullvm",
- "windows_x86_64_msvc",
-]
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
-
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
diff --git a/vendor/hyper-util/Cargo.toml b/vendor/hyper-util/Cargo.toml
deleted file mode 100644
index 3352c13b..00000000
--- a/vendor/hyper-util/Cargo.toml
+++ /dev/null
@@ -1,247 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2021"
-rust-version = "1.63"
-name = "hyper-util"
-version = "0.1.14"
-authors = ["Sean McArthur <sean@seanmonstar.com>"]
-build = false
-autolib = false
-autobins = false
-autoexamples = false
-autotests = false
-autobenches = false
-description = "hyper utilities"
-homepage = "https://hyper.rs"
-documentation = "https://docs.rs/hyper-util"
-readme = "README.md"
-keywords = [
- "http",
- "hyper",
- "hyperium",
-]
-categories = [
- "network-programming",
- "web-programming::http-client",
- "web-programming::http-server",
-]
-license = "MIT"
-repository = "https://github.com/hyperium/hyper-util"
-
-[package.metadata.docs.rs]
-features = ["full"]
-rustdoc-args = [
- "--cfg",
- "docsrs",
-]
-
-[features]
-__internal_happy_eyeballs_tests = []
-client = [
- "hyper/client",
- "dep:tracing",
- "dep:futures-channel",
- "dep:tower-service",
-]
-client-legacy = [
- "client",
- "dep:socket2",
- "tokio/sync",
- "dep:libc",
- "dep:futures-util",
-]
-client-proxy = [
- "client",
- "dep:base64",
- "dep:ipnet",
- "dep:percent-encoding",
-]
-client-proxy-system = [
- "dep:system-configuration",
- "dep:windows-registry",
-]
-default = []
-full = [
- "client",
- "client-legacy",
- "server",
- "server-auto",
- "server-graceful",
- "service",
- "http1",
- "http2",
- "tokio",
- "tracing",
-]
-http1 = ["hyper/http1"]
-http2 = ["hyper/http2"]
-server = ["hyper/server"]
-server-auto = [
- "server",
- "http1",
- "http2",
-]
-server-graceful = [
- "server",
- "tokio/sync",
-]
-service = ["dep:tower-service"]
-tokio = [
- "dep:tokio",
- "tokio/net",
- "tokio/rt",
- "tokio/time",
-]
-tracing = ["dep:tracing"]
-
-[lib]
-name = "hyper_util"
-path = "src/lib.rs"
-
-[[example]]
-name = "client"
-path = "examples/client.rs"
-required-features = [
- "client-legacy",
- "http1",
- "tokio",
-]
-
-[[example]]
-name = "server"
-path = "examples/server.rs"
-required-features = [
- "server",
- "http1",
- "tokio",
-]
-
-[[example]]
-name = "server_graceful"
-path = "examples/server_graceful.rs"
-required-features = [
- "tokio",
- "server-graceful",
- "server-auto",
-]
-
-[[test]]
-name = "legacy_client"
-path = "tests/legacy_client.rs"
-
-[[test]]
-name = "proxy"
-path = "tests/proxy.rs"
-
-[dependencies.base64]
-version = "0.22"
-optional = true
-
-[dependencies.bytes]
-version = "1.7.1"
-
-[dependencies.futures-channel]
-version = "0.3"
-optional = true
-
-[dependencies.futures-core]
-version = "0.3"
-
-[dependencies.futures-util]
-version = "0.3.16"
-optional = true
-default-features = false
-
-[dependencies.http]
-version = "1.0"
-
-[dependencies.http-body]
-version = "1.0.0"
-
-[dependencies.hyper]
-version = "1.6.0"
-
-[dependencies.ipnet]
-version = "2.9"
-optional = true
-
-[dependencies.libc]
-version = "0.2"
-optional = true
-
-[dependencies.percent-encoding]
-version = "2.3"
-optional = true
-
-[dependencies.pin-project-lite]
-version = "0.2.4"
-
-[dependencies.socket2]
-version = "0.5.9"
-features = ["all"]
-optional = true
-
-[dependencies.tokio]
-version = "1"
-optional = true
-default-features = false
-
-[dependencies.tower-service]
-version = "0.3"
-optional = true
-
-[dependencies.tracing]
-version = "0.1"
-features = ["std"]
-optional = true
-default-features = false
-
-[dev-dependencies.bytes]
-version = "1"
-
-[dev-dependencies.futures-util]
-version = "0.3.16"
-features = ["alloc"]
-default-features = false
-
-[dev-dependencies.http-body-util]
-version = "0.1.0"
-
-[dev-dependencies.hyper]
-version = "1.4.0"
-features = ["full"]
-
-[dev-dependencies.pretty_env_logger]
-version = "0.5"
-
-[dev-dependencies.tokio]
-version = "1"
-features = [
- "macros",
- "test-util",
- "signal",
-]
-
-[dev-dependencies.tokio-test]
-version = "0.4"
-
-[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink]
-version = "0.35.0"
-
-[target.'cfg(target_os = "macos")'.dependencies.system-configuration]
-version = "0.6.1"
-optional = true
-
-[target."cfg(windows)".dependencies.windows-registry]
-version = "0.5"
-optional = true
diff --git a/vendor/hyper-util/LICENSE b/vendor/hyper-util/LICENSE
deleted file mode 100644
index fe064b7c..00000000
--- a/vendor/hyper-util/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2023-2025 Sean McArthur
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/hyper-util/README.md b/vendor/hyper-util/README.md
deleted file mode 100644
index 79cb2132..00000000
--- a/vendor/hyper-util/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# hyper-util
-
-[![crates.io](https://img.shields.io/crates/v/hyper-util.svg)](https://crates.io/crates/hyper-util)
-[![Released API docs](https://docs.rs/hyper-util/badge.svg)](https://docs.rs/hyper-util)
-[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE)
-
-A collection of utilities to do common things with [hyper](https://hyper.rs).
-
-## License
-
-This project is licensed under the [MIT license](./LICENSE).
diff --git a/vendor/hyper-util/examples/client.rs b/vendor/hyper-util/examples/client.rs
deleted file mode 100644
index 04defac0..00000000
--- a/vendor/hyper-util/examples/client.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-use std::env;
-
-use http_body_util::Empty;
-use hyper::Request;
-use hyper_util::client::legacy::{connect::HttpConnector, Client};
-
-#[tokio::main(flavor = "current_thread")]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
- let url = match env::args().nth(1) {
- Some(url) => url,
- None => {
- eprintln!("Usage: client <url>");
- return Ok(());
- }
- };
-
- // HTTPS requires picking a TLS implementation, so give a better
- // warning if the user tries to request an 'https' URL.
- let url = url.parse::<hyper::Uri>()?;
- if url.scheme_str() != Some("http") {
- eprintln!("This example only works with 'http' URLs.");
- return Ok(());
- }
-
- let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(HttpConnector::new());
-
- let req = Request::builder()
- .uri(url)
- .body(Empty::<bytes::Bytes>::new())?;
-
- let resp = client.request(req).await?;
-
- eprintln!("{:?} {:?}", resp.version(), resp.status());
- eprintln!("{:#?}", resp.headers());
-
- Ok(())
-}
diff --git a/vendor/hyper-util/examples/server.rs b/vendor/hyper-util/examples/server.rs
deleted file mode 100644
index 48806d80..00000000
--- a/vendor/hyper-util/examples/server.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-//! This example runs a server that responds to any request with "Hello, world!"
-
-use std::{convert::Infallible, error::Error};
-
-use bytes::Bytes;
-use http::{header::CONTENT_TYPE, Request, Response};
-use http_body_util::{combinators::BoxBody, BodyExt, Full};
-use hyper::{body::Incoming, service::service_fn};
-use hyper_util::{
- rt::{TokioExecutor, TokioIo},
- server::conn::auto::Builder,
-};
-use tokio::{net::TcpListener, task::JoinSet};
-
-/// Function from an incoming request to an outgoing response
-///
-/// This function gets turned into a [`hyper::service::Service`] later via
-/// [`service_fn`]. Instead of doing this, you could also write a type that
-/// implements [`hyper::service::Service`] directly and pass that in place of
-/// writing a function like this and calling [`service_fn`].
-///
-/// This function could use [`Full`] as the body type directly since that's
-/// the only type that can be returned in this case, but this uses [`BoxBody`]
-/// anyway for demonstration purposes, since this is what's usually used when
-/// writing a more complex webserver library.
-async fn handle_request(
- _request: Request<Incoming>,
-) -> Result<Response<BoxBody<Bytes, Infallible>>, Infallible> {
- let response = Response::builder()
- .header(CONTENT_TYPE, "text/plain")
- .body(Full::new(Bytes::from("Hello, world!\n")).boxed())
- .expect("values provided to the builder should be valid");
-
- Ok(response)
-}
-
-#[tokio::main(flavor = "current_thread")]
-async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
- let listen_addr = "127.0.0.1:8000";
- let tcp_listener = TcpListener::bind(listen_addr).await?;
- println!("listening on http://{listen_addr}");
-
- let mut join_set = JoinSet::new();
- loop {
- let (stream, addr) = match tcp_listener.accept().await {
- Ok(x) => x,
- Err(e) => {
- eprintln!("failed to accept connection: {e}");
- continue;
- }
- };
-
- let serve_connection = async move {
- println!("handling a request from {addr}");
-
- let result = Builder::new(TokioExecutor::new())
- .serve_connection(TokioIo::new(stream), service_fn(handle_request))
- .await;
-
- if let Err(e) = result {
- eprintln!("error serving {addr}: {e}");
- }
-
- println!("handled a request from {addr}");
- };
-
- join_set.spawn(serve_connection);
- }
-
- // If you add a method for breaking the above loop (i.e. graceful shutdown),
- // then you may also want to wait for all existing connections to finish
- // being served before terminating the program, which can be done like this:
- //
- // while let Some(_) = join_set.join_next().await {}
-}
diff --git a/vendor/hyper-util/examples/server_graceful.rs b/vendor/hyper-util/examples/server_graceful.rs
deleted file mode 100644
index bfb43a4b..00000000
--- a/vendor/hyper-util/examples/server_graceful.rs
+++ /dev/null
@@ -1,64 +0,0 @@
-use bytes::Bytes;
-use std::convert::Infallible;
-use std::pin::pin;
-use std::time::Duration;
-use tokio::net::TcpListener;
-
-#[tokio::main(flavor = "current_thread")]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
- let listener = TcpListener::bind("127.0.0.1:8080").await?;
-
- let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
- let graceful = hyper_util::server::graceful::GracefulShutdown::new();
- let mut ctrl_c = pin!(tokio::signal::ctrl_c());
-
- loop {
- tokio::select! {
- conn = listener.accept() => {
- let (stream, peer_addr) = match conn {
- Ok(conn) => conn,
- Err(e) => {
- eprintln!("accept error: {}", e);
- tokio::time::sleep(Duration::from_secs(1)).await;
- continue;
- }
- };
- eprintln!("incomming connection accepted: {}", peer_addr);
-
- let stream = hyper_util::rt::TokioIo::new(Box::pin(stream));
-
- let conn = server.serve_connection_with_upgrades(stream, hyper::service::service_fn(|_| async move {
- tokio::time::sleep(Duration::from_secs(5)).await; // emulate slow request
- let body = http_body_util::Full::<Bytes>::from("Hello World!".to_owned());
- Ok::<_, Infallible>(http::Response::new(body))
- }));
-
- let conn = graceful.watch(conn.into_owned());
-
- tokio::spawn(async move {
- if let Err(err) = conn.await {
- eprintln!("connection error: {}", err);
- }
- eprintln!("connection dropped: {}", peer_addr);
- });
- },
-
- _ = ctrl_c.as_mut() => {
- drop(listener);
- eprintln!("Ctrl-C received, starting shutdown");
- break;
- }
- }
- }
-
- tokio::select! {
- _ = graceful.shutdown() => {
- eprintln!("Gracefully shutdown!");
- },
- _ = tokio::time::sleep(Duration::from_secs(10)) => {
- eprintln!("Waited 10 seconds for graceful shutdown, aborting...");
- }
- }
-
- Ok(())
-}
diff --git a/vendor/hyper-util/src/client/client.rs b/vendor/hyper-util/src/client/client.rs
deleted file mode 100644
index a9fb244a..00000000
--- a/vendor/hyper-util/src/client/client.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-use hyper::{Request, Response};
-use tower::{Service, MakeService};
-
-use super::connect::Connect;
-use super::pool;
-
-pub struct Client<M> {
- // Hi there. So, let's take a 0.14.x hyper::Client, and build up its layers
- // here. We don't need to fully expose the layers to start with, but that
- // is the end goal.
- //
- // Client = MakeSvcAsService<
- // SetHost<
- // Http1RequestTarget<
- // DelayedRelease<
- // ConnectingPool<C, P>
- // >
- // >
- // >
- // >
- make_svc: M,
-}
-
-// We might change this... :shrug:
-type PoolKey = hyper::Uri;
-
-struct ConnectingPool<C, P> {
- connector: C,
- pool: P,
-}
-
-struct PoolableSvc<S>(S);
-
-/// A marker to identify what version a pooled connection is.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-#[allow(dead_code)]
-pub enum Ver {
- Auto,
- Http2,
-}
-
-// ===== impl Client =====
-
-impl<M, /*ReqBody, ResBody,*/ E> Client<M>
-where
- M: MakeService<
- hyper::Uri,
- Request<()>,
- Response = Response<()>,
- Error = E,
- MakeError = E,
- >,
- //M: Service<hyper::Uri, Error = E>,
- //M::Response: Service<Request<ReqBody>, Response = Response<ResBody>>,
-{
- pub async fn request(&mut self, req: Request<()>) -> Result<Response<()>, E> {
- let mut svc = self.make_svc.make_service(req.uri().clone()).await?;
- svc.call(req).await
- }
-}
-
-impl<M, /*ReqBody, ResBody,*/ E> Client<M>
-where
- M: MakeService<
- hyper::Uri,
- Request<()>,
- Response = Response<()>,
- Error = E,
- MakeError = E,
- >,
- //M: Service<hyper::Uri, Error = E>,
- //M::Response: Service<Request<ReqBody>, Response = Response<ResBody>>,
-{
-
-}
-
-// ===== impl ConnectingPool =====
-
-impl<C, P> ConnectingPool<C, P>
-where
- C: Connect,
- C::_Svc: Unpin + Send + 'static,
-{
- async fn connection_for(&self, target: PoolKey) -> Result<pool::Pooled<PoolableSvc<C::_Svc>, PoolKey>, ()> {
- todo!()
- }
-}
-
-impl<S> pool::Poolable for PoolableSvc<S>
-where
- S: Unpin + Send + 'static,
-{
- fn is_open(&self) -> bool {
- /*
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_ready(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_ready(),
- }
- */
- true
- }
-
- fn reserve(self) -> pool::Reservation<Self> {
- /*
- match self.tx {
- PoolTx::Http1(tx) => Reservation::Unique(PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http1(tx),
- }),
- #[cfg(feature = "http2")]
- PoolTx::Http2(tx) => {
- let b = PoolClient {
- conn_info: self.conn_info.clone(),
- tx: PoolTx::Http2(tx.clone()),
- };
- let a = PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http2(tx),
- };
- Reservation::Shared(a, b)
- }
- }
- */
- pool::Reservation::Unique(self)
- }
-
- fn can_share(&self) -> bool {
- false
- //self.is_http2()
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/client.rs b/vendor/hyper-util/src/client/legacy/client.rs
deleted file mode 100644
index 9899d346..00000000
--- a/vendor/hyper-util/src/client/legacy/client.rs
+++ /dev/null
@@ -1,1690 +0,0 @@
-//! The legacy HTTP Client from 0.14.x
-//!
-//! This `Client` will eventually be deconstructed into more composable parts.
-//! For now, to enable people to use hyper 1.0 quicker, this `Client` exists
-//! in much the same way it did in hyper 0.14.
-
-use std::error::Error as StdError;
-use std::fmt;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{self, Poll};
-use std::time::Duration;
-
-use futures_util::future::{self, Either, FutureExt, TryFutureExt};
-use http::uri::Scheme;
-use hyper::client::conn::TrySendError as ConnTrySendError;
-use hyper::header::{HeaderValue, HOST};
-use hyper::rt::Timer;
-use hyper::{body::Body, Method, Request, Response, Uri, Version};
-use tracing::{debug, trace, warn};
-
-use super::connect::capture::CaptureConnectionExtension;
-#[cfg(feature = "tokio")]
-use super::connect::HttpConnector;
-use super::connect::{Alpn, Connect, Connected, Connection};
-use super::pool::{self, Ver};
-
-use crate::common::future::poll_fn;
-use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper};
-
-type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
-
-/// A Client to make outgoing HTTP requests.
-///
-/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The
-/// underlying connection pool will be reused.
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-pub struct Client<C, B> {
- config: Config,
- connector: C,
- exec: Exec,
- #[cfg(feature = "http1")]
- h1_builder: hyper::client::conn::http1::Builder,
- #[cfg(feature = "http2")]
- h2_builder: hyper::client::conn::http2::Builder<Exec>,
- pool: pool::Pool<PoolClient<B>, PoolKey>,
-}
-
-#[derive(Clone, Copy, Debug)]
-struct Config {
- retry_canceled_requests: bool,
- set_host: bool,
- ver: Ver,
-}
-
-/// Client errors
-pub struct Error {
- kind: ErrorKind,
- source: Option<Box<dyn StdError + Send + Sync>>,
- #[cfg(any(feature = "http1", feature = "http2"))]
- connect_info: Option<Connected>,
-}
-
-#[derive(Debug)]
-enum ErrorKind {
- Canceled,
- ChannelClosed,
- Connect,
- UserUnsupportedRequestMethod,
- UserUnsupportedVersion,
- UserAbsoluteUriRequired,
- SendRequest,
-}
-
-macro_rules! e {
- ($kind:ident) => {
- Error {
- kind: ErrorKind::$kind,
- source: None,
- connect_info: None,
- }
- };
- ($kind:ident, $src:expr) => {
- Error {
- kind: ErrorKind::$kind,
- source: Some($src.into()),
- connect_info: None,
- }
- };
-}
-
-// We might change this... :shrug:
-type PoolKey = (http::uri::Scheme, http::uri::Authority);
-
-enum TrySendError<B> {
- Retryable {
- error: Error,
- req: Request<B>,
- connection_reused: bool,
- },
- Nope(Error),
-}
-
-/// A `Future` that will resolve to an HTTP Response.
-///
-/// This is returned by `Client::request` (and `Client::get`).
-#[must_use = "futures do nothing unless polled"]
-pub struct ResponseFuture {
- inner: SyncWrapper<
- Pin<Box<dyn Future<Output = Result<Response<hyper::body::Incoming>, Error>> + Send>>,
- >,
-}
-
-// ===== impl Client =====
-
-impl Client<(), ()> {
- /// Create a builder to configure a new `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "tokio")]
- /// # fn run () {
- /// use std::time::Duration;
- /// use hyper_util::client::legacy::Client;
- /// use hyper_util::rt::{TokioExecutor, TokioTimer};
- ///
- /// let client = Client::builder(TokioExecutor::new())
- /// .pool_timer(TokioTimer::new())
- /// .pool_idle_timeout(Duration::from_secs(30))
- /// .http2_only(true)
- /// .build_http();
- /// # let infer: Client<_, http_body_util::Full<bytes::Bytes>> = client;
- /// # drop(infer);
- /// # }
- /// # fn main() {}
- /// ```
- pub fn builder<E>(executor: E) -> Builder
- where
- E: hyper::rt::Executor<BoxSendFuture> + Send + Sync + Clone + 'static,
- {
- Builder::new(executor)
- }
-}
-
-impl<C, B> Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: Body + Send + 'static + Unpin,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- /// Send a `GET` request to the supplied `Uri`.
- ///
- /// # Note
- ///
- /// This requires that the `Body` type have a `Default` implementation.
- /// It *should* return an "empty" version of itself, such that
- /// `Body::is_end_stream` is `true`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "tokio")]
- /// # fn run () {
- /// use hyper::Uri;
- /// use hyper_util::client::legacy::Client;
- /// use hyper_util::rt::TokioExecutor;
- /// use bytes::Bytes;
- /// use http_body_util::Full;
- ///
- /// let client: Client<_, Full<Bytes>> = Client::builder(TokioExecutor::new()).build_http();
- ///
- /// let future = client.get(Uri::from_static("http://httpbin.org/ip"));
- /// # }
- /// # fn main() {}
- /// ```
- pub fn get(&self, uri: Uri) -> ResponseFuture
- where
- B: Default,
- {
- let body = B::default();
- if !body.is_end_stream() {
- warn!("default Body used for get() does not return true for is_end_stream");
- }
-
- let mut req = Request::new(body);
- *req.uri_mut() = uri;
- self.request(req)
- }
-
- /// Send a constructed `Request` using this `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "tokio")]
- /// # fn run () {
- /// use hyper::{Method, Request};
- /// use hyper_util::client::legacy::Client;
- /// use http_body_util::Full;
- /// use hyper_util::rt::TokioExecutor;
- /// use bytes::Bytes;
- ///
- /// let client: Client<_, Full<Bytes>> = Client::builder(TokioExecutor::new()).build_http();
- ///
- /// let req: Request<Full<Bytes>> = Request::builder()
- /// .method(Method::POST)
- /// .uri("http://httpbin.org/post")
- /// .body(Full::from("Hallo!"))
- /// .expect("request builder");
- ///
- /// let future = client.request(req);
- /// # }
- /// # fn main() {}
- /// ```
- pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
- let is_http_connect = req.method() == Method::CONNECT;
- match req.version() {
- Version::HTTP_11 => (),
- Version::HTTP_10 => {
- if is_http_connect {
- warn!("CONNECT is not allowed for HTTP/1.0");
- return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod)));
- }
- }
- Version::HTTP_2 => (),
- // completely unsupported HTTP version (like HTTP/0.9)!
- other => return ResponseFuture::error_version(other),
- };
-
- let pool_key = match extract_domain(req.uri_mut(), is_http_connect) {
- Ok(s) => s,
- Err(err) => {
- return ResponseFuture::new(future::err(err));
- }
- };
-
- ResponseFuture::new(self.clone().send_request(req, pool_key))
- }
-
- async fn send_request(
- self,
- mut req: Request<B>,
- pool_key: PoolKey,
- ) -> Result<Response<hyper::body::Incoming>, Error> {
- let uri = req.uri().clone();
-
- loop {
- req = match self.try_send_request(req, pool_key.clone()).await {
- Ok(resp) => return Ok(resp),
- Err(TrySendError::Nope(err)) => return Err(err),
- Err(TrySendError::Retryable {
- mut req,
- error,
- connection_reused,
- }) => {
- if !self.config.retry_canceled_requests || !connection_reused {
- // if client disabled, don't retry
- // a fresh connection means we definitely can't retry
- return Err(error);
- }
-
- trace!(
- "unstarted request canceled, trying again (reason={:?})",
- error
- );
- *req.uri_mut() = uri.clone();
- req
- }
- }
- }
- }
-
- async fn try_send_request(
- &self,
- mut req: Request<B>,
- pool_key: PoolKey,
- ) -> Result<Response<hyper::body::Incoming>, TrySendError<B>> {
- let mut pooled = self
- .connection_for(pool_key)
- .await
- // `connection_for` already retries checkout errors, so if
- // it returns an error, there's not much else to retry
- .map_err(TrySendError::Nope)?;
-
- if let Some(conn) = req.extensions_mut().get_mut::<CaptureConnectionExtension>() {
- conn.set(&pooled.conn_info);
- }
-
- if pooled.is_http1() {
- if req.version() == Version::HTTP_2 {
- warn!("Connection is HTTP/1, but request requires HTTP/2");
- return Err(TrySendError::Nope(
- e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()),
- ));
- }
-
- if self.config.set_host {
- let uri = req.uri().clone();
- req.headers_mut().entry(HOST).or_insert_with(|| {
- let hostname = uri.host().expect("authority implies host");
- if let Some(port) = get_non_default_port(&uri) {
- let s = format!("{hostname}:{port}");
- HeaderValue::from_str(&s)
- } else {
- HeaderValue::from_str(hostname)
- }
- .expect("uri host is valid header value")
- });
- }
-
- // CONNECT always sends authority-form, so check it first...
- if req.method() == Method::CONNECT {
- authority_form(req.uri_mut());
- } else if pooled.conn_info.is_proxied {
- absolute_form(req.uri_mut());
- } else {
- origin_form(req.uri_mut());
- }
- } else if req.method() == Method::CONNECT && !pooled.is_http2() {
- authority_form(req.uri_mut());
- }
-
- let mut res = match pooled.try_send_request(req).await {
- Ok(res) => res,
- Err(mut err) => {
- return if let Some(req) = err.take_message() {
- Err(TrySendError::Retryable {
- connection_reused: pooled.is_reused(),
- error: e!(Canceled, err.into_error())
- .with_connect_info(pooled.conn_info.clone()),
- req,
- })
- } else {
- Err(TrySendError::Nope(
- e!(SendRequest, err.into_error())
- .with_connect_info(pooled.conn_info.clone()),
- ))
- }
- }
- };
-
- // If the Connector included 'extra' info, add to Response...
- if let Some(extra) = &pooled.conn_info.extra {
- extra.set(res.extensions_mut());
- }
-
- // If pooled is HTTP/2, we can toss this reference immediately.
- //
- // when pooled is dropped, it will try to insert back into the
- // pool. To delay that, spawn a future that completes once the
- // sender is ready again.
- //
- // This *should* only be once the related `Connection` has polled
- // for a new request to start.
- //
- // It won't be ready if there is a body to stream.
- if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
- drop(pooled);
- } else if !res.body().is_end_stream() {
- //let (delayed_tx, delayed_rx) = oneshot::channel::<()>();
- //res.body_mut().delayed_eof(delayed_rx);
- let on_idle = poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| {
- // At this point, `pooled` is dropped, and had a chance
- // to insert into the pool (if conn was idle)
- //drop(delayed_tx);
- });
-
- self.exec.execute(on_idle);
- } else {
- // There's no body to delay, but the connection isn't
- // ready yet. Only re-insert when it's ready
- let on_idle = poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
-
- self.exec.execute(on_idle);
- }
-
- Ok(res)
- }
-
- async fn connection_for(
- &self,
- pool_key: PoolKey,
- ) -> Result<pool::Pooled<PoolClient<B>, PoolKey>, Error> {
- loop {
- match self.one_connection_for(pool_key.clone()).await {
- Ok(pooled) => return Ok(pooled),
- Err(ClientConnectError::Normal(err)) => return Err(err),
- Err(ClientConnectError::CheckoutIsClosed(reason)) => {
- if !self.config.retry_canceled_requests {
- return Err(e!(Connect, reason));
- }
-
- trace!(
- "unstarted request canceled, trying again (reason={:?})",
- reason,
- );
- continue;
- }
- };
- }
- }
-
    /// Perform a single checkout-or-connect attempt for `pool_key`.
    ///
    /// Races an idle-pool checkout against a fresh connect (unless pooling
    /// is disabled, in which case it connects directly). Errors distinguish
    /// `Normal` failures from a closed checkout so `connection_for` can
    /// decide whether to retry.
    async fn one_connection_for(
        &self,
        pool_key: PoolKey,
    ) -> Result<pool::Pooled<PoolClient<B>, PoolKey>, ClientConnectError> {
        // Return a single connection if pooling is not enabled
        if !self.pool.is_enabled() {
            return self
                .connect_to(pool_key)
                .await
                .map_err(ClientConnectError::Normal);
        }

        // This actually races 2 different futures to try to get a ready
        // connection the fastest, and to reduce connection churn.
        //
        // - If the pool has an idle connection waiting, that's used
        //   immediately.
        // - Otherwise, the Connector is asked to start connecting to
        //   the destination Uri.
        // - Meanwhile, the pool Checkout is watching to see if any other
        //   request finishes and tries to insert an idle connection.
        // - If a new connection is started, but the Checkout wins after
        //   (an idle connection became available first), the started
        //   connection future is spawned into the runtime to complete,
        //   and then be inserted into the pool as an idle connection.
        let checkout = self.pool.checkout(pool_key.clone());
        let connect = self.connect_to(pool_key);
        let is_ver_h2 = self.config.ver == Ver::Http2;

        // The order of the `select` is depended on below...

        match future::select(checkout, connect).await {
            // Checkout won, connect future may have been started or not.
            //
            // If it has, let it finish and insert back into the pool,
            // so as to not waste the socket...
            Either::Left((Ok(checked_out), connecting)) => {
                // This depends on the `select` above having the correct
                // order, such that if the checkout future were ready
                // immediately, the connect future will never have been
                // started.
                //
                // If it *wasn't* ready yet, then the connect future will
                // have been started...
                if connecting.started() {
                    let bg = connecting
                        .map_err(|err| {
                            trace!("background connect error: {}", err);
                        })
                        .map(|_pooled| {
                            // dropping here should just place it in
                            // the Pool for us...
                        });
                    // An execute error here isn't important, we're just trying
                    // to prevent a waste of a socket...
                    self.exec.execute(bg);
                }
                Ok(checked_out)
            }
            // Connect won, checkout can just be dropped.
            Either::Right((Ok(connected), _checkout)) => Ok(connected),
            // Either checkout or connect could get canceled:
            //
            // 1. Connect is canceled if this is HTTP/2 and there is
            //    an outstanding HTTP/2 connecting task.
            // 2. Checkout is canceled if the pool cannot deliver an
            //    idle connection reliably.
            //
            // In both cases, we should just wait for the other future.
            Either::Left((Err(err), connecting)) => {
                if err.is_canceled() {
                    connecting.await.map_err(ClientConnectError::Normal)
                } else {
                    Err(ClientConnectError::Normal(e!(Connect, err)))
                }
            }
            Either::Right((Err(err), checkout)) => {
                if err.is_canceled() {
                    checkout.await.map_err(move |err| {
                        // Only a canceled checkout under forced HTTP/2 is
                        // reported as retryable (CheckoutIsClosed).
                        if is_ver_h2 && err.is_canceled() {
                            ClientConnectError::CheckoutIsClosed(err)
                        } else {
                            ClientConnectError::Normal(e!(Connect, err))
                        }
                    })
                } else {
                    Err(ClientConnectError::Normal(err))
                }
            }
        }
    }
-
    /// Build a lazy future that establishes a new connection for `pool_key`.
    ///
    /// The returned future, when polled, takes the pool's "connecting lock",
    /// dials the destination via the `Connector`, performs the HTTP/1 or
    /// HTTP/2 handshake (honoring ALPN), spawns the background dispatcher
    /// task, and finally wraps the ready sender in a pooled `PoolClient`.
    #[cfg(any(feature = "http1", feature = "http2"))]
    fn connect_to(
        &self,
        pool_key: PoolKey,
    ) -> impl Lazy<Output = Result<pool::Pooled<PoolClient<B>, PoolKey>, Error>> + Send + Unpin
    {
        let executor = self.exec.clone();
        let pool = self.pool.clone();
        #[cfg(feature = "http1")]
        let h1_builder = self.h1_builder.clone();
        #[cfg(feature = "http2")]
        let h2_builder = self.h2_builder.clone();
        let ver = self.config.ver;
        let is_ver_h2 = ver == Ver::Http2;
        let connector = self.connector.clone();
        let dst = domain_as_uri(pool_key.clone());
        hyper_lazy(move || {
            // Try to take a "connecting lock".
            //
            // If the pool_key is for HTTP/2, and there is already a
            // connection being established, then this can't take a
            // second lock. The "connect_to" future is Canceled.
            let connecting = match pool.connecting(&pool_key, ver) {
                Some(lock) => lock,
                None => {
                    let canceled = e!(Canceled);
                    // TODO
                    //crate::Error::new_canceled().with("HTTP/2 connection in progress");
                    return Either::Right(future::err(canceled));
                }
            };
            Either::Left(
                connector
                    .connect(super::connect::sealed::Internal, dst)
                    .map_err(|src| e!(Connect, src))
                    .and_then(move |io| {
                        let connected = io.connected();
                        // If ALPN is h2 and we aren't http2_only already,
                        // then we need to convert our pool checkout into
                        // a single HTTP2 one.
                        let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
                            match connecting.alpn_h2(&pool) {
                                Some(lock) => {
                                    trace!("ALPN negotiated h2, updating pool");
                                    lock
                                }
                                None => {
                                    // Another connection has already upgraded,
                                    // the pool checkout should finish up for us.
                                    let canceled = e!(Canceled, "ALPN upgraded to HTTP/2");
                                    return Either::Right(future::err(canceled));
                                }
                            }
                        } else {
                            connecting
                        };

                        #[cfg_attr(not(feature = "http2"), allow(unused))]
                        let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;

                        Either::Left(Box::pin(async move {
                            let tx = if is_h2 {
                                #[cfg(feature = "http2")] {
                                    let (mut tx, conn) =
                                        h2_builder.handshake(io).await.map_err(Error::tx)?;

                                    trace!(
                                        "http2 handshake complete, spawning background dispatcher task"
                                    );
                                    executor.execute(
                                        conn.map_err(|e| debug!("client connection error: {}", e))
                                            .map(|_| ()),
                                    );

                                    // Wait for 'conn' to ready up before we
                                    // declare this tx as usable
                                    tx.ready().await.map_err(Error::tx)?;
                                    PoolTx::Http2(tx)
                                }
                                #[cfg(not(feature = "http2"))]
                                panic!("http2 feature is not enabled");
                            } else {
                                #[cfg(feature = "http1")] {
                                    // HTTP/1.1 handshake: yields a request sender (tx)
                                    // and a connection task (conn) that drives the socket.
                                    let (mut tx, conn) =
                                        h1_builder.handshake(io).await.map_err(crate::client::legacy::client::Error::tx)?;

                                    trace!(
                                        "http1 handshake complete, spawning background dispatcher task"
                                    );
                                    // Oneshot channel used to surface the connection task's
                                    // real error, so a vague "channel closed" from the sender
                                    // below can be correlated with the actual failure.
                                    let (err_tx, err_rx) = tokio::sync::oneshot::channel();
                                    // Spawn the dispatcher (with upgrade support, e.g. for
                                    // WebSocket); any error it hits is forwarded to err_tx.
                                    executor.execute(
                                        conn.with_upgrades()
                                            .map_err(|e| {
                                                debug!("client connection error: {:?}", e);
                                                trace!("sending connection error to error channel");
                                                // Send failures (receiver dropped) are fine to ignore.
                                                let _ =err_tx.send(e);
                                            })
                                            .map(|_| ()),
                                    );
                                    trace!("waiting for connection to be ready");
                                    // Wait for 'conn' to ready up before we
                                    // declare this tx as usable
                                    match tx.ready().await {
                                        Ok(_) => {
                                            trace!("connection is ready");
                                            // Sender is usable; the error channel is no longer needed.
                                            drop(err_rx);
                                            PoolTx::Http1(tx)
                                        }
                                        Err(e) if e.is_closed() => {
                                            // A closed channel is vague; prefer the dispatcher's
                                            // concrete error if one was reported.
                                            trace!("connection channel closed, checking for connection error");
                                            match err_rx.await {
                                                Ok(err) => {
                                                    trace!("received connection error: {:?}", err);
                                                    return Err(crate::client::legacy::client::Error::tx(err));
                                                }
                                                Err(_) => {
                                                    // No specific error was sent; fall back to
                                                    // the original ChannelClosed error.
                                                    trace!("error channel closed, returning the vague ChannelClosed error");
                                                    return Err(crate::client::legacy::client::Error::tx(e));
                                                }
                                            }
                                        }
                                        Err(e) => {
                                            // Other readiness failures propagate directly.
                                            trace!("connection readiness failed: {:?}", e);
                                            return Err(crate::client::legacy::client::Error::tx(e));
                                        }
                                    }
                                }
                                #[cfg(not(feature = "http1"))] {
                                    panic!("http1 feature is not enabled");
                                }
                            };

                            Ok(pool.pooled(
                                connecting,
                                PoolClient {
                                    conn_info: connected,
                                    tx,
                                },
                            ))
                        }))
                    }),
            )
        })
    }
-}
-
/// Use a `Client` directly as a `tower_service::Service`.
impl<C, B> tower_service::Service<Request<B>> for Client<C, B>
where
    C: Connect + Clone + Send + Sync + 'static,
    B: Body + Send + 'static + Unpin,
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Response = Response<hyper::body::Incoming>;
    type Error = Error;
    type Future = ResponseFuture;

    // Always ready: per-connection readiness is handled internally.
    fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // Forward straight to `Client::request`.
    fn call(&mut self, req: Request<B>) -> Self::Future {
        self.request(req)
    }
}
-
/// Use a shared reference to a `Client` as a `tower_service::Service`,
/// since `Client::request` only needs `&self`.
impl<C, B> tower_service::Service<Request<B>> for &'_ Client<C, B>
where
    C: Connect + Clone + Send + Sync + 'static,
    B: Body + Send + 'static + Unpin,
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Response = Response<hyper::body::Incoming>;
    type Error = Error;
    type Future = ResponseFuture;

    // Always ready: per-connection readiness is handled internally.
    fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request<B>) -> Self::Future {
        self.request(req)
    }
}
-
/// Cloning a `Client` is cheap: the clones share the same connection pool.
impl<C: Clone, B> Clone for Client<C, B> {
    fn clone(&self) -> Client<C, B> {
        Client {
            // Copied by value (moving out of `&self` here requires `Config: Copy`).
            config: self.config,
            exec: self.exec.clone(),
            #[cfg(feature = "http1")]
            h1_builder: self.h1_builder.clone(),
            #[cfg(feature = "http2")]
            h2_builder: self.h2_builder.clone(),
            connector: self.connector.clone(),
            // Shared: both clones draw from and return to the same pool.
            pool: self.pool.clone(),
        }
    }
}
-
/// Opaque `Debug`: internal configuration and pool state are not exposed.
impl<C, B> fmt::Debug for Client<C, B> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Client").finish()
    }
}
-
-// ===== impl ResponseFuture =====
-
-impl ResponseFuture {
- fn new<F>(value: F) -> Self
- where
- F: Future<Output = Result<Response<hyper::body::Incoming>, Error>> + Send + 'static,
- {
- Self {
- inner: SyncWrapper::new(Box::pin(value)),
- }
- }
-
- fn error_version(ver: Version) -> Self {
- warn!("Request has unsupported version \"{:?}\"", ver);
- ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion))))
- }
-}
-
/// The boxed future cannot be inspected; print a fixed placeholder.
impl fmt::Debug for ResponseFuture {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Future<Response>")
    }
}
-
impl Future for ResponseFuture {
    type Output = Result<Response<hyper::body::Incoming>, Error>;

    // Delegate to the boxed inner future stored behind the SyncWrapper.
    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        self.inner.get_mut().as_mut().poll(cx)
    }
}
-
-// ===== impl PoolClient =====
-
// FIXME: allow() required due to `impl Trait` leaking types to this lint
/// A pooled sender for one connection, plus the `Connected` metadata
/// captured when the connection was established.
#[allow(missing_debug_implementations)]
struct PoolClient<B> {
    // Metadata from the connector (ALPN result, extra info, poison flag).
    conn_info: Connected,
    // The protocol-specific request sender for this connection.
    tx: PoolTx<B>,
}
-
/// Protocol-specific request sender; which variants exist depends on the
/// enabled cargo features.
enum PoolTx<B> {
    #[cfg(feature = "http1")]
    Http1(hyper::client::conn::http1::SendRequest<B>),
    #[cfg(feature = "http2")]
    Http2(hyper::client::conn::http2::SendRequest<B>),
}
-
impl<B> PoolClient<B> {
    /// Poll the underlying sender for readiness to accept a request.
    fn poll_ready(
        &mut self,
        #[allow(unused_variables)] cx: &mut task::Context<'_>,
    ) -> Poll<Result<(), Error>> {
        match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed),
            // HTTP/2 multiplexes, so the sender is always considered ready here.
            #[cfg(feature = "http2")]
            PoolTx::Http2(_) => Poll::Ready(Ok(())),
        }
    }

    fn is_http1(&self) -> bool {
        !self.is_http2()
    }

    fn is_http2(&self) -> bool {
        match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(_) => false,
            #[cfg(feature = "http2")]
            PoolTx::Http2(_) => true,
        }
    }

    /// Whether the connector marked this connection as unusable.
    fn is_poisoned(&self) -> bool {
        self.conn_info.poisoned.poisoned()
    }

    /// Whether the sender can accept another request right now.
    fn is_ready(&self) -> bool {
        match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(ref tx) => tx.is_ready(),
            #[cfg(feature = "http2")]
            PoolTx::Http2(ref tx) => tx.is_ready(),
        }
    }
}
-
impl<B: Body + 'static> PoolClient<B> {
    /// Send `req` on this connection's sender, returning the request for
    /// retry if it could not be started.
    ///
    /// One `return match` per feature permutation: with both protocols
    /// enabled the two sender futures have different types and must be
    /// unified through `Either`; with a single protocol the match is direct.
    fn try_send_request(
        &mut self,
        req: Request<B>,
    ) -> impl Future<Output = Result<Response<hyper::body::Incoming>, ConnTrySendError<Request<B>>>>
    where
        B: Send,
    {
        #[cfg(all(feature = "http1", feature = "http2"))]
        return match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)),
            #[cfg(feature = "http2")]
            PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)),
        };

        #[cfg(feature = "http1")]
        #[cfg(not(feature = "http2"))]
        return match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(ref mut tx) => tx.try_send_request(req),
        };

        #[cfg(not(feature = "http1"))]
        #[cfg(feature = "http2")]
        return match self.tx {
            #[cfg(feature = "http2")]
            PoolTx::Http2(ref mut tx) => tx.try_send_request(req),
        };
    }
}
-
impl<B> pool::Poolable for PoolClient<B>
where
    B: Send + 'static,
{
    // A connection stays in the pool only while unpoisoned and ready.
    fn is_open(&self) -> bool {
        !self.is_poisoned() && self.is_ready()
    }

    /// Reserve this client for use.
    ///
    /// HTTP/1 connections are exclusive (`Unique`); HTTP/2 senders are
    /// cloneable, so one copy is handed out and one stays in the pool
    /// (`Shared`).
    fn reserve(self) -> pool::Reservation<Self> {
        match self.tx {
            #[cfg(feature = "http1")]
            PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient {
                conn_info: self.conn_info,
                tx: PoolTx::Http1(tx),
            }),
            #[cfg(feature = "http2")]
            PoolTx::Http2(tx) => {
                let b = PoolClient {
                    conn_info: self.conn_info.clone(),
                    tx: PoolTx::Http2(tx.clone()),
                };
                let a = PoolClient {
                    conn_info: self.conn_info,
                    tx: PoolTx::Http2(tx),
                };
                pool::Reservation::Shared(a, b)
            }
        }
    }

    // Only multiplexed (HTTP/2) connections may be shared.
    fn can_share(&self) -> bool {
        self.is_http2()
    }
}
-
/// Outcome classification for `one_connection_for`.
enum ClientConnectError {
    /// A terminal error; returned to the caller as-is.
    Normal(Error),
    /// The pool checkout was closed; `connection_for` may retry this.
    CheckoutIsClosed(pool::Error),
}
-
-fn origin_form(uri: &mut Uri) {
- let path = match uri.path_and_query() {
- Some(path) if path.as_str() != "/" => {
- let mut parts = ::http::uri::Parts::default();
- parts.path_and_query = Some(path.clone());
- Uri::from_parts(parts).expect("path is valid uri")
- }
- _none_or_just_slash => {
- debug_assert!(Uri::default() == "/");
- Uri::default()
- }
- };
- *uri = path
-}
-
-fn absolute_form(uri: &mut Uri) {
- debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
- debug_assert!(
- uri.authority().is_some(),
- "absolute_form needs an authority"
- );
- // If the URI is to HTTPS, and the connector claimed to be a proxy,
- // then it *should* have tunneled, and so we don't want to send
- // absolute-form in that case.
- if uri.scheme() == Some(&Scheme::HTTPS) {
- origin_form(uri);
- }
-}
-
-fn authority_form(uri: &mut Uri) {
- if let Some(path) = uri.path_and_query() {
- // `https://hyper.rs` would parse with `/` path, don't
- // annoy people about that...
- if path != "/" {
- warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path);
- }
- }
- *uri = match uri.authority() {
- Some(auth) => {
- let mut parts = ::http::uri::Parts::default();
- parts.authority = Some(auth.clone());
- Uri::from_parts(parts).expect("authority is valid")
- }
- None => {
- unreachable!("authority_form with relative uri");
- }
- };
-}
-
-fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result<PoolKey, Error> {
- let uri_clone = uri.clone();
- match (uri_clone.scheme(), uri_clone.authority()) {
- (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())),
- (None, Some(auth)) if is_http_connect => {
- let scheme = match auth.port_u16() {
- Some(443) => {
- set_scheme(uri, Scheme::HTTPS);
- Scheme::HTTPS
- }
- _ => {
- set_scheme(uri, Scheme::HTTP);
- Scheme::HTTP
- }
- };
- Ok((scheme, auth.clone()))
- }
- _ => {
- debug!("Client requires absolute-form URIs, received: {:?}", uri);
- Err(e!(UserAbsoluteUriRequired))
- }
- }
-}
-
-fn domain_as_uri((scheme, auth): PoolKey) -> Uri {
- http::uri::Builder::new()
- .scheme(scheme)
- .authority(auth)
- .path_and_query("/")
- .build()
- .expect("domain is valid Uri")
-}
-
-fn set_scheme(uri: &mut Uri, scheme: Scheme) {
- debug_assert!(
- uri.scheme().is_none(),
- "set_scheme expects no existing scheme"
- );
- let old = std::mem::take(uri);
- let mut parts: ::http::uri::Parts = old.into();
- parts.scheme = Some(scheme);
- parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
- *uri = Uri::from_parts(parts).expect("scheme is valid");
-}
-
-fn get_non_default_port(uri: &Uri) -> Option<http::uri::Port<&str>> {
- match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) {
- (Some(443), true) => None,
- (Some(80), false) => None,
- _ => uri.port(),
- }
-}
-
-fn is_schema_secure(uri: &Uri) -> bool {
- uri.scheme_str()
- .map(|scheme_str| matches!(scheme_str, "wss" | "https"))
- .unwrap_or_default()
-}
-
/// A builder to configure a new [`Client`](Client).
///
/// # Example
///
/// ```
/// # #[cfg(feature = "tokio")]
/// # fn run () {
/// use std::time::Duration;
/// use hyper_util::client::legacy::Client;
/// use hyper_util::rt::TokioExecutor;
///
/// let client = Client::builder(TokioExecutor::new())
///     .pool_idle_timeout(Duration::from_secs(30))
///     .http2_only(true)
///     .build_http();
/// # let infer: Client<_, http_body_util::Full<bytes::Bytes>> = client;
/// # drop(infer);
/// # }
/// # fn main() {}
/// ```
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
#[derive(Clone)]
pub struct Builder {
    // Client-level behavior (version policy, retries, Host header).
    client_config: Config,
    // Executor used to spawn background connection tasks.
    exec: Exec,
    // Per-protocol hyper connection builders, configured by the
    // `http1_*` / `http2_*` setter methods below.
    #[cfg(feature = "http1")]
    h1_builder: hyper::client::conn::http1::Builder,
    #[cfg(feature = "http2")]
    h2_builder: hyper::client::conn::http2::Builder<Exec>,
    // Idle-connection pool settings (timeout, per-host cap).
    pool_config: pool::Config,
    // Timer that drives idle expiry; without it the idle timeout is inert.
    pool_timer: Option<timer::Timer>,
}
-
-impl Builder {
    /// Construct a new Builder with default configuration.
    ///
    /// Defaults: automatic HTTP version, canceled-request retry and
    /// Host-header setting enabled, 90s idle timeout, unlimited idle
    /// connections per host, and no pool timer.
    pub fn new<E>(executor: E) -> Self
    where
        E: hyper::rt::Executor<BoxSendFuture> + Send + Sync + Clone + 'static,
    {
        let exec = Exec::new(executor);
        Self {
            client_config: Config {
                retry_canceled_requests: true,
                set_host: true,
                ver: Ver::Auto,
            },
            exec: exec.clone(),
            #[cfg(feature = "http1")]
            h1_builder: hyper::client::conn::http1::Builder::new(),
            // The HTTP/2 builder keeps its own executor handle to drive
            // connection tasks.
            #[cfg(feature = "http2")]
            h2_builder: hyper::client::conn::http2::Builder::new(exec),
            pool_config: pool::Config {
                idle_timeout: Some(Duration::from_secs(90)),
                max_idle_per_host: usize::MAX,
            },
            pool_timer: None,
        }
    }
    /// Set an optional timeout for idle sockets being kept-alive.
    /// A `Timer` is required for this to take effect. See `Builder::pool_timer`
    ///
    /// Pass `None` to disable timeout.
    ///
    /// Default is 90 seconds.
    ///
    /// # Example
    ///
    /// ```
    /// # #[cfg(feature = "tokio")]
    /// # fn run () {
    /// use std::time::Duration;
    /// use hyper_util::client::legacy::Client;
    /// use hyper_util::rt::{TokioExecutor, TokioTimer};
    ///
    /// let client = Client::builder(TokioExecutor::new())
    ///     .pool_idle_timeout(Duration::from_secs(30))
    ///     .pool_timer(TokioTimer::new())
    ///     .build_http();
    ///
    /// # let infer: Client<_, http_body_util::Full<bytes::Bytes>> = client;
    /// # }
    /// # fn main() {}
    /// ```
    pub fn pool_idle_timeout<D>(&mut self, val: D) -> &mut Self
    where
        D: Into<Option<Duration>>,
    {
        // Stored in the pool config; enforcement requires a pool timer.
        self.pool_config.idle_timeout = val.into();
        self
    }
-
    /// Deprecated alias for [`Builder::pool_max_idle_per_host`].
    #[doc(hidden)]
    #[deprecated(note = "renamed to `pool_max_idle_per_host`")]
    pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
        self.pool_config.max_idle_per_host = max_idle;
        self
    }
-
    /// Sets the maximum idle connection per host allowed in the pool.
    ///
    /// Default is `usize::MAX` (no limit).
    pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
        // Caps idle (not active) connections kept per pool key.
        self.pool_config.max_idle_per_host = max_idle;
        self
    }
-
    // HTTP/1 options

    /// Sets the exact size of the read buffer to *always* use.
    ///
    /// Note that setting this option unsets the `http1_max_buf_size` option.
    ///
    /// Default is an adaptive read buffer.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder.read_buf_exact_size(Some(sz));
        self
    }
-
    /// Set the maximum buffer size for the connection.
    ///
    /// Default is ~400kb.
    ///
    /// Note that setting this option unsets the `http1_read_exact_buf_size` option.
    ///
    /// # Panics
    ///
    /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder (which enforces the minimum).
        self.h1_builder.max_buf_size(max);
        self
    }
-
    /// Set whether HTTP/1 connections will accept spaces between header names
    /// and the colon that follow them in responses.
    ///
    /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
    /// parsing.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
    /// to say about it:
    ///
    /// > No whitespace is allowed between the header field-name and colon. In
    /// > the past, differences in the handling of such whitespace have led to
    /// > security vulnerabilities in request routing and response handling. A
    /// > server MUST reject any received request message that contains
    /// > whitespace between a header field-name and colon with a response code
    /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
    /// > response message before forwarding the message downstream.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder
            .allow_spaces_after_header_name_in_responses(val);
        self
    }
-
    /// Set whether HTTP/1 connections will accept obsolete line folding for
    /// header values.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
    /// to say about it:
    ///
    /// > A server that receives an obs-fold in a request message that is not
    /// > within a message/http container MUST either reject the message by
    /// > sending a 400 (Bad Request), preferably with a representation
    /// > explaining that obsolete line folding is unacceptable, or replace
    /// > each received obs-fold with one or more SP octets prior to
    /// > interpreting the field value or forwarding the message downstream.
    ///
    /// > A proxy or gateway that receives an obs-fold in a response message
    /// > that is not within a message/http container MUST either discard the
    /// > message and replace it with a 502 (Bad Gateway) response, preferably
    /// > with a representation explaining that unacceptable line folding was
    /// > received, or replace each received obs-fold with one or more SP
    /// > octets prior to interpreting the field value or forwarding the
    /// > message downstream.
    ///
    /// > A user agent that receives an obs-fold in a response message that is
    /// > not within a message/http container MUST replace each received
    /// > obs-fold with one or more SP octets prior to interpreting the field
    /// > value.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder
            .allow_obsolete_multiline_headers_in_responses(val);
        self
    }
-
- /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses.
- ///
- /// This mimics the behaviour of major browsers. You probably don't want this.
- /// You should only want this if you are implementing a proxy whose main
- /// purpose is to sit in front of browsers whose users access arbitrary content
- /// which may be malformed, and they expect everything that works without
- /// the proxy to keep working with the proxy.
- ///
- /// This option will prevent Hyper's client from returning an error encountered
- /// when parsing a header, except if the error was caused by the character NUL
- /// (ASCII code 0), as Chrome specifically always reject those.
- ///
- /// The ignorable errors are:
- /// * empty header names;
- /// * characters that are not allowed in header names, except for `\0` and `\r`;
- /// * when `allow_spaces_after_header_name_in_responses` is not enabled,
- /// spaces and tabs between the header name and the colon;
- /// * missing colon between header name and colon;
- /// * characters that are not allowed in header values except for `\0` and `\r`.
- ///
- /// If an ignorable error is encountered, the parser tries to find the next
- /// line in the input to resume parsing the rest of the headers. An error
- /// will be emitted nonetheless if it finds `\0` or a lone `\r` while
- /// looking for the next line.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder {
- self.h1_builder.ignore_invalid_headers_in_responses(val);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
- self.h1_builder.writev(enabled);
- self
- }
-
    /// Set whether HTTP/1 connections will write header names as title case at
    /// the socket level.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is false.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder.title_case_headers(val);
        self
    }
-
    /// Set whether to support preserving original header cases.
    ///
    /// Currently, this will record the original cases received, and store them
    /// in a private extension on the `Response`. It will also look for and use
    /// such an extension in any provided `Request`.
    ///
    /// Since the relevant extension is still private, there is no way to
    /// interact with the original cases. The only effect this can have now is
    /// to forward the cases in a proxy-like fashion.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is false.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder.preserve_header_case(val);
        self
    }
-
    /// Set the maximum number of headers.
    ///
    /// When a response is received, the parser will reserve a buffer to store headers for optimal
    /// performance.
    ///
    /// If client receives more headers than the buffer size, the error "message header too large"
    /// is returned.
    ///
    /// The headers is allocated on the stack by default, which has higher performance. After
    /// setting this value, headers will be allocated in heap memory, that is, heap memory
    /// allocation will occur for each response, and there will be a performance drop of about 5%.
    ///
    /// Note that this setting does not affect HTTP/2.
    ///
    /// Default is 100.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http1_max_headers(&mut self, val: usize) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder.max_headers(val);
        self
    }
-
    /// Set whether HTTP/0.9 responses should be tolerated.
    ///
    /// Default is false.
    #[cfg(feature = "http1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
    pub fn http09_responses(&mut self, val: bool) -> &mut Self {
        // Forwarded to hyper's HTTP/1 connection builder.
        self.h1_builder.http09_responses(val);
        self
    }
-
    /// Set whether the connection **must** use HTTP/2.
    ///
    /// The destination must either allow HTTP2 Prior Knowledge, or the
    /// `Connect` should be configured to do use ALPN to upgrade to `h2`
    /// as part of the connection process. This will not make the `Client`
    /// utilize ALPN by itself.
    ///
    /// Note that setting this to true prevents HTTP/1 from being allowed.
    ///
    /// Default is false.
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn http2_only(&mut self, val: bool) -> &mut Self {
        // Passing `false` restores automatic version selection.
        self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
        self
    }
-
    /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
    ///
    /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2).
    /// As of v0.4.0, it is 20.
    ///
    /// See <https://github.com/hyperium/hyper/issues/2877> for more information.
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn http2_max_pending_accept_reset_streams(
        &mut self,
        max: impl Into<Option<usize>>,
    ) -> &mut Self {
        // Forwarded to hyper's HTTP/2 connection builder.
        self.h2_builder.max_pending_accept_reset_streams(max.into());
        self
    }
-
    /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
    /// stream-level flow control.
    ///
    /// Passing `None` will do nothing.
    ///
    /// If not set, hyper will use a default.
    ///
    /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
        // Forwarded to hyper's HTTP/2 connection builder.
        self.h2_builder.initial_stream_window_size(sz.into());
        self
    }
-
    /// Sets the max connection-level flow control for HTTP2
    ///
    /// Passing `None` will do nothing.
    ///
    /// If not set, hyper will use a default.
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn http2_initial_connection_window_size(
        &mut self,
        sz: impl Into<Option<u32>>,
    ) -> &mut Self {
        // Forwarded to hyper's HTTP/2 connection builder.
        self.h2_builder.initial_connection_window_size(sz.into());
        self
    }
-
- /// Sets the initial maximum of locally initiated (send) streams.
- ///
- /// This value will be overwritten by the value included in the initial
- /// SETTINGS frame received from the peer as part of a [connection preface].
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_max_send_streams(
- &mut self,
- initial: impl Into<Option<usize>>,
- ) -> &mut Self {
- self.h2_builder.initial_max_send_streams(initial);
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
- self.h2_builder.adaptive_window(enabled);
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.h2_builder.max_frame_size(sz);
- self
- }
-
- /// Sets the max size of received header frames for HTTP2.
- ///
- /// Default is currently 16KB, but can change.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
- self.h2_builder.max_header_list_size(max);
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `tokio` cargo feature to be enabled.
- #[cfg(feature = "tokio")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(
- &mut self,
- interval: impl Into<Option<Duration>>,
- ) -> &mut Self {
- self.h2_builder.keep_alive_interval(interval);
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `tokio` cargo feature to be enabled.
- #[cfg(feature = "tokio")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.h2_builder.keep_alive_timeout(timeout);
- self
- }
-
- /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
- ///
- /// If disabled, keep-alive pings are only sent while there are open
- /// request/responses streams. If enabled, pings are also sent when no
- /// streams are active. Does nothing if `http2_keep_alive_interval` is
- /// disabled.
- ///
- /// Default is `false`.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `tokio` cargo feature to be enabled.
- #[cfg(feature = "tokio")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
- self.h2_builder.keep_alive_while_idle(enabled);
- self
- }
-
- /// Sets the maximum number of HTTP2 concurrent locally reset streams.
- ///
- /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
- /// details.
- ///
- /// The default value is determined by the `h2` crate.
- ///
- /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
- self.h2_builder.max_concurrent_reset_streams(max);
- self
- }
-
- /// Provide a timer to be used for h2
- ///
- /// See the documentation of [`h2::client::Builder::timer`] for more
- /// details.
- ///
- /// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer
- pub fn timer<M>(&mut self, timer: M) -> &mut Self
- where
- M: Timer + Send + Sync + 'static,
- {
- #[cfg(feature = "http2")]
- self.h2_builder.timer(timer);
- self
- }
-
- /// Provide a timer to be used for timeouts and intervals in connection pools.
- pub fn pool_timer<M>(&mut self, timer: M) -> &mut Self
- where
- M: Timer + Clone + Send + Sync + 'static,
- {
- self.pool_timer = Some(timer::Timer::new(timer.clone()));
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently 1MB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
- self.h2_builder.max_send_buf_size(max);
- self
- }
-
- /// Set whether to retry requests that get disrupted before ever starting
- /// to write.
- ///
- /// This means a request that is queued, and gets given an idle, reused
- /// connection, and then encounters an error immediately as the idle
- /// connection was found to be unusable.
- ///
- /// When this is set to `false`, the related `ResponseFuture` would instead
- /// resolve to an `Error::Cancel`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
- self.client_config.retry_canceled_requests = val;
- self
- }
-
- /// Set whether to automatically add the `Host` header to requests.
- ///
- /// If true, and a request does not include a `Host` header, one will be
- /// added automatically, derived from the authority of the `Uri`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn set_host(&mut self, val: bool) -> &mut Self {
- self.client_config.set_host = val;
- self
- }
-
- /// Build a client with this configuration and the default `HttpConnector`.
- #[cfg(feature = "tokio")]
- pub fn build_http<B>(&self) -> Client<HttpConnector, B>
- where
- B: Body + Send,
- B::Data: Send,
- {
- let mut connector = HttpConnector::new();
- if self.pool_config.is_enabled() {
- connector.set_keepalive(self.pool_config.idle_timeout);
- }
- self.build(connector)
- }
-
- /// Combine the configuration of this builder with a connector to create a `Client`.
- pub fn build<C, B>(&self, connector: C) -> Client<C, B>
- where
- C: Connect + Clone,
- B: Body + Send,
- B::Data: Send,
- {
- let exec = self.exec.clone();
- let timer = self.pool_timer.clone();
- Client {
- config: self.client_config,
- exec: exec.clone(),
- #[cfg(feature = "http1")]
- h1_builder: self.h1_builder.clone(),
- #[cfg(feature = "http2")]
- h2_builder: self.h2_builder.clone(),
- connector,
- pool: pool::Pool::new(self.pool_config, exec, timer),
- }
- }
-}
-
-impl fmt::Debug for Builder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Builder")
- .field("client_config", &self.client_config)
- .field("pool_config", &self.pool_config)
- .finish()
- }
-}
-
-// ==== impl Error ====
-
-impl fmt::Debug for Error {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let mut f = f.debug_tuple("hyper_util::client::legacy::Error");
- f.field(&self.kind);
- if let Some(ref cause) = self.source {
- f.field(cause);
- }
- f.finish()
- }
-}
-
-impl fmt::Display for Error {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "client error ({:?})", self.kind)
- }
-}
-
-impl StdError for Error {
- fn source(&self) -> Option<&(dyn StdError + 'static)> {
- self.source.as_ref().map(|e| &**e as _)
- }
-}
-
-impl Error {
- /// Returns true if this was an error from `Connect`.
- pub fn is_connect(&self) -> bool {
- matches!(self.kind, ErrorKind::Connect)
- }
-
- /// Returns the info of the client connection on which this error occurred.
- #[cfg(any(feature = "http1", feature = "http2"))]
- pub fn connect_info(&self) -> Option<&Connected> {
- self.connect_info.as_ref()
- }
-
- #[cfg(any(feature = "http1", feature = "http2"))]
- fn with_connect_info(self, connect_info: Connected) -> Self {
- Self {
- connect_info: Some(connect_info),
- ..self
- }
- }
- fn is_canceled(&self) -> bool {
- matches!(self.kind, ErrorKind::Canceled)
- }
-
- fn tx(src: hyper::Error) -> Self {
- e!(SendRequest, src)
- }
-
- fn closed(src: hyper::Error) -> Self {
- e!(ChannelClosed, src)
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/capture.rs b/vendor/hyper-util/src/client/legacy/connect/capture.rs
deleted file mode 100644
index b31b6433..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/capture.rs
+++ /dev/null
@@ -1,187 +0,0 @@
-use std::{ops::Deref, sync::Arc};
-
-use http::Request;
-use tokio::sync::watch;
-
-use super::Connected;
-
-/// [`CaptureConnection`] allows callers to capture [`Connected`] information
-///
-/// To capture a connection for a request, use [`capture_connection`].
-#[derive(Debug, Clone)]
-pub struct CaptureConnection {
- rx: watch::Receiver<Option<Connected>>,
-}
-
-/// Capture the connection for a given request
-///
-/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait.
-/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon
-/// as the connection is established.
-///
-/// [`Connection`]: crate::client::legacy::connect::Connection
-///
-/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none.
-///
-/// # Examples
-///
-/// **Synchronous access**:
-/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been
-/// established. This is ideal for situations where you are certain the connection has already
-/// been established (e.g. after the response future has already completed).
-/// ```rust
-/// use hyper_util::client::legacy::connect::capture_connection;
-/// let mut request = http::Request::builder()
-/// .uri("http://foo.com")
-/// .body(())
-/// .unwrap();
-///
-/// let captured_connection = capture_connection(&mut request);
-/// // some time later after the request has been sent...
-/// let connection_info = captured_connection.connection_metadata();
-/// println!("we are connected! {:?}", connection_info.as_ref());
-/// ```
-///
-/// **Asynchronous access**:
-/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the
-/// connection is available.
-///
-/// ```rust
-/// # #[cfg(feature = "tokio")]
-/// # async fn example() {
-/// use hyper_util::client::legacy::connect::capture_connection;
-/// use hyper_util::client::legacy::Client;
-/// use hyper_util::rt::TokioExecutor;
-/// use bytes::Bytes;
-/// use http_body_util::Empty;
-/// let mut request = http::Request::builder()
-/// .uri("http://foo.com")
-/// .body(Empty::<Bytes>::new())
-/// .unwrap();
-///
-/// let mut captured = capture_connection(&mut request);
-/// tokio::task::spawn(async move {
-/// let connection_info = captured.wait_for_connection_metadata().await;
-/// println!("we are connected! {:?}", connection_info.as_ref());
-/// });
-///
-/// let client = Client::builder(TokioExecutor::new()).build_http();
-/// client.request(request).await.expect("request failed");
-/// # }
-/// ```
-pub fn capture_connection<B>(request: &mut Request<B>) -> CaptureConnection {
- let (tx, rx) = CaptureConnection::new();
- request.extensions_mut().insert(tx);
- rx
-}
-
-/// TxSide for [`CaptureConnection`]
-///
-/// This is inserted into `Extensions` to allow Hyper to back channel connection info
-#[derive(Clone)]
-pub(crate) struct CaptureConnectionExtension {
- tx: Arc<watch::Sender<Option<Connected>>>,
-}
-
-impl CaptureConnectionExtension {
- pub(crate) fn set(&self, connected: &Connected) {
- self.tx.send_replace(Some(connected.clone()));
- }
-}
-
-impl CaptureConnection {
- /// Internal API to create the tx and rx half of [`CaptureConnection`]
- pub(crate) fn new() -> (CaptureConnectionExtension, Self) {
- let (tx, rx) = watch::channel(None);
- (
- CaptureConnectionExtension { tx: Arc::new(tx) },
- CaptureConnection { rx },
- )
- }
-
- /// Retrieve the connection metadata, if available
- pub fn connection_metadata(&self) -> impl Deref<Target = Option<Connected>> + '_ {
- self.rx.borrow()
- }
-
- /// Wait for the connection to be established
- ///
- /// If a connection was established, this will always return `Some(...)`. If the request never
- /// successfully connected (e.g. DNS resolution failure), this method will never return.
- pub async fn wait_for_connection_metadata(
- &mut self,
- ) -> impl Deref<Target = Option<Connected>> + '_ {
- if self.rx.borrow().is_some() {
- return self.rx.borrow();
- }
- let _ = self.rx.changed().await;
- self.rx.borrow()
- }
-}
-
-#[cfg(all(test, not(miri)))]
-mod test {
- use super::*;
-
- #[test]
- fn test_sync_capture_connection() {
- let (tx, rx) = CaptureConnection::new();
- assert!(
- rx.connection_metadata().is_none(),
- "connection has not been set"
- );
- tx.set(&Connected::new().proxy(true));
- assert!(rx
- .connection_metadata()
- .as_ref()
- .expect("connected should be set")
- .is_proxied());
-
- // ensure it can be called multiple times
- assert!(rx
- .connection_metadata()
- .as_ref()
- .expect("connected should be set")
- .is_proxied());
- }
-
- #[tokio::test]
- async fn async_capture_connection() {
- let (tx, mut rx) = CaptureConnection::new();
- assert!(
- rx.connection_metadata().is_none(),
- "connection has not been set"
- );
- let test_task = tokio::spawn(async move {
- assert!(rx
- .wait_for_connection_metadata()
- .await
- .as_ref()
- .expect("connection should be set")
- .is_proxied());
- // can be awaited multiple times
- assert!(
- rx.wait_for_connection_metadata().await.is_some(),
- "should be awaitable multiple times"
- );
-
- assert!(rx.connection_metadata().is_some());
- });
- // can't be finished, we haven't set the connection yet
- assert!(!test_task.is_finished());
- tx.set(&Connected::new().proxy(true));
-
- assert!(test_task.await.is_ok());
- }
-
- #[tokio::test]
- async fn capture_connection_sender_side_dropped() {
- let (tx, mut rx) = CaptureConnection::new();
- assert!(
- rx.connection_metadata().is_none(),
- "connection has not been set"
- );
- drop(tx);
- assert!(rx.wait_for_connection_metadata().await.is_none());
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/dns.rs b/vendor/hyper-util/src/client/legacy/connect/dns.rs
deleted file mode 100644
index abeb2cca..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/dns.rs
+++ /dev/null
@@ -1,360 +0,0 @@
-//! DNS Resolution used by the `HttpConnector`.
-//!
-//! This module contains:
-//!
-//! - A [`GaiResolver`] that is the default resolver for the `HttpConnector`.
-//! - The `Name` type used as an argument to custom resolvers.
-//!
-//! # Resolvers are `Service`s
-//!
-//! A resolver is just a
-//! `Service<Name, Response = impl Iterator<Item = SocketAddr>>`.
-//!
-//! A simple resolver that ignores the name and always returns a specific
-//! address:
-//!
-//! ```rust,ignore
-//! use std::{convert::Infallible, iter, net::SocketAddr};
-//!
-//! let resolver = tower::service_fn(|_name| async {
-//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080))))
-//! });
-//! ```
-use std::error::Error;
-use std::future::Future;
-use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
-use std::pin::Pin;
-use std::str::FromStr;
-use std::task::{self, Poll};
-use std::{fmt, io, vec};
-
-use tokio::task::JoinHandle;
-use tower_service::Service;
-
-pub(super) use self::sealed::Resolve;
-
-/// A domain name to resolve into IP addresses.
-#[derive(Clone, Hash, Eq, PartialEq)]
-pub struct Name {
- host: Box<str>,
-}
-
-/// A resolver using blocking `getaddrinfo` calls in a threadpool.
-#[derive(Clone)]
-pub struct GaiResolver {
- _priv: (),
-}
-
-/// An iterator of IP addresses returned from `getaddrinfo`.
-pub struct GaiAddrs {
- inner: SocketAddrs,
-}
-
-/// A future to resolve a name returned by `GaiResolver`.
-pub struct GaiFuture {
- inner: JoinHandle<Result<SocketAddrs, io::Error>>,
-}
-
-impl Name {
- pub(super) fn new(host: Box<str>) -> Name {
- Name { host }
- }
-
- /// View the hostname as a string slice.
- pub fn as_str(&self) -> &str {
- &self.host
- }
-}
-
-impl fmt::Debug for Name {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&self.host, f)
- }
-}
-
-impl fmt::Display for Name {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.host, f)
- }
-}
-
-impl FromStr for Name {
- type Err = InvalidNameError;
-
- fn from_str(host: &str) -> Result<Self, Self::Err> {
- // Possibly add validation later
- Ok(Name::new(host.into()))
- }
-}
-
-/// Error indicating a given string was not a valid domain name.
-#[derive(Debug)]
-pub struct InvalidNameError(());
-
-impl fmt::Display for InvalidNameError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("Not a valid domain name")
- }
-}
-
-impl Error for InvalidNameError {}
-
-impl GaiResolver {
- /// Construct a new `GaiResolver`.
- pub fn new() -> Self {
- GaiResolver { _priv: () }
- }
-}
-
-impl Service<Name> for GaiResolver {
- type Response = GaiAddrs;
- type Error = io::Error;
- type Future = GaiFuture;
-
- fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, name: Name) -> Self::Future {
- let blocking = tokio::task::spawn_blocking(move || {
- (&*name.host, 0)
- .to_socket_addrs()
- .map(|i| SocketAddrs { iter: i })
- });
-
- GaiFuture { inner: blocking }
- }
-}
-
-impl fmt::Debug for GaiResolver {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiResolver")
- }
-}
-
-impl Future for GaiFuture {
- type Output = Result<GaiAddrs, io::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- Pin::new(&mut self.inner).poll(cx).map(|res| match res {
- Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),
- Ok(Err(err)) => Err(err),
- Err(join_err) => {
- if join_err.is_cancelled() {
- Err(io::Error::new(io::ErrorKind::Interrupted, join_err))
- } else {
- panic!("gai background task failed: {join_err:?}")
- }
- }
- })
- }
-}
-
-impl fmt::Debug for GaiFuture {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiFuture")
- }
-}
-
-impl Drop for GaiFuture {
- fn drop(&mut self) {
- self.inner.abort();
- }
-}
-
-impl Iterator for GaiAddrs {
- type Item = SocketAddr;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.inner.next()
- }
-}
-
-impl fmt::Debug for GaiAddrs {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiAddrs")
- }
-}
-
-pub(super) struct SocketAddrs {
- iter: vec::IntoIter<SocketAddr>,
-}
-
-impl SocketAddrs {
- pub(super) fn new(addrs: Vec<SocketAddr>) -> Self {
- SocketAddrs {
- iter: addrs.into_iter(),
- }
- }
-
- pub(super) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {
- if let Ok(addr) = host.parse::<Ipv4Addr>() {
- let addr = SocketAddrV4::new(addr, port);
- return Some(SocketAddrs {
- iter: vec![SocketAddr::V4(addr)].into_iter(),
- });
- }
- if let Ok(addr) = host.parse::<Ipv6Addr>() {
- let addr = SocketAddrV6::new(addr, port, 0, 0);
- return Some(SocketAddrs {
- iter: vec![SocketAddr::V6(addr)].into_iter(),
- });
- }
- None
- }
-
- #[inline]
- fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {
- SocketAddrs::new(self.iter.filter(predicate).collect())
- }
-
- pub(super) fn split_by_preference(
- self,
- local_addr_ipv4: Option<Ipv4Addr>,
- local_addr_ipv6: Option<Ipv6Addr>,
- ) -> (SocketAddrs, SocketAddrs) {
- match (local_addr_ipv4, local_addr_ipv6) {
- (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])),
- (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])),
- _ => {
- let preferring_v6 = self
- .iter
- .as_slice()
- .first()
- .map(SocketAddr::is_ipv6)
- .unwrap_or(false);
-
- let (preferred, fallback) = self
- .iter
- .partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);
-
- (SocketAddrs::new(preferred), SocketAddrs::new(fallback))
- }
- }
- }
-
- pub(super) fn is_empty(&self) -> bool {
- self.iter.as_slice().is_empty()
- }
-
- pub(super) fn len(&self) -> usize {
- self.iter.as_slice().len()
- }
-}
-
-impl Iterator for SocketAddrs {
- type Item = SocketAddr;
- #[inline]
- fn next(&mut self) -> Option<SocketAddr> {
- self.iter.next()
- }
-}
-
-mod sealed {
- use std::future::Future;
- use std::task::{self, Poll};
-
- use super::{Name, SocketAddr};
- use tower_service::Service;
-
- // "Trait alias" for `Service<Name, Response = Addrs>`
- pub trait Resolve {
- type Addrs: Iterator<Item = SocketAddr>;
- type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
- type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
- fn resolve(&mut self, name: Name) -> Self::Future;
- }
-
- impl<S> Resolve for S
- where
- S: Service<Name>,
- S::Response: Iterator<Item = SocketAddr>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- {
- type Addrs = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Service::poll_ready(self, cx)
- }
-
- fn resolve(&mut self, name: Name) -> Self::Future {
- Service::call(self, name)
- }
- }
-}
-
-pub(super) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
-where
- R: Resolve,
-{
- crate::common::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;
- resolver.resolve(name).await
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use std::net::{Ipv4Addr, Ipv6Addr};
-
- #[test]
- fn test_ip_addrs_split_by_preference() {
- let ip_v4 = Ipv4Addr::new(127, 0, 0, 1);
- let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
- let v4_addr = (ip_v4, 80).into();
- let v6_addr = (ip_v6, 80).into();
-
- let (mut preferred, mut fallback) = SocketAddrs {
- iter: vec![v4_addr, v6_addr].into_iter(),
- }
- .split_by_preference(None, None);
- assert!(preferred.next().unwrap().is_ipv4());
- assert!(fallback.next().unwrap().is_ipv6());
-
- let (mut preferred, mut fallback) = SocketAddrs {
- iter: vec![v6_addr, v4_addr].into_iter(),
- }
- .split_by_preference(None, None);
- assert!(preferred.next().unwrap().is_ipv6());
- assert!(fallback.next().unwrap().is_ipv4());
-
- let (mut preferred, mut fallback) = SocketAddrs {
- iter: vec![v4_addr, v6_addr].into_iter(),
- }
- .split_by_preference(Some(ip_v4), Some(ip_v6));
- assert!(preferred.next().unwrap().is_ipv4());
- assert!(fallback.next().unwrap().is_ipv6());
-
- let (mut preferred, mut fallback) = SocketAddrs {
- iter: vec![v6_addr, v4_addr].into_iter(),
- }
- .split_by_preference(Some(ip_v4), Some(ip_v6));
- assert!(preferred.next().unwrap().is_ipv6());
- assert!(fallback.next().unwrap().is_ipv4());
-
- let (mut preferred, fallback) = SocketAddrs {
- iter: vec![v4_addr, v6_addr].into_iter(),
- }
- .split_by_preference(Some(ip_v4), None);
- assert!(preferred.next().unwrap().is_ipv4());
- assert!(fallback.is_empty());
-
- let (mut preferred, fallback) = SocketAddrs {
- iter: vec![v4_addr, v6_addr].into_iter(),
- }
- .split_by_preference(None, Some(ip_v6));
- assert!(preferred.next().unwrap().is_ipv6());
- assert!(fallback.is_empty());
- }
-
- #[test]
- fn test_name_from_str() {
- const DOMAIN: &str = "test.example.com";
- let name = Name::from_str(DOMAIN).expect("Should be a valid domain");
- assert_eq!(name.as_str(), DOMAIN);
- assert_eq!(name.to_string(), DOMAIN);
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/http.rs b/vendor/hyper-util/src/client/legacy/connect/http.rs
deleted file mode 100644
index f19a78eb..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/http.rs
+++ /dev/null
@@ -1,1468 +0,0 @@
-use std::error::Error as StdError;
-use std::fmt;
-use std::future::Future;
-use std::io;
-use std::marker::PhantomData;
-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::{self, Poll};
-use std::time::Duration;
-
-use futures_core::ready;
-use futures_util::future::Either;
-use http::uri::{Scheme, Uri};
-use pin_project_lite::pin_project;
-use socket2::TcpKeepalive;
-use tokio::net::{TcpSocket, TcpStream};
-use tokio::time::Sleep;
-use tracing::{debug, trace, warn};
-
-use super::dns::{self, resolve, GaiResolver, Resolve};
-use super::{Connected, Connection};
-use crate::rt::TokioIo;
-
-/// A connector for the `http` scheme.
-///
-/// Performs DNS resolution in a thread pool, and then connects over TCP.
-///
-/// # Note
-///
-/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes
-/// transport information such as the remote socket address used.
-#[derive(Clone)]
-pub struct HttpConnector<R = GaiResolver> {
- config: Arc<Config>,
- resolver: R,
-}
-
-/// Extra information about the transport when an HttpConnector is used.
-///
-/// # Example
-///
-/// ```
-/// # fn doc(res: http::Response<()>) {
-/// use hyper_util::client::legacy::connect::HttpInfo;
-///
-/// // res = http::Response
-/// res
-/// .extensions()
-/// .get::<HttpInfo>()
-/// .map(|info| {
-/// println!("remote addr = {}", info.remote_addr());
-/// });
-/// # }
-/// ```
-///
-/// # Note
-///
-/// If a different connector is used besides [`HttpConnector`](HttpConnector),
-/// this value will not exist in the extensions. Consult that specific
-/// connector to see what "extra" information it might provide to responses.
-#[derive(Clone, Debug)]
-pub struct HttpInfo {
- remote_addr: SocketAddr,
- local_addr: SocketAddr,
-}
-
-#[derive(Clone)]
-struct Config {
- connect_timeout: Option<Duration>,
- enforce_http: bool,
- happy_eyeballs_timeout: Option<Duration>,
- tcp_keepalive_config: TcpKeepaliveConfig,
- local_address_ipv4: Option<Ipv4Addr>,
- local_address_ipv6: Option<Ipv6Addr>,
- nodelay: bool,
- reuse_address: bool,
- send_buffer_size: Option<usize>,
- recv_buffer_size: Option<usize>,
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- interface: Option<String>,
- #[cfg(any(
- target_os = "illumos",
- target_os = "ios",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- interface: Option<std::ffi::CString>,
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- tcp_user_timeout: Option<Duration>,
-}
-
-#[derive(Default, Debug, Clone, Copy)]
-struct TcpKeepaliveConfig {
- time: Option<Duration>,
- interval: Option<Duration>,
- retries: Option<u32>,
-}
-
-impl TcpKeepaliveConfig {
- /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration.
- fn into_tcpkeepalive(self) -> Option<TcpKeepalive> {
- let mut dirty = false;
- let mut ka = TcpKeepalive::new();
- if let Some(time) = self.time {
- ka = ka.with_time(time);
- dirty = true
- }
- if let Some(interval) = self.interval {
- ka = Self::ka_with_interval(ka, interval, &mut dirty)
- };
- if let Some(retries) = self.retries {
- ka = Self::ka_with_retries(ka, retries, &mut dirty)
- };
- if dirty {
- Some(ka)
- } else {
- None
- }
- }
-
- #[cfg(
- // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525
- any(
- target_os = "android",
- target_os = "dragonfly",
- target_os = "freebsd",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "visionos",
- target_os = "linux",
- target_os = "macos",
- target_os = "netbsd",
- target_os = "tvos",
- target_os = "watchos",
- target_os = "windows",
- )
- )]
- fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive {
- *dirty = true;
- ka.with_interval(interval)
- }
-
- #[cfg(not(
- // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525
- any(
- target_os = "android",
- target_os = "dragonfly",
- target_os = "freebsd",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "visionos",
- target_os = "linux",
- target_os = "macos",
- target_os = "netbsd",
- target_os = "tvos",
- target_os = "watchos",
- target_os = "windows",
- )
- ))]
- fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
- ka // no-op as keepalive interval is not supported on this platform
- }
-
- #[cfg(
- // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570
- any(
- target_os = "android",
- target_os = "dragonfly",
- target_os = "freebsd",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "visionos",
- target_os = "linux",
- target_os = "macos",
- target_os = "netbsd",
- target_os = "tvos",
- target_os = "watchos",
- )
- )]
- fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive {
- *dirty = true;
- ka.with_retries(retries)
- }
-
- #[cfg(not(
- // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570
- any(
- target_os = "android",
- target_os = "dragonfly",
- target_os = "freebsd",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "visionos",
- target_os = "linux",
- target_os = "macos",
- target_os = "netbsd",
- target_os = "tvos",
- target_os = "watchos",
- )
- ))]
- fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive {
- ka // no-op as keepalive retries is not supported on this platform
- }
-}
-
-// ===== impl HttpConnector =====
-
-impl HttpConnector {
- /// Construct a new HttpConnector.
- pub fn new() -> HttpConnector {
- HttpConnector::new_with_resolver(GaiResolver::new())
- }
-}
-
-impl<R> HttpConnector<R> {
- /// Construct a new HttpConnector.
- ///
- /// Takes a [`Resolver`](crate::client::legacy::connect::dns#resolvers-are-services) to handle DNS lookups.
- pub fn new_with_resolver(resolver: R) -> HttpConnector<R> {
- HttpConnector {
- config: Arc::new(Config {
- connect_timeout: None,
- enforce_http: true,
- happy_eyeballs_timeout: Some(Duration::from_millis(300)),
- tcp_keepalive_config: TcpKeepaliveConfig::default(),
- local_address_ipv4: None,
- local_address_ipv6: None,
- nodelay: false,
- reuse_address: false,
- send_buffer_size: None,
- recv_buffer_size: None,
- #[cfg(any(
- target_os = "android",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "linux",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- interface: None,
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- tcp_user_timeout: None,
- }),
- resolver,
- }
- }
-
- /// Option to enforce all `Uri`s have the `http` scheme.
- ///
- /// Enabled by default.
- #[inline]
- pub fn enforce_http(&mut self, is_enforced: bool) {
- self.config_mut().enforce_http = is_enforced;
- }
-
- /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration
- /// to remain idle before sending TCP keepalive probes.
- ///
- /// If `None`, keepalive is disabled.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_keepalive(&mut self, time: Option<Duration>) {
- self.config_mut().tcp_keepalive_config.time = time;
- }
-
- /// Set the duration between two successive TCP keepalive retransmissions,
- /// if acknowledgement to the previous keepalive transmission is not received.
- #[inline]
- pub fn set_keepalive_interval(&mut self, interval: Option<Duration>) {
- self.config_mut().tcp_keepalive_config.interval = interval;
- }
-
- /// Set the number of retransmissions to be carried out before declaring that remote end is not available.
- #[inline]
- pub fn set_keepalive_retries(&mut self, retries: Option<u32>) {
- self.config_mut().tcp_keepalive_config.retries = retries;
- }
-
- /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.
- ///
- /// Default is `false`.
- #[inline]
- pub fn set_nodelay(&mut self, nodelay: bool) {
- self.config_mut().nodelay = nodelay;
- }
-
- /// Sets the value of the SO_SNDBUF option on the socket.
- #[inline]
- pub fn set_send_buffer_size(&mut self, size: Option<usize>) {
- self.config_mut().send_buffer_size = size;
- }
-
- /// Sets the value of the SO_RCVBUF option on the socket.
- #[inline]
- pub fn set_recv_buffer_size(&mut self, size: Option<usize>) {
- self.config_mut().recv_buffer_size = size;
- }
-
- /// Set that all sockets are bound to the configured address before connection.
- ///
- /// If `None`, the sockets will not be bound.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_local_address(&mut self, addr: Option<IpAddr>) {
- let (v4, v6) = match addr {
- Some(IpAddr::V4(a)) => (Some(a), None),
- Some(IpAddr::V6(a)) => (None, Some(a)),
- _ => (None, None),
- };
-
- let cfg = self.config_mut();
-
- cfg.local_address_ipv4 = v4;
- cfg.local_address_ipv6 = v6;
- }
-
- /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's
- /// preferences) before connection.
- #[inline]
- pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) {
- let cfg = self.config_mut();
-
- cfg.local_address_ipv4 = Some(addr_ipv4);
- cfg.local_address_ipv6 = Some(addr_ipv6);
- }
-
- /// Set the connect timeout.
- ///
- /// If a domain resolves to multiple IP addresses, the timeout will be
- /// evenly divided across them.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_connect_timeout(&mut self, dur: Option<Duration>) {
- self.config_mut().connect_timeout = dur;
- }
-
- /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.
- ///
- /// If hostname resolves to both IPv4 and IPv6 addresses and connection
- /// cannot be established using preferred address family before timeout
- /// elapses, then connector will in parallel attempt connection using other
- /// address family.
- ///
- /// If `None`, parallel connection attempts are disabled.
- ///
- /// Default is 300 milliseconds.
- ///
- /// [RFC 6555]: https://tools.ietf.org/html/rfc6555
- #[inline]
- pub fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {
- self.config_mut().happy_eyeballs_timeout = dur;
- }
-
- /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.
- ///
- /// Default is `false`.
- #[inline]
- pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self {
- self.config_mut().reuse_address = reuse_address;
- self
- }
-
- /// Sets the name of the interface to bind sockets produced by this
- /// connector.
- ///
- /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see
- /// [`man 7 socket`] for details). On macOS (and macOS-derived systems like
- /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF`
- /// socket option (see [`man 7p ip`]).
- ///
- /// If a socket is bound to an interface, only packets received from that particular
- /// interface are processed by the socket. Note that this only works for some socket
- /// types, particularly `AF_INET`` sockets.
- ///
- /// On Linux it can be used to specify a [VRF], but the binary needs
- /// to either have `CAP_NET_RAW` or to be run as root.
- ///
- /// This function is only available on the following operating systems:
- /// - Linux, including Android
- /// - Fuchsia
- /// - illumos and Solaris
- /// - macOS, iOS, visionOS, watchOS, and tvOS
- ///
- /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt
- /// [`man 7 socket`] https://man7.org/linux/man-pages/man7/socket.7.html
- /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html
- #[cfg(any(
- target_os = "android",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "linux",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- #[inline]
- pub fn set_interface<S: Into<String>>(&mut self, interface: S) -> &mut Self {
- let interface = interface.into();
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- {
- self.config_mut().interface = Some(interface);
- }
- #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))]
- {
- let interface = std::ffi::CString::new(interface)
- .expect("interface name should not have nulls in it");
- self.config_mut().interface = Some(interface);
- }
- self
- }
-
- /// Sets the value of the TCP_USER_TIMEOUT option on the socket.
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- #[inline]
- pub fn set_tcp_user_timeout(&mut self, time: Option<Duration>) {
- self.config_mut().tcp_user_timeout = time;
- }
-
- // private
-
- fn config_mut(&mut self) -> &mut Config {
- // If the are HttpConnector clones, this will clone the inner
- // config. So mutating the config won't ever affect previous
- // clones.
- Arc::make_mut(&mut self.config)
- }
-}
-
-static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
-static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
-static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";
-
-// R: Debug required for now to allow adding it to debug output later...
-impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("HttpConnector").finish()
- }
-}
-
-impl<R> tower_service::Service<Uri> for HttpConnector<R>
-where
- R: Resolve + Clone + Send + Sync + 'static,
- R::Future: Send,
-{
- type Response = TokioIo<TcpStream>;
- type Error = ConnectError;
- type Future = HttpConnecting<R>;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- let mut self_ = self.clone();
- HttpConnecting {
- fut: Box::pin(async move { self_.call_async(dst).await }),
- _marker: PhantomData,
- }
- }
-}
-
-fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> {
- trace!(
- "Http::connect; scheme={:?}, host={:?}, port={:?}",
- dst.scheme(),
- dst.host(),
- dst.port(),
- );
-
- if config.enforce_http {
- if dst.scheme() != Some(&Scheme::HTTP) {
- return Err(ConnectError {
- msg: INVALID_NOT_HTTP,
- addr: None,
- cause: None,
- });
- }
- } else if dst.scheme().is_none() {
- return Err(ConnectError {
- msg: INVALID_MISSING_SCHEME,
- addr: None,
- cause: None,
- });
- }
-
- let host = match dst.host() {
- Some(s) => s,
- None => {
- return Err(ConnectError {
- msg: INVALID_MISSING_HOST,
- addr: None,
- cause: None,
- })
- }
- };
- let port = match dst.port() {
- Some(port) => port.as_u16(),
- None => {
- if dst.scheme() == Some(&Scheme::HTTPS) {
- 443
- } else {
- 80
- }
- }
- };
-
- Ok((host, port))
-}
-
-impl<R> HttpConnector<R>
-where
- R: Resolve,
-{
- async fn call_async(&mut self, dst: Uri) -> Result<TokioIo<TcpStream>, ConnectError> {
- let config = &self.config;
-
- let (host, port) = get_host_port(config, &dst)?;
- let host = host.trim_start_matches('[').trim_end_matches(']');
-
- // If the host is already an IP addr (v4 or v6),
- // skip resolving the dns and start connecting right away.
- let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) {
- addrs
- } else {
- let addrs = resolve(&mut self.resolver, dns::Name::new(host.into()))
- .await
- .map_err(ConnectError::dns)?;
- let addrs = addrs
- .map(|mut addr| {
- set_port(&mut addr, port, dst.port().is_some());
-
- addr
- })
- .collect();
- dns::SocketAddrs::new(addrs)
- };
-
- let c = ConnectingTcp::new(addrs, config);
-
- let sock = c.connect().await?;
-
- if let Err(e) = sock.set_nodelay(config.nodelay) {
- warn!("tcp set_nodelay error: {}", e);
- }
-
- Ok(TokioIo::new(sock))
- }
-}
-
-impl Connection for TcpStream {
- fn connected(&self) -> Connected {
- let connected = Connected::new();
- if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) {
- connected.extra(HttpInfo {
- remote_addr,
- local_addr,
- })
- } else {
- connected
- }
- }
-}
-
-#[cfg(unix)]
-impl Connection for tokio::net::UnixStream {
- fn connected(&self) -> Connected {
- Connected::new()
- }
-}
-
-#[cfg(windows)]
-impl Connection for tokio::net::windows::named_pipe::NamedPipeClient {
- fn connected(&self) -> Connected {
- Connected::new()
- }
-}
-
-// Implement `Connection` for generic `TokioIo<T>` so that external crates can
-// implement their own `HttpConnector` with `TokioIo<CustomTcpStream>`.
-impl<T> Connection for TokioIo<T>
-where
- T: Connection,
-{
- fn connected(&self) -> Connected {
- self.inner().connected()
- }
-}
-
-impl HttpInfo {
- /// Get the remote address of the transport used.
- pub fn remote_addr(&self) -> SocketAddr {
- self.remote_addr
- }
-
- /// Get the local address of the transport used.
- pub fn local_addr(&self) -> SocketAddr {
- self.local_addr
- }
-}
-
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct HttpConnecting<R> {
- #[pin]
- fut: BoxConnecting,
- _marker: PhantomData<R>,
- }
-}
-
-type ConnectResult = Result<TokioIo<TcpStream>, ConnectError>;
-type BoxConnecting = Pin<Box<dyn Future<Output = ConnectResult> + Send>>;
-
-impl<R: Resolve> Future for HttpConnecting<R> {
- type Output = ConnectResult;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.project().fut.poll(cx)
- }
-}
-
-// Not publicly exported (so missing_docs doesn't trigger).
-pub struct ConnectError {
- msg: &'static str,
- addr: Option<SocketAddr>,
- cause: Option<Box<dyn StdError + Send + Sync>>,
-}
-
-impl ConnectError {
- fn new<E>(msg: &'static str, cause: E) -> ConnectError
- where
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- ConnectError {
- msg,
- addr: None,
- cause: Some(cause.into()),
- }
- }
-
- fn dns<E>(cause: E) -> ConnectError
- where
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- ConnectError::new("dns error", cause)
- }
-
- fn m<E>(msg: &'static str) -> impl FnOnce(E) -> ConnectError
- where
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- move |cause| ConnectError::new(msg, cause)
- }
-}
-
-impl fmt::Debug for ConnectError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let mut b = f.debug_tuple("ConnectError");
- b.field(&self.msg);
- if let Some(ref addr) = self.addr {
- b.field(addr);
- }
- if let Some(ref cause) = self.cause {
- b.field(cause);
- }
- b.finish()
- }
-}
-
-impl fmt::Display for ConnectError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(self.msg)
- }
-}
-
-impl StdError for ConnectError {
- fn source(&self) -> Option<&(dyn StdError + 'static)> {
- self.cause.as_ref().map(|e| &**e as _)
- }
-}
-
-struct ConnectingTcp<'a> {
- preferred: ConnectingTcpRemote,
- fallback: Option<ConnectingTcpFallback>,
- config: &'a Config,
-}
-
-impl<'a> ConnectingTcp<'a> {
- fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self {
- if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
- let (preferred_addrs, fallback_addrs) = remote_addrs
- .split_by_preference(config.local_address_ipv4, config.local_address_ipv6);
- if fallback_addrs.is_empty() {
- return ConnectingTcp {
- preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
- fallback: None,
- config,
- };
- }
-
- ConnectingTcp {
- preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
- fallback: Some(ConnectingTcpFallback {
- delay: tokio::time::sleep(fallback_timeout),
- remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout),
- }),
- config,
- }
- } else {
- ConnectingTcp {
- preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout),
- fallback: None,
- config,
- }
- }
- }
-}
-
-struct ConnectingTcpFallback {
- delay: Sleep,
- remote: ConnectingTcpRemote,
-}
-
-struct ConnectingTcpRemote {
- addrs: dns::SocketAddrs,
- connect_timeout: Option<Duration>,
-}
-
-impl ConnectingTcpRemote {
- fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>) -> Self {
- let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32));
-
- Self {
- addrs,
- connect_timeout,
- }
- }
-}
-
-impl ConnectingTcpRemote {
- async fn connect(&mut self, config: &Config) -> Result<TcpStream, ConnectError> {
- let mut err = None;
- for addr in &mut self.addrs {
- debug!("connecting to {}", addr);
- match connect(&addr, config, self.connect_timeout)?.await {
- Ok(tcp) => {
- debug!("connected to {}", addr);
- return Ok(tcp);
- }
- Err(mut e) => {
- trace!("connect error for {}: {:?}", addr, e);
- e.addr = Some(addr);
- // only return the first error, we assume it's the most relevant
- if err.is_none() {
- err = Some(e);
- }
- }
- }
- }
-
- match err {
- Some(e) => Err(e),
- None => Err(ConnectError::new(
- "tcp connect error",
- std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"),
- )),
- }
- }
-}
-
-fn bind_local_address(
- socket: &socket2::Socket,
- dst_addr: &SocketAddr,
- local_addr_ipv4: &Option<Ipv4Addr>,
- local_addr_ipv6: &Option<Ipv6Addr>,
-) -> io::Result<()> {
- match (*dst_addr, local_addr_ipv4, local_addr_ipv6) {
- (SocketAddr::V4(_), Some(addr), _) => {
- socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;
- }
- (SocketAddr::V6(_), _, Some(addr)) => {
- socket.bind(&SocketAddr::new((*addr).into(), 0).into())?;
- }
- _ => {
- if cfg!(windows) {
- // Windows requires a socket be bound before calling connect
- let any: SocketAddr = match *dst_addr {
- SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),
- SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),
- };
- socket.bind(&any.into())?;
- }
- }
- }
-
- Ok(())
-}
-
-fn connect(
- addr: &SocketAddr,
- config: &Config,
- connect_timeout: Option<Duration>,
-) -> Result<impl Future<Output = Result<TcpStream, ConnectError>>, ConnectError> {
- // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the
- // keepalive timeout, it would be nice to use that instead of socket2,
- // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance...
- use socket2::{Domain, Protocol, Socket, Type};
-
- let domain = Domain::for_address(*addr);
- let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))
- .map_err(ConnectError::m("tcp open error"))?;
-
- // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is
- // responsible for ensuring O_NONBLOCK is set.
- socket
- .set_nonblocking(true)
- .map_err(ConnectError::m("tcp set_nonblocking error"))?;
-
- if let Some(tcp_keepalive) = &config.tcp_keepalive_config.into_tcpkeepalive() {
- if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) {
- warn!("tcp set_keepalive error: {}", e);
- }
- }
-
- // That this only works for some socket types, particularly AF_INET sockets.
- #[cfg(any(
- target_os = "android",
- target_os = "fuchsia",
- target_os = "illumos",
- target_os = "ios",
- target_os = "linux",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- if let Some(interface) = &config.interface {
- // On Linux-like systems, set the interface to bind using
- // `SO_BINDTODEVICE`.
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- socket
- .bind_device(Some(interface.as_bytes()))
- .map_err(ConnectError::m("tcp bind interface error"))?;
-
- // On macOS-like and Solaris-like systems, we instead use `IP_BOUND_IF`.
- // This socket option desires an integer index for the interface, so we
- // must first determine the index of the requested interface name using
- // `if_nametoindex`.
- #[cfg(any(
- target_os = "illumos",
- target_os = "ios",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- {
- let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) };
- let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| {
- // If the index is 0, check errno and return an I/O error.
- ConnectError::new(
- "error converting interface name to index",
- io::Error::last_os_error(),
- )
- })?;
- // Different setsockopt calls are necessary depending on whether the
- // address is IPv4 or IPv6.
- match addr {
- SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)),
- SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)),
- }
- .map_err(ConnectError::m("tcp bind interface error"))?;
- }
- }
-
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- if let Some(tcp_user_timeout) = &config.tcp_user_timeout {
- if let Err(e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) {
- warn!("tcp set_tcp_user_timeout error: {}", e);
- }
- }
-
- bind_local_address(
- &socket,
- addr,
- &config.local_address_ipv4,
- &config.local_address_ipv6,
- )
- .map_err(ConnectError::m("tcp bind local error"))?;
-
- #[cfg(unix)]
- let socket = unsafe {
- // Safety: `from_raw_fd` is only safe to call if ownership of the raw
- // file descriptor is transferred. Since we call `into_raw_fd` on the
- // socket2 socket, it gives up ownership of the fd and will not close
- // it, so this is safe.
- use std::os::unix::io::{FromRawFd, IntoRawFd};
- TcpSocket::from_raw_fd(socket.into_raw_fd())
- };
- #[cfg(windows)]
- let socket = unsafe {
- // Safety: `from_raw_socket` is only safe to call if ownership of the raw
- // Windows SOCKET is transferred. Since we call `into_raw_socket` on the
- // socket2 socket, it gives up ownership of the SOCKET and will not close
- // it, so this is safe.
- use std::os::windows::io::{FromRawSocket, IntoRawSocket};
- TcpSocket::from_raw_socket(socket.into_raw_socket())
- };
-
- if config.reuse_address {
- if let Err(e) = socket.set_reuseaddr(true) {
- warn!("tcp set_reuse_address error: {}", e);
- }
- }
-
- if let Some(size) = config.send_buffer_size {
- if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(u32::MAX)) {
- warn!("tcp set_buffer_size error: {}", e);
- }
- }
-
- if let Some(size) = config.recv_buffer_size {
- if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(u32::MAX)) {
- warn!("tcp set_recv_buffer_size error: {}", e);
- }
- }
-
- let connect = socket.connect(*addr);
- Ok(async move {
- match connect_timeout {
- Some(dur) => match tokio::time::timeout(dur, connect).await {
- Ok(Ok(s)) => Ok(s),
- Ok(Err(e)) => Err(e),
- Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)),
- },
- None => connect.await,
- }
- .map_err(ConnectError::m("tcp connect error"))
- })
-}
-
-impl ConnectingTcp<'_> {
- async fn connect(mut self) -> Result<TcpStream, ConnectError> {
- match self.fallback {
- None => self.preferred.connect(self.config).await,
- Some(mut fallback) => {
- let preferred_fut = self.preferred.connect(self.config);
- futures_util::pin_mut!(preferred_fut);
-
- let fallback_fut = fallback.remote.connect(self.config);
- futures_util::pin_mut!(fallback_fut);
-
- let fallback_delay = fallback.delay;
- futures_util::pin_mut!(fallback_delay);
-
- let (result, future) =
- match futures_util::future::select(preferred_fut, fallback_delay).await {
- Either::Left((result, _fallback_delay)) => {
- (result, Either::Right(fallback_fut))
- }
- Either::Right(((), preferred_fut)) => {
- // Delay is done, start polling both the preferred and the fallback
- futures_util::future::select(preferred_fut, fallback_fut)
- .await
- .factor_first()
- }
- };
-
- if result.is_err() {
- // Fallback to the remaining future (could be preferred or fallback)
- // if we get an error
- future.await
- } else {
- result
- }
- }
- }
- }
-}
-
-/// Respect explicit ports in the URI, if none, either
-/// keep non `0` ports resolved from a custom dns resolver,
-/// or use the default port for the scheme.
-fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) {
- if explicit || addr.port() == 0 {
- addr.set_port(host_port)
- };
-}
-
-#[cfg(test)]
-mod tests {
- use std::io;
- use std::net::SocketAddr;
-
- use ::http::Uri;
-
- use crate::client::legacy::connect::http::TcpKeepaliveConfig;
-
- use super::super::sealed::{Connect, ConnectSvc};
- use super::{Config, ConnectError, HttpConnector};
-
- use super::set_port;
-
- async fn connect<C>(
- connector: C,
- dst: Uri,
- ) -> Result<<C::_Svc as ConnectSvc>::Connection, <C::_Svc as ConnectSvc>::Error>
- where
- C: Connect,
- {
- connector.connect(super::super::sealed::Internal, dst).await
- }
-
- #[tokio::test]
- #[cfg_attr(miri, ignore)]
- async fn test_errors_enforce_http() {
- let dst = "https://example.domain/foo/bar?baz".parse().unwrap();
- let connector = HttpConnector::new();
-
- let err = connect(connector, dst).await.unwrap_err();
- assert_eq!(&*err.msg, super::INVALID_NOT_HTTP);
- }
-
- #[cfg(any(target_os = "linux", target_os = "macos"))]
- fn get_local_ips() -> (Option<std::net::Ipv4Addr>, Option<std::net::Ipv6Addr>) {
- use std::net::{IpAddr, TcpListener};
-
- let mut ip_v4 = None;
- let mut ip_v6 = None;
-
- let ips = pnet_datalink::interfaces()
- .into_iter()
- .flat_map(|i| i.ips.into_iter().map(|n| n.ip()));
-
- for ip in ips {
- match ip {
- IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip),
- IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip),
- _ => (),
- }
-
- if ip_v4.is_some() && ip_v6.is_some() {
- break;
- }
- }
-
- (ip_v4, ip_v6)
- }
-
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- fn default_interface() -> Option<String> {
- pnet_datalink::interfaces()
- .iter()
- .find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty())
- .map(|e| e.name.clone())
- }
-
- #[tokio::test]
- #[cfg_attr(miri, ignore)]
- async fn test_errors_missing_scheme() {
- let dst = "example.domain".parse().unwrap();
- let mut connector = HttpConnector::new();
- connector.enforce_http(false);
-
- let err = connect(connector, dst).await.unwrap_err();
- assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME);
- }
-
- // NOTE: pnet crate that we use in this test doesn't compile on Windows
- #[cfg(any(target_os = "linux", target_os = "macos"))]
- #[cfg_attr(miri, ignore)]
- #[tokio::test]
- async fn local_address() {
- use std::net::{IpAddr, TcpListener};
-
- let (bind_ip_v4, bind_ip_v6) = get_local_ips();
- let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
- let port = server4.local_addr().unwrap().port();
- let server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap();
-
- let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move {
- let mut connector = HttpConnector::new();
-
- match (bind_ip_v4, bind_ip_v6) {
- (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6),
- (Some(v4), None) => connector.set_local_address(Some(v4.into())),
- (None, Some(v6)) => connector.set_local_address(Some(v6.into())),
- _ => unreachable!(),
- }
-
- connect(connector, dst.parse().unwrap()).await.unwrap();
-
- let (_, client_addr) = server.accept().unwrap();
-
- assert_eq!(client_addr.ip(), expected_ip);
- };
-
- if let Some(ip) = bind_ip_v4 {
- assert_client_ip(format!("http://127.0.0.1:{port}"), server4, ip.into()).await;
- }
-
- if let Some(ip) = bind_ip_v6 {
- assert_client_ip(format!("http://[::1]:{port}"), server6, ip.into()).await;
- }
- }
-
- // NOTE: pnet crate that we use in this test doesn't compile on Windows
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))]
- #[tokio::test]
- #[ignore = "setting `SO_BINDTODEVICE` requires the `CAP_NET_RAW` capability (works when running as root)"]
- async fn interface() {
- use socket2::{Domain, Protocol, Socket, Type};
- use std::net::TcpListener;
-
- let interface: Option<String> = default_interface();
-
- let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
- let port = server4.local_addr().unwrap().port();
-
- let server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap();
-
- let assert_interface_name =
- |dst: String,
- server: TcpListener,
- bind_iface: Option<String>,
- expected_interface: Option<String>| async move {
- let mut connector = HttpConnector::new();
- if let Some(iface) = bind_iface {
- connector.set_interface(iface);
- }
-
- connect(connector, dst.parse().unwrap()).await.unwrap();
- let domain = Domain::for_address(server.local_addr().unwrap());
- let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap();
-
- assert_eq!(
- socket.device().unwrap().as_deref(),
- expected_interface.as_deref().map(|val| val.as_bytes())
- );
- };
-
- assert_interface_name(
- format!("http://127.0.0.1:{port}"),
- server4,
- interface.clone(),
- interface.clone(),
- )
- .await;
- assert_interface_name(
- format!("http://[::1]:{port}"),
- server6,
- interface.clone(),
- interface.clone(),
- )
- .await;
- }
-
- #[test]
- #[ignore] // TODO
- #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)]
- fn client_happy_eyeballs() {
- use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener};
- use std::time::{Duration, Instant};
-
- use super::dns;
- use super::ConnectingTcp;
-
- let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server4.local_addr().unwrap();
- let _server6 = TcpListener::bind(format!("[::1]:{}", addr.port())).unwrap();
- let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .unwrap();
-
- let local_timeout = Duration::default();
- let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1;
- let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1;
- let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout)
- + Duration::from_millis(250);
-
- let scenarios = &[
- // Fast primary, without fallback.
- (&[local_ipv4_addr()][..], 4, local_timeout, false),
- (&[local_ipv6_addr()][..], 6, local_timeout, false),
- // Fast primary, with (unused) fallback.
- (
- &[local_ipv4_addr(), local_ipv6_addr()][..],
- 4,
- local_timeout,
- false,
- ),
- (
- &[local_ipv6_addr(), local_ipv4_addr()][..],
- 6,
- local_timeout,
- false,
- ),
- // Unreachable + fast primary, without fallback.
- (
- &[unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4,
- unreachable_v4_timeout,
- false,
- ),
- (
- &[unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6,
- unreachable_v6_timeout,
- false,
- ),
- // Unreachable + fast primary, with (unused) fallback.
- (
- &[
- unreachable_ipv4_addr(),
- local_ipv4_addr(),
- local_ipv6_addr(),
- ][..],
- 4,
- unreachable_v4_timeout,
- false,
- ),
- (
- &[
- unreachable_ipv6_addr(),
- local_ipv6_addr(),
- local_ipv4_addr(),
- ][..],
- 6,
- unreachable_v6_timeout,
- true,
- ),
- // Slow primary, with (used) fallback.
- (
- &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
- 6,
- fallback_timeout,
- false,
- ),
- (
- &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
- 4,
- fallback_timeout,
- true,
- ),
- // Slow primary, with (used) unreachable + fast fallback.
- (
- &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6,
- fallback_timeout + unreachable_v6_timeout,
- false,
- ),
- (
- &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4,
- fallback_timeout + unreachable_v4_timeout,
- true,
- ),
- ];
-
- // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network.
- // Otherwise, connection to "slow" IPv6 address will error-out immediately.
- let ipv6_accessible = measure_connect(slow_ipv6_addr()).0;
-
- for &(hosts, family, timeout, needs_ipv6_access) in scenarios {
- if needs_ipv6_access && !ipv6_accessible {
- continue;
- }
-
- let (start, stream) = rt
- .block_on(async move {
- let addrs = hosts
- .iter()
- .map(|host| (*host, addr.port()).into())
- .collect();
- let cfg = Config {
- local_address_ipv4: None,
- local_address_ipv6: None,
- connect_timeout: None,
- tcp_keepalive_config: TcpKeepaliveConfig::default(),
- happy_eyeballs_timeout: Some(fallback_timeout),
- nodelay: false,
- reuse_address: false,
- enforce_http: false,
- send_buffer_size: None,
- recv_buffer_size: None,
- #[cfg(any(
- target_os = "android",
- target_os = "fuchsia",
- target_os = "linux"
- ))]
- interface: None,
- #[cfg(any(
- target_os = "illumos",
- target_os = "ios",
- target_os = "macos",
- target_os = "solaris",
- target_os = "tvos",
- target_os = "visionos",
- target_os = "watchos",
- ))]
- interface: None,
- #[cfg(any(
- target_os = "android",
- target_os = "fuchsia",
- target_os = "linux"
- ))]
- tcp_user_timeout: None,
- };
- let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg);
- let start = Instant::now();
- Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?))
- })
- .unwrap();
- let res = if stream.peer_addr().unwrap().is_ipv4() {
- 4
- } else {
- 6
- };
- let duration = start.elapsed();
-
- // Allow actual duration to be +/- 150ms off.
- let min_duration = if timeout >= Duration::from_millis(150) {
- timeout - Duration::from_millis(150)
- } else {
- Duration::default()
- };
- let max_duration = timeout + Duration::from_millis(150);
-
- assert_eq!(res, family);
- assert!(duration >= min_duration);
- assert!(duration <= max_duration);
- }
-
- fn local_ipv4_addr() -> IpAddr {
- Ipv4Addr::new(127, 0, 0, 1).into()
- }
-
- fn local_ipv6_addr() -> IpAddr {
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into()
- }
-
- fn unreachable_ipv4_addr() -> IpAddr {
- Ipv4Addr::new(127, 0, 0, 2).into()
- }
-
- fn unreachable_ipv6_addr() -> IpAddr {
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into()
- }
-
- fn slow_ipv4_addr() -> IpAddr {
- // RFC 6890 reserved IPv4 address.
- Ipv4Addr::new(198, 18, 0, 25).into()
- }
-
- fn slow_ipv6_addr() -> IpAddr {
- // RFC 6890 reserved IPv6 address.
- Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into()
- }
-
- fn measure_connect(addr: IpAddr) -> (bool, Duration) {
- let start = Instant::now();
- let result =
- std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1));
-
- let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut;
- let duration = start.elapsed();
- (reachable, duration)
- }
- }
-
- use std::time::Duration;
-
- #[test]
- fn no_tcp_keepalive_config() {
- assert!(TcpKeepaliveConfig::default().into_tcpkeepalive().is_none());
- }
-
- #[test]
- fn tcp_keepalive_time_config() {
- let kac = TcpKeepaliveConfig {
- time: Some(Duration::from_secs(60)),
- ..Default::default()
- };
- if let Some(tcp_keepalive) = kac.into_tcpkeepalive() {
- assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)"));
- } else {
- panic!("test failed");
- }
- }
-
- #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))]
- #[test]
- fn tcp_keepalive_interval_config() {
- let kac = TcpKeepaliveConfig {
- interval: Some(Duration::from_secs(1)),
- ..Default::default()
- };
- if let Some(tcp_keepalive) = kac.into_tcpkeepalive() {
- assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)"));
- } else {
- panic!("test failed");
- }
- }
-
- #[cfg(not(any(
- target_os = "openbsd",
- target_os = "redox",
- target_os = "solaris",
- target_os = "windows"
- )))]
- #[test]
- fn tcp_keepalive_retries_config() {
- let kac = TcpKeepaliveConfig {
- retries: Some(3),
- ..Default::default()
- };
- if let Some(tcp_keepalive) = kac.into_tcpkeepalive() {
- assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)"));
- } else {
- panic!("test failed");
- }
- }
-
- #[test]
- fn test_set_port() {
- // Respect explicit ports no matter what the resolved port is.
- let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881));
- set_port(&mut addr, 42, true);
- assert_eq!(addr.port(), 42);
-
- // Ignore default host port, and use the socket port instead.
- let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881));
- set_port(&mut addr, 443, false);
- assert_eq!(addr.port(), 6881);
-
- // Use the default port if the resolved port is `0`.
- let mut addr = SocketAddr::from(([0, 0, 0, 0], 0));
- set_port(&mut addr, 443, false);
- assert_eq!(addr.port(), 443);
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/mod.rs b/vendor/hyper-util/src/client/legacy/connect/mod.rs
deleted file mode 100644
index 90a97679..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/mod.rs
+++ /dev/null
@@ -1,444 +0,0 @@
-//! Connectors used by the `Client`.
-//!
-//! This module contains:
-//!
-//! - A default [`HttpConnector`][] that does DNS resolution and establishes
-//! connections over TCP.
-//! - Types to build custom connectors.
-//!
-//! # Connectors
-//!
-//! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and
-//! its `Response` is some type implementing [`Read`][], [`Write`][],
-//! and [`Connection`][].
-//!
-//! ## Custom Connectors
-//!
-//! A simple connector that ignores the `Uri` destination and always returns
-//! a TCP connection to the same address could be written like this:
-//!
-//! ```rust,ignore
-//! let connector = tower::service_fn(|_dst| async {
-//! tokio::net::TcpStream::connect("127.0.0.1:1337")
-//! })
-//! ```
-//!
-//! Or, fully written out:
-//!
-//! ```
-//! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}};
-//! use http::Uri;
-//! use tokio::net::TcpStream;
-//! use tower_service::Service;
-//!
-//! #[derive(Clone)]
-//! struct LocalConnector;
-//!
-//! impl Service<Uri> for LocalConnector {
-//! type Response = TcpStream;
-//! type Error = std::io::Error;
-//! // We can't "name" an `async` generated future.
-//! type Future = Pin<Box<
-//! dyn Future<Output = Result<Self::Response, Self::Error>> + Send
-//! >>;
-//!
-//! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
-//! // This connector is always ready, but others might not be.
-//! Poll::Ready(Ok(()))
-//! }
-//!
-//! fn call(&mut self, _: Uri) -> Self::Future {
-//! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337))))
-//! }
-//! }
-//! ```
-//!
-//! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a
-//! better starting place to extend from.
-//!
-//! [`HttpConnector`]: HttpConnector
-//! [`Service`]: tower_service::Service
-//! [`Uri`]: ::http::Uri
-//! [`Read`]: hyper::rt::Read
-//! [`Write`]: hyper::rt::Write
-//! [`Connection`]: Connection
-use std::{
- fmt::{self, Formatter},
- sync::{
- atomic::{AtomicBool, Ordering},
- Arc,
- },
-};
-
-use ::http::Extensions;
-
-#[cfg(feature = "tokio")]
-pub use self::http::{HttpConnector, HttpInfo};
-
-#[cfg(feature = "tokio")]
-pub mod dns;
-#[cfg(feature = "tokio")]
-mod http;
-
-pub mod proxy;
-
-pub(crate) mod capture;
-pub use capture::{capture_connection, CaptureConnection};
-
-pub use self::sealed::Connect;
-
-/// Describes a type returned by a connector.
-pub trait Connection {
- /// Return metadata describing the connection.
- fn connected(&self) -> Connected;
-}
-
-/// Extra information about the connected transport.
-///
-/// This can be used to inform recipients about things like if ALPN
-/// was used, or if connected to an HTTP proxy.
-#[derive(Debug)]
-pub struct Connected {
- pub(super) alpn: Alpn,
- pub(super) is_proxied: bool,
- pub(super) extra: Option<Extra>,
- pub(super) poisoned: PoisonPill,
-}
-
-#[derive(Clone)]
-pub(crate) struct PoisonPill {
- poisoned: Arc<AtomicBool>,
-}
-
-impl fmt::Debug for PoisonPill {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- // print the address of the pill—this makes debugging issues much easier
- write!(
- f,
- "PoisonPill@{:p} {{ poisoned: {} }}",
- self.poisoned,
- self.poisoned.load(Ordering::Relaxed)
- )
- }
-}
-
-impl PoisonPill {
- pub(crate) fn healthy() -> Self {
- Self {
- poisoned: Arc::new(AtomicBool::new(false)),
- }
- }
- pub(crate) fn poison(&self) {
- self.poisoned.store(true, Ordering::Relaxed)
- }
-
- pub(crate) fn poisoned(&self) -> bool {
- self.poisoned.load(Ordering::Relaxed)
- }
-}
-
-pub(super) struct Extra(Box<dyn ExtraInner>);
-
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub(super) enum Alpn {
- H2,
- None,
-}
-
-impl Connected {
- /// Create new `Connected` type with empty metadata.
- pub fn new() -> Connected {
- Connected {
- alpn: Alpn::None,
- is_proxied: false,
- extra: None,
- poisoned: PoisonPill::healthy(),
- }
- }
-
- /// Set whether the connected transport is to an HTTP proxy.
- ///
- /// This setting will affect if HTTP/1 requests written on the transport
- /// will have the request-target in absolute-form or origin-form:
- ///
- /// - When `proxy(false)`:
- ///
- /// ```http
- /// GET /guide HTTP/1.1
- /// ```
- ///
- /// - When `proxy(true)`:
- ///
- /// ```http
- /// GET http://hyper.rs/guide HTTP/1.1
- /// ```
- ///
- /// Default is `false`.
- pub fn proxy(mut self, is_proxied: bool) -> Connected {
- self.is_proxied = is_proxied;
- self
- }
-
- /// Determines if the connected transport is to an HTTP proxy.
- pub fn is_proxied(&self) -> bool {
- self.is_proxied
- }
-
- /// Set extra connection information to be set in the extensions of every `Response`.
- pub fn extra<T: Clone + Send + Sync + 'static>(mut self, extra: T) -> Connected {
- if let Some(prev) = self.extra {
- self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));
- } else {
- self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));
- }
- self
- }
-
- /// Copies the extra connection information into an `Extensions` map.
- pub fn get_extras(&self, extensions: &mut Extensions) {
- if let Some(extra) = &self.extra {
- extra.set(extensions);
- }
- }
-
- /// Set that the connected transport negotiated HTTP/2 as its next protocol.
- pub fn negotiated_h2(mut self) -> Connected {
- self.alpn = Alpn::H2;
- self
- }
-
- /// Determines if the connected transport negotiated HTTP/2 as its next protocol.
- pub fn is_negotiated_h2(&self) -> bool {
- self.alpn == Alpn::H2
- }
-
- /// Poison this connection
- ///
- /// A poisoned connection will not be reused for subsequent requests by the pool
- pub fn poison(&self) {
- self.poisoned.poison();
- tracing::debug!(
- poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests"
- );
- }
-
- // Don't public expose that `Connected` is `Clone`, unsure if we want to
- // keep that contract...
- pub(super) fn clone(&self) -> Connected {
- Connected {
- alpn: self.alpn,
- is_proxied: self.is_proxied,
- extra: self.extra.clone(),
- poisoned: self.poisoned.clone(),
- }
- }
-}
-
-// ===== impl Extra =====
-
-impl Extra {
- pub(super) fn set(&self, res: &mut Extensions) {
- self.0.set(res);
- }
-}
-
-impl Clone for Extra {
- fn clone(&self) -> Extra {
- Extra(self.0.clone_box())
- }
-}
-
-impl fmt::Debug for Extra {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Extra").finish()
- }
-}
-
-trait ExtraInner: Send + Sync {
- fn clone_box(&self) -> Box<dyn ExtraInner>;
- fn set(&self, res: &mut Extensions);
-}
-
-// This indirection allows the `Connected` to have a type-erased "extra" value,
-// while that type still knows its inner extra type. This allows the correct
-// TypeId to be used when inserting into `res.extensions_mut()`.
-#[derive(Clone)]
-struct ExtraEnvelope<T>(T);
-
-impl<T> ExtraInner for ExtraEnvelope<T>
-where
- T: Clone + Send + Sync + 'static,
-{
- fn clone_box(&self) -> Box<dyn ExtraInner> {
- Box::new(self.clone())
- }
-
- fn set(&self, res: &mut Extensions) {
- res.insert(self.0.clone());
- }
-}
-
-struct ExtraChain<T>(Box<dyn ExtraInner>, T);
-
-impl<T: Clone> Clone for ExtraChain<T> {
- fn clone(&self) -> Self {
- ExtraChain(self.0.clone_box(), self.1.clone())
- }
-}
-
-impl<T> ExtraInner for ExtraChain<T>
-where
- T: Clone + Send + Sync + 'static,
-{
- fn clone_box(&self) -> Box<dyn ExtraInner> {
- Box::new(self.clone())
- }
-
- fn set(&self, res: &mut Extensions) {
- self.0.set(res);
- res.insert(self.1.clone());
- }
-}
-
-pub(super) mod sealed {
- use std::error::Error as StdError;
- use std::future::Future;
-
- use ::http::Uri;
- use hyper::rt::{Read, Write};
-
- use super::Connection;
-
- /// Connect to a destination, returning an IO transport.
- ///
- /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the
- /// ready connection.
- ///
- /// # Trait Alias
- ///
- /// This is really just an *alias* for the `tower::Service` trait, with
- /// additional bounds set for convenience *inside* hyper. You don't actually
- /// implement this trait, but `tower::Service<Uri>` instead.
- // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot
- // fit the `Connect` bounds because of the blanket impl for `Service`.
- pub trait Connect: Sealed + Sized {
- #[doc(hidden)]
- type _Svc: ConnectSvc;
- #[doc(hidden)]
- fn connect(self, internal_only: Internal, dst: Uri) -> <Self::_Svc as ConnectSvc>::Future;
- }
-
- pub trait ConnectSvc {
- type Connection: Read + Write + Connection + Unpin + Send + 'static;
- type Error: Into<Box<dyn StdError + Send + Sync>>;
- type Future: Future<Output = Result<Self::Connection, Self::Error>> + Unpin + Send + 'static;
-
- fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future;
- }
-
- impl<S, T> Connect for S
- where
- S: tower_service::Service<Uri, Response = T> + Send + 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: Read + Write + Connection + Unpin + Send + 'static,
- {
- type _Svc = S;
-
- fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot<S, Uri> {
- crate::service::Oneshot::new(self, dst)
- }
- }
-
- impl<S, T> ConnectSvc for S
- where
- S: tower_service::Service<Uri, Response = T> + Send + 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: Read + Write + Connection + Unpin + Send + 'static,
- {
- type Connection = T;
- type Error = S::Error;
- type Future = crate::service::Oneshot<S, Uri>;
-
- fn connect(self, _: Internal, dst: Uri) -> Self::Future {
- crate::service::Oneshot::new(self, dst)
- }
- }
-
- impl<S, T> Sealed for S
- where
- S: tower_service::Service<Uri, Response = T> + Send,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: Read + Write + Connection + Unpin + Send + 'static,
- {
- }
-
- pub trait Sealed {}
- #[allow(missing_debug_implementations)]
- pub struct Internal;
-}
-
-#[cfg(test)]
-mod tests {
- use super::Connected;
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex1(usize);
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex2(&'static str);
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex3(&'static str);
-
- #[test]
- fn test_connected_extra() {
- let c1 = Connected::new().extra(Ex1(41));
-
- let mut ex = ::http::Extensions::new();
-
- assert_eq!(ex.get::<Ex1>(), None);
-
- c1.extra.as_ref().expect("c1 extra").set(&mut ex);
-
- assert_eq!(ex.get::<Ex1>(), Some(&Ex1(41)));
- }
-
- #[test]
- fn test_connected_extra_chain() {
- // If a user composes connectors and at each stage, there's "extra"
- // info to attach, it shouldn't override the previous extras.
-
- let c1 = Connected::new()
- .extra(Ex1(45))
- .extra(Ex2("zoom"))
- .extra(Ex3("pew pew"));
-
- let mut ex1 = ::http::Extensions::new();
-
- assert_eq!(ex1.get::<Ex1>(), None);
- assert_eq!(ex1.get::<Ex2>(), None);
- assert_eq!(ex1.get::<Ex3>(), None);
-
- c1.extra.as_ref().expect("c1 extra").set(&mut ex1);
-
- assert_eq!(ex1.get::<Ex1>(), Some(&Ex1(45)));
- assert_eq!(ex1.get::<Ex2>(), Some(&Ex2("zoom")));
- assert_eq!(ex1.get::<Ex3>(), Some(&Ex3("pew pew")));
-
- // Just like extensions, inserting the same type overrides previous type.
- let c2 = Connected::new()
- .extra(Ex1(33))
- .extra(Ex2("hiccup"))
- .extra(Ex1(99));
-
- let mut ex2 = ::http::Extensions::new();
-
- c2.extra.as_ref().expect("c2 extra").set(&mut ex2);
-
- assert_eq!(ex2.get::<Ex1>(), Some(&Ex1(99)));
- assert_eq!(ex2.get::<Ex2>(), Some(&Ex2("hiccup")));
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/mod.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/mod.rs
deleted file mode 100644
index 56ca3291..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-//! Proxy helpers
-mod socks;
-mod tunnel;
-
-pub use self::socks::{SocksV4, SocksV5};
-pub use self::tunnel::Tunnel;
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/mod.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/mod.rs
deleted file mode 100644
index d6077b94..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/mod.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-mod v5;
-pub use v5::{SocksV5, SocksV5Error};
-
-mod v4;
-pub use v4::{SocksV4, SocksV4Error};
-
-use bytes::BytesMut;
-
-use hyper::rt::Read;
-
-#[derive(Debug)]
-pub enum SocksError<C> {
- Inner(C),
- Io(std::io::Error),
-
- DnsFailure,
- MissingHost,
- MissingPort,
-
- V4(SocksV4Error),
- V5(SocksV5Error),
-
- Parsing(ParsingError),
- Serialize(SerializeError),
-}
-
-#[derive(Debug)]
-pub enum ParsingError {
- Incomplete,
- WouldOverflow,
- Other,
-}
-
-#[derive(Debug)]
-pub enum SerializeError {
- WouldOverflow,
-}
-
-async fn read_message<T, M, C>(mut conn: &mut T, buf: &mut BytesMut) -> Result<M, SocksError<C>>
-where
- T: Read + Unpin,
- M: for<'a> TryFrom<&'a mut BytesMut, Error = ParsingError>,
-{
- let mut tmp = [0; 513];
-
- loop {
- let n = crate::rt::read(&mut conn, &mut tmp).await?;
- buf.extend_from_slice(&tmp[..n]);
-
- match M::try_from(buf) {
- Err(ParsingError::Incomplete) => {
- if n == 0 {
- if buf.spare_capacity_mut().is_empty() {
- return Err(SocksError::Parsing(ParsingError::WouldOverflow));
- } else {
- return Err(std::io::Error::new(
- std::io::ErrorKind::UnexpectedEof,
- "unexpected eof",
- )
- .into());
- }
- }
- }
- Err(err) => return Err(err.into()),
- Ok(res) => return Ok(res),
- }
- }
-}
-
-impl<C> std::fmt::Display for SocksError<C> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.write_str("SOCKS error: ")?;
-
- match self {
- Self::Inner(_) => f.write_str("failed to create underlying connection"),
- Self::Io(_) => f.write_str("io error during SOCKS handshake"),
-
- Self::DnsFailure => f.write_str("could not resolve to acceptable address type"),
- Self::MissingHost => f.write_str("missing destination host"),
- Self::MissingPort => f.write_str("missing destination port"),
-
- Self::Parsing(_) => f.write_str("failed parsing server response"),
- Self::Serialize(_) => f.write_str("failed serialize request"),
-
- Self::V4(e) => e.fmt(f),
- Self::V5(e) => e.fmt(f),
- }
- }
-}
-
-impl<C: std::fmt::Debug + std::fmt::Display> std::error::Error for SocksError<C> {}
-
-impl<C> From<std::io::Error> for SocksError<C> {
- fn from(err: std::io::Error) -> Self {
- Self::Io(err)
- }
-}
-
-impl<C> From<ParsingError> for SocksError<C> {
- fn from(err: ParsingError) -> Self {
- Self::Parsing(err)
- }
-}
-
-impl<C> From<SerializeError> for SocksError<C> {
- fn from(err: SerializeError) -> Self {
- Self::Serialize(err)
- }
-}
-
-impl<C> From<SocksV4Error> for SocksError<C> {
- fn from(err: SocksV4Error) -> Self {
- Self::V4(err)
- }
-}
-
-impl<C> From<SocksV5Error> for SocksError<C> {
- fn from(err: SocksV5Error) -> Self {
- Self::V5(err)
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/errors.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/errors.rs
deleted file mode 100644
index 5fdbd05c..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/errors.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use super::Status;
-
-#[derive(Debug)]
-pub enum SocksV4Error {
- IpV6,
- Command(Status),
-}
-
-impl From<Status> for SocksV4Error {
- fn from(err: Status) -> Self {
- Self::Command(err)
- }
-}
-
-impl std::fmt::Display for SocksV4Error {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Self::IpV6 => f.write_str("IPV6 is not supported"),
- Self::Command(status) => status.fmt(f),
- }
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/messages.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/messages.rs
deleted file mode 100644
index bec8d081..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/messages.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-use super::super::{ParsingError, SerializeError};
-
-use bytes::{Buf, BufMut, BytesMut};
-use std::net::SocketAddrV4;
-
-/// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+
-/// | VN | CD | DSTPORT | DSTIP | USERID | NULL | DOMAIN | NULL |
-/// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+
-/// | 1 | 1 | 2 | 4 | Variable | 1 | Variable | 1 |
-/// +-----+-----+----+----+----+----+----+----+-------------+------+------------+------+
-/// ^^^^^^^^^^^^^^^^^^^^^
-/// optional: only do IP is 0.0.0.X
-#[derive(Debug)]
-pub struct Request<'a>(pub &'a Address);
-
-/// +-----+-----+----+----+----+----+----+----+
-/// | VN | CD | DSTPORT | DSTIP |
-/// +-----+-----+----+----+----+----+----+----+
-/// | 1 | 1 | 2 | 4 |
-/// +-----+-----+----+----+----+----+----+----+
-/// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-/// ignore: only for SOCKSv4 BIND
-#[derive(Debug)]
-pub struct Response(pub Status);
-
-#[derive(Debug)]
-pub enum Address {
- Socket(SocketAddrV4),
- Domain(String, u16),
-}
-
-#[derive(Debug, PartialEq)]
-pub enum Status {
- Success = 90,
- Failed = 91,
- IdentFailure = 92,
- IdentMismatch = 93,
-}
-
-impl Request<'_> {
- pub fn write_to_buf<B: BufMut>(&self, mut buf: B) -> Result<usize, SerializeError> {
- match self.0 {
- Address::Socket(socket) => {
- if buf.remaining_mut() < 10 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x04); // Version
- buf.put_u8(0x01); // CONNECT
-
- buf.put_u16(socket.port()); // Port
- buf.put_slice(&socket.ip().octets()); // IP
-
- buf.put_u8(0x00); // USERID
- buf.put_u8(0x00); // NULL
-
- Ok(10)
- }
-
- Address::Domain(domain, port) => {
- if buf.remaining_mut() < 10 + domain.len() + 1 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x04); // Version
- buf.put_u8(0x01); // CONNECT
-
- buf.put_u16(*port); // IP
- buf.put_slice(&[0x00, 0x00, 0x00, 0xFF]); // Invalid IP
-
- buf.put_u8(0x00); // USERID
- buf.put_u8(0x00); // NULL
-
- buf.put_slice(domain.as_bytes()); // Domain
- buf.put_u8(0x00); // NULL
-
- Ok(10 + domain.len() + 1)
- }
- }
- }
-}
-
-impl TryFrom<&mut BytesMut> for Response {
- type Error = ParsingError;
-
- fn try_from(buf: &mut BytesMut) -> Result<Self, Self::Error> {
- if buf.remaining() < 8 {
- return Err(ParsingError::Incomplete);
- }
-
- if buf.get_u8() != 0x00 {
- return Err(ParsingError::Other);
- }
-
- let status = buf.get_u8().try_into()?;
- let _addr = {
- let port = buf.get_u16();
- let mut ip = [0; 4];
- buf.copy_to_slice(&mut ip);
-
- SocketAddrV4::new(ip.into(), port)
- };
-
- Ok(Self(status))
- }
-}
-
-impl TryFrom<u8> for Status {
- type Error = ParsingError;
-
- fn try_from(byte: u8) -> Result<Self, Self::Error> {
- Ok(match byte {
- 90 => Self::Success,
- 91 => Self::Failed,
- 92 => Self::IdentFailure,
- 93 => Self::IdentMismatch,
- _ => return Err(ParsingError::Other),
- })
- }
-}
-
-impl std::fmt::Display for Status {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.write_str(match self {
- Self::Success => "success",
- Self::Failed => "server failed to execute command",
- Self::IdentFailure => "server ident service failed",
- Self::IdentMismatch => "server ident service did not recognise client identifier",
- })
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/mod.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/mod.rs
deleted file mode 100644
index bee7e6dc..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v4/mod.rs
+++ /dev/null
@@ -1,183 +0,0 @@
-mod errors;
-pub use errors::*;
-
-mod messages;
-use messages::*;
-
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use std::net::{IpAddr, SocketAddr, SocketAddrV4, ToSocketAddrs};
-
-use http::Uri;
-use hyper::rt::{Read, Write};
-use tower_service::Service;
-
-use bytes::BytesMut;
-
-use pin_project_lite::pin_project;
-
-/// Tunnel Proxy via SOCKSv4
-///
-/// This is a connector that can be used by the `legacy::Client`. It wraps
-/// another connector, and after getting an underlying connection, it established
-/// a TCP tunnel over it using SOCKSv4.
-#[derive(Debug, Clone)]
-pub struct SocksV4<C> {
- inner: C,
- config: SocksConfig,
-}
-
-#[derive(Debug, Clone)]
-struct SocksConfig {
- proxy: Uri,
- local_dns: bool,
-}
-
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct Handshaking<F, T, E> {
- #[pin]
- fut: BoxHandshaking<T, E>,
- _marker: std::marker::PhantomData<F>
- }
-}
-
-type BoxHandshaking<T, E> = Pin<Box<dyn Future<Output = Result<T, super::SocksError<E>>> + Send>>;
-
-impl<C> SocksV4<C> {
- /// Create a new SOCKSv4 handshake service
- ///
- /// Wraps an underlying connector and stores the address of a tunneling
- /// proxying server.
- ///
- /// A `SocksV4` can then be called with any destination. The `dst` passed to
- /// `call` will not be used to create the underlying connection, but will
- /// be used in a SOCKS handshake with the proxy destination.
- pub fn new(proxy_dst: Uri, connector: C) -> Self {
- Self {
- inner: connector,
- config: SocksConfig::new(proxy_dst),
- }
- }
-
- /// Resolve domain names locally on the client, rather than on the proxy server.
- ///
- /// Disabled by default as local resolution of domain names can be detected as a
- /// DNS leak.
- pub fn local_dns(mut self, local_dns: bool) -> Self {
- self.config.local_dns = local_dns;
- self
- }
-}
-
-impl SocksConfig {
- pub fn new(proxy: Uri) -> Self {
- Self {
- proxy,
- local_dns: false,
- }
- }
-
- async fn execute<T, E>(
- self,
- mut conn: T,
- host: String,
- port: u16,
- ) -> Result<T, super::SocksError<E>>
- where
- T: Read + Write + Unpin,
- {
- let address = match host.parse::<IpAddr>() {
- Ok(IpAddr::V6(_)) => return Err(SocksV4Error::IpV6.into()),
- Ok(IpAddr::V4(ip)) => Address::Socket(SocketAddrV4::new(ip, port)),
- Err(_) => {
- if self.local_dns {
- (host, port)
- .to_socket_addrs()?
- .find_map(|s| {
- if let SocketAddr::V4(v4) = s {
- Some(Address::Socket(v4))
- } else {
- None
- }
- })
- .ok_or(super::SocksError::DnsFailure)?
- } else {
- Address::Domain(host, port)
- }
- }
- };
-
- let mut send_buf = BytesMut::with_capacity(1024);
- let mut recv_buf = BytesMut::with_capacity(1024);
-
- // Send Request
- let req = Request(&address);
- let n = req.write_to_buf(&mut send_buf)?;
- crate::rt::write_all(&mut conn, &send_buf[..n]).await?;
-
- // Read Response
- let res: Response = super::read_message(&mut conn, &mut recv_buf).await?;
- if res.0 == Status::Success {
- Ok(conn)
- } else {
- Err(SocksV4Error::Command(res.0).into())
- }
- }
-}
-
-impl<C> Service<Uri> for SocksV4<C>
-where
- C: Service<Uri>,
- C::Future: Send + 'static,
- C::Response: Read + Write + Unpin + Send + 'static,
- C::Error: Send + 'static,
-{
- type Response = C::Response;
- type Error = super::SocksError<C::Error>;
- type Future = Handshaking<C::Future, C::Response, C::Error>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(super::SocksError::Inner)
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- let config = self.config.clone();
- let connecting = self.inner.call(config.proxy.clone());
-
- let fut = async move {
- let port = dst.port().map(|p| p.as_u16()).unwrap_or(443);
- let host = dst
- .host()
- .ok_or(super::SocksError::MissingHost)?
- .to_string();
-
- let conn = connecting.await.map_err(super::SocksError::Inner)?;
- config.execute(conn, host, port).await
- };
-
- Handshaking {
- fut: Box::pin(fut),
- _marker: Default::default(),
- }
- }
-}
-
-impl<F, T, E> Future for Handshaking<F, T, E>
-where
- F: Future<Output = Result<T, E>>,
-{
- type Output = Result<T, super::SocksError<E>>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().fut.poll(cx)
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/errors.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/errors.rs
deleted file mode 100644
index 06b1a9a8..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/errors.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use super::Status;
-
-#[derive(Debug)]
-pub enum SocksV5Error {
- HostTooLong,
- Auth(AuthError),
- Command(Status),
-}
-
-#[derive(Debug)]
-pub enum AuthError {
- Unsupported,
- MethodMismatch,
- Failed,
-}
-
-impl From<Status> for SocksV5Error {
- fn from(err: Status) -> Self {
- Self::Command(err)
- }
-}
-
-impl From<AuthError> for SocksV5Error {
- fn from(err: AuthError) -> Self {
- Self::Auth(err)
- }
-}
-
-impl std::fmt::Display for SocksV5Error {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Self::HostTooLong => f.write_str("host address is more than 255 characters"),
- Self::Command(e) => e.fmt(f),
- Self::Auth(e) => e.fmt(f),
- }
- }
-}
-
-impl std::fmt::Display for AuthError {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.write_str(match self {
- Self::Unsupported => "server does not support user/pass authentication",
- Self::MethodMismatch => "server implements authentication incorrectly",
- Self::Failed => "credentials not accepted",
- })
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/messages.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/messages.rs
deleted file mode 100644
index ddf93538..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/messages.rs
+++ /dev/null
@@ -1,347 +0,0 @@
-use super::super::{ParsingError, SerializeError};
-
-use bytes::{Buf, BufMut, BytesMut};
-use std::net::SocketAddr;
-
-/// +----+----------+----------+
-/// |VER | NMETHODS | METHODS |
-/// +----+----------+----------+
-/// | 1 | 1 | 1 to 255 |
-/// +----+----------+----------+
-#[derive(Debug)]
-pub struct NegotiationReq<'a>(pub &'a AuthMethod);
-
-/// +----+--------+
-/// |VER | METHOD |
-/// +----+--------+
-/// | 1 | 1 |
-/// +----+--------+
-#[derive(Debug)]
-pub struct NegotiationRes(pub AuthMethod);
-
-/// +----+------+----------+------+----------+
-/// |VER | ULEN | UNAME | PLEN | PASSWD |
-/// +----+------+----------+------+----------+
-/// | 1 | 1 | 1 to 255 | 1 | 1 to 255 |
-/// +----+------+----------+------+----------+
-#[derive(Debug)]
-pub struct AuthenticationReq<'a>(pub &'a str, pub &'a str);
-
-/// +----+--------+
-/// |VER | STATUS |
-/// +----+--------+
-/// | 1 | 1 |
-/// +----+--------+
-#[derive(Debug)]
-pub struct AuthenticationRes(pub bool);
-
-/// +----+-----+-------+------+----------+----------+
-/// |VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT |
-/// +----+-----+-------+------+----------+----------+
-/// | 1 | 1 | X'00' | 1 | Variable | 2 |
-/// +----+-----+-------+------+----------+----------+
-#[derive(Debug)]
-pub struct ProxyReq<'a>(pub &'a Address);
-
-/// +----+-----+-------+------+----------+----------+
-/// |VER | REP | RSV | ATYP | BND.ADDR | BND.PORT |
-/// +----+-----+-------+------+----------+----------+
-/// | 1 | 1 | X'00' | 1 | Variable | 2 |
-/// +----+-----+-------+------+----------+----------+
-#[derive(Debug)]
-pub struct ProxyRes(pub Status);
-
-#[repr(u8)]
-#[derive(Debug, Copy, Clone, PartialEq)]
-pub enum AuthMethod {
- NoAuth = 0x00,
- UserPass = 0x02,
- NoneAcceptable = 0xFF,
-}
-
-#[derive(Debug)]
-pub enum Address {
- Socket(SocketAddr),
- Domain(String, u16),
-}
-
-#[derive(Debug, Copy, Clone, PartialEq)]
-pub enum Status {
- Success,
- GeneralServerFailure,
- ConnectionNotAllowed,
- NetworkUnreachable,
- HostUnreachable,
- ConnectionRefused,
- TtlExpired,
- CommandNotSupported,
- AddressTypeNotSupported,
-}
-
-impl NegotiationReq<'_> {
- pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
- if buf.capacity() - buf.len() < 3 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x05); // Version
- buf.put_u8(0x01); // Number of authentication methods
- buf.put_u8(*self.0 as u8); // Authentication method
-
- Ok(3)
- }
-}
-
-impl TryFrom<&mut BytesMut> for NegotiationRes {
- type Error = ParsingError;
-
- fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
- if buf.remaining() < 2 {
- return Err(ParsingError::Incomplete);
- }
-
- if buf.get_u8() != 0x05 {
- return Err(ParsingError::Other);
- }
-
- let method = buf.get_u8().try_into()?;
- Ok(Self(method))
- }
-}
-
-impl AuthenticationReq<'_> {
- pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
- if buf.capacity() - buf.len() < 3 + self.0.len() + self.1.len() {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x01); // Version
-
- buf.put_u8(self.0.len() as u8); // Username length (guarenteed to be 255 or less)
- buf.put_slice(self.0.as_bytes()); // Username
-
- buf.put_u8(self.1.len() as u8); // Password length (guarenteed to be 255 or less)
- buf.put_slice(self.1.as_bytes()); // Password
-
- Ok(3 + self.0.len() + self.1.len())
- }
-}
-
-impl TryFrom<&mut BytesMut> for AuthenticationRes {
- type Error = ParsingError;
-
- fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
- if buf.remaining() < 2 {
- return Err(ParsingError::Incomplete);
- }
-
- if buf.get_u8() != 0x01 {
- return Err(ParsingError::Other);
- }
-
- if buf.get_u8() == 0 {
- Ok(Self(true))
- } else {
- Ok(Self(false))
- }
- }
-}
-
-impl ProxyReq<'_> {
- pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
- let addr_len = match self.0 {
- Address::Socket(SocketAddr::V4(_)) => 1 + 4 + 2,
- Address::Socket(SocketAddr::V6(_)) => 1 + 16 + 2,
- Address::Domain(ref domain, _) => 1 + 1 + domain.len() + 2,
- };
-
- if buf.capacity() - buf.len() < 3 + addr_len {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x05); // Version
- buf.put_u8(0x01); // TCP tunneling command
- buf.put_u8(0x00); // Reserved
- let _ = self.0.write_to_buf(buf); // Address
-
- Ok(3 + addr_len)
- }
-}
-
-impl TryFrom<&mut BytesMut> for ProxyRes {
- type Error = ParsingError;
-
- fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
- if buf.remaining() < 2 {
- return Err(ParsingError::Incomplete);
- }
-
- // VER
- if buf.get_u8() != 0x05 {
- return Err(ParsingError::Other);
- }
-
- // REP
- let status = buf.get_u8().try_into()?;
-
- // RSV
- if buf.get_u8() != 0x00 {
- return Err(ParsingError::Other);
- }
-
- // ATYP + ADDR
- Address::try_from(buf)?;
-
- Ok(Self(status))
- }
-}
-
-impl Address {
- pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
- match self {
- Self::Socket(SocketAddr::V4(v4)) => {
- if buf.capacity() - buf.len() < 1 + 4 + 2 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x01);
- buf.put_slice(&v4.ip().octets());
- buf.put_u16(v4.port()); // Network Order/BigEndian for port
-
- Ok(7)
- }
-
- Self::Socket(SocketAddr::V6(v6)) => {
- if buf.capacity() - buf.len() < 1 + 16 + 2 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x04);
- buf.put_slice(&v6.ip().octets());
- buf.put_u16(v6.port()); // Network Order/BigEndian for port
-
- Ok(19)
- }
-
- Self::Domain(domain, port) => {
- if buf.capacity() - buf.len() < 1 + 1 + domain.len() + 2 {
- return Err(SerializeError::WouldOverflow);
- }
-
- buf.put_u8(0x03);
- buf.put_u8(domain.len() as u8); // Guarenteed to be less than 255
- buf.put_slice(domain.as_bytes());
- buf.put_u16(*port);
-
- Ok(4 + domain.len())
- }
- }
- }
-}
-
-impl TryFrom<&mut BytesMut> for Address {
- type Error = ParsingError;
-
- fn try_from(buf: &mut BytesMut) -> Result<Self, Self::Error> {
- if buf.remaining() < 2 {
- return Err(ParsingError::Incomplete);
- }
-
- Ok(match buf.get_u8() {
- 0x01 => {
- let mut ip = [0; 4];
-
- if buf.remaining() < 6 {
- return Err(ParsingError::Incomplete);
- }
-
- buf.copy_to_slice(&mut ip);
- let port = buf.get_u16();
-
- Self::Socket(SocketAddr::new(ip.into(), port))
- }
-
- 0x03 => {
- let len = buf.get_u8();
-
- if len == 0 {
- return Err(ParsingError::Other);
- } else if buf.remaining() < (len as usize) + 2 {
- return Err(ParsingError::Incomplete);
- }
-
- let domain = std::str::from_utf8(&buf[..len as usize])
- .map_err(|_| ParsingError::Other)?
- .to_string();
-
- let port = buf.get_u16();
-
- Self::Domain(domain, port)
- }
-
- 0x04 => {
- let mut ip = [0; 16];
-
- if buf.remaining() < 6 {
- return Err(ParsingError::Incomplete);
- }
- buf.copy_to_slice(&mut ip);
- let port = buf.get_u16();
-
- Self::Socket(SocketAddr::new(ip.into(), port))
- }
-
- _ => return Err(ParsingError::Other),
- })
- }
-}
-
-impl TryFrom<u8> for Status {
- type Error = ParsingError;
-
- fn try_from(byte: u8) -> Result<Self, Self::Error> {
- Ok(match byte {
- 0x00 => Self::Success,
-
- 0x01 => Self::GeneralServerFailure,
- 0x02 => Self::ConnectionNotAllowed,
- 0x03 => Self::NetworkUnreachable,
- 0x04 => Self::HostUnreachable,
- 0x05 => Self::ConnectionRefused,
- 0x06 => Self::TtlExpired,
- 0x07 => Self::CommandNotSupported,
- 0x08 => Self::AddressTypeNotSupported,
- _ => return Err(ParsingError::Other),
- })
- }
-}
-
-impl TryFrom<u8> for AuthMethod {
- type Error = ParsingError;
-
- fn try_from(byte: u8) -> Result<Self, Self::Error> {
- Ok(match byte {
- 0x00 => Self::NoAuth,
- 0x02 => Self::UserPass,
- 0xFF => Self::NoneAcceptable,
-
- _ => return Err(ParsingError::Other),
- })
- }
-}
-
-impl std::fmt::Display for Status {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.write_str(match self {
- Self::Success => "success",
- Self::GeneralServerFailure => "general server failure",
- Self::ConnectionNotAllowed => "connection not allowed",
- Self::NetworkUnreachable => "network unreachable",
- Self::HostUnreachable => "host unreachable",
- Self::ConnectionRefused => "connection refused",
- Self::TtlExpired => "ttl expired",
- Self::CommandNotSupported => "command not supported",
- Self::AddressTypeNotSupported => "address type not supported",
- })
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/mod.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/mod.rs
deleted file mode 100644
index caf2446b..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/socks/v5/mod.rs
+++ /dev/null
@@ -1,313 +0,0 @@
-mod errors;
-pub use errors::*;
-
-mod messages;
-use messages::*;
-
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-
-use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
-
-use http::Uri;
-use hyper::rt::{Read, Write};
-use tower_service::Service;
-
-use bytes::BytesMut;
-
-use pin_project_lite::pin_project;
-
-/// Tunnel Proxy via SOCKSv5
-///
-/// This is a connector that can be used by the `legacy::Client`. It wraps
-/// another connector, and after getting an underlying connection, it established
-/// a TCP tunnel over it using SOCKSv5.
-#[derive(Debug, Clone)]
-pub struct SocksV5<C> {
- inner: C,
- config: SocksConfig,
-}
-
-#[derive(Debug, Clone)]
-pub struct SocksConfig {
- proxy: Uri,
- proxy_auth: Option<(String, String)>,
-
- local_dns: bool,
- optimistic: bool,
-}
-
-#[derive(Debug)]
-enum State {
- SendingNegReq,
- ReadingNegRes,
- SendingAuthReq,
- ReadingAuthRes,
- SendingProxyReq,
- ReadingProxyRes,
-}
-
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct Handshaking<F, T, E> {
- #[pin]
- fut: BoxHandshaking<T, E>,
- _marker: std::marker::PhantomData<F>
- }
-}
-
-type BoxHandshaking<T, E> = Pin<Box<dyn Future<Output = Result<T, super::SocksError<E>>> + Send>>;
-
-impl<C> SocksV5<C> {
- /// Create a new SOCKSv5 handshake service.
- ///
- /// Wraps an underlying connector and stores the address of a tunneling
- /// proxying server.
- ///
- /// A `SocksV5` can then be called with any destination. The `dst` passed to
- /// `call` will not be used to create the underlying connection, but will
- /// be used in a SOCKS handshake with the proxy destination.
- pub fn new(proxy_dst: Uri, connector: C) -> Self {
- Self {
- inner: connector,
- config: SocksConfig::new(proxy_dst),
- }
- }
-
- /// Use User/Pass authentication method during handshake.
- ///
- /// Username and Password must be maximum of 255 characters each.
- /// 0 length strings are allowed despite RFC prohibiting it. This is done so that
- /// for compatablity with server implementations that require it for IP authentication.
- pub fn with_auth(mut self, user: String, pass: String) -> Self {
- self.config.proxy_auth = Some((user, pass));
- self
- }
-
- /// Resolve domain names locally on the client, rather than on the proxy server.
- ///
- /// Disabled by default as local resolution of domain names can be detected as a
- /// DNS leak.
- pub fn local_dns(mut self, local_dns: bool) -> Self {
- self.config.local_dns = local_dns;
- self
- }
-
- /// Send all messages of the handshake optmistically (without waiting for server response).
- ///
- /// Typical SOCKS handshake with auithentication takes 3 round trips. Optimistic sending
- /// can reduce round trip times and dramatically increase speed of handshake at the cost of
- /// reduced portability; many server implementations do not support optimistic sending as it
- /// is not defined in the RFC (RFC 1928).
- ///
- /// Recommended to ensure connector works correctly without optimistic sending before trying
- /// with optimistic sending.
- pub fn send_optimistically(mut self, optimistic: bool) -> Self {
- self.config.optimistic = optimistic;
- self
- }
-}
-
-impl SocksConfig {
- fn new(proxy: Uri) -> Self {
- Self {
- proxy,
- proxy_auth: None,
-
- local_dns: false,
- optimistic: false,
- }
- }
-
- async fn execute<T, E>(
- self,
- mut conn: T,
- host: String,
- port: u16,
- ) -> Result<T, super::SocksError<E>>
- where
- T: Read + Write + Unpin,
- {
- let address = match host.parse::<IpAddr>() {
- Ok(ip) => Address::Socket(SocketAddr::new(ip, port)),
- Err(_) if host.len() <= 255 => {
- if self.local_dns {
- let socket = (host, port)
- .to_socket_addrs()?
- .next()
- .ok_or(super::SocksError::DnsFailure)?;
-
- Address::Socket(socket)
- } else {
- Address::Domain(host, port)
- }
- }
- Err(_) => return Err(SocksV5Error::HostTooLong.into()),
- };
-
- let method = if self.proxy_auth.is_some() {
- AuthMethod::UserPass
- } else {
- AuthMethod::NoAuth
- };
-
- let mut recv_buf = BytesMut::with_capacity(513); // Max length of valid recievable message is 513 from Auth Request
- let mut send_buf = BytesMut::with_capacity(262); // Max length of valid sendable message is 262 from Auth Response
- let mut state = State::SendingNegReq;
-
- loop {
- match state {
- State::SendingNegReq => {
- let req = NegotiationReq(&method);
-
- let start = send_buf.len();
- req.write_to_buf(&mut send_buf)?;
- crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
-
- if self.optimistic {
- if method == AuthMethod::UserPass {
- state = State::SendingAuthReq;
- } else {
- state = State::SendingProxyReq;
- }
- } else {
- state = State::ReadingNegRes;
- }
- }
-
- State::ReadingNegRes => {
- let res: NegotiationRes = super::read_message(&mut conn, &mut recv_buf).await?;
-
- if res.0 == AuthMethod::NoneAcceptable {
- return Err(SocksV5Error::Auth(AuthError::Unsupported).into());
- }
-
- if res.0 != method {
- return Err(SocksV5Error::Auth(AuthError::MethodMismatch).into());
- }
-
- if self.optimistic {
- if res.0 == AuthMethod::UserPass {
- state = State::ReadingAuthRes;
- } else {
- state = State::ReadingProxyRes;
- }
- } else if res.0 == AuthMethod::UserPass {
- state = State::SendingAuthReq;
- } else {
- state = State::SendingProxyReq;
- }
- }
-
- State::SendingAuthReq => {
- let (user, pass) = self.proxy_auth.as_ref().unwrap();
- let req = AuthenticationReq(user, pass);
-
- let start = send_buf.len();
- req.write_to_buf(&mut send_buf)?;
- crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
-
- if self.optimistic {
- state = State::SendingProxyReq;
- } else {
- state = State::ReadingAuthRes;
- }
- }
-
- State::ReadingAuthRes => {
- let res: AuthenticationRes =
- super::read_message(&mut conn, &mut recv_buf).await?;
-
- if !res.0 {
- return Err(SocksV5Error::Auth(AuthError::Failed).into());
- }
-
- if self.optimistic {
- state = State::ReadingProxyRes;
- } else {
- state = State::SendingProxyReq;
- }
- }
-
- State::SendingProxyReq => {
- let req = ProxyReq(&address);
-
- let start = send_buf.len();
- req.write_to_buf(&mut send_buf)?;
- crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
-
- if self.optimistic {
- state = State::ReadingNegRes;
- } else {
- state = State::ReadingProxyRes;
- }
- }
-
- State::ReadingProxyRes => {
- let res: ProxyRes = super::read_message(&mut conn, &mut recv_buf).await?;
-
- if res.0 == Status::Success {
- return Ok(conn);
- } else {
- return Err(SocksV5Error::Command(res.0).into());
- }
- }
- }
- }
- }
-}
-
-impl<C> Service<Uri> for SocksV5<C>
-where
- C: Service<Uri>,
- C::Future: Send + 'static,
- C::Response: Read + Write + Unpin + Send + 'static,
- C::Error: Send + 'static,
-{
- type Response = C::Response;
- type Error = super::SocksError<C::Error>;
- type Future = Handshaking<C::Future, C::Response, C::Error>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- self.inner.poll_ready(cx).map_err(super::SocksError::Inner)
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- let config = self.config.clone();
- let connecting = self.inner.call(config.proxy.clone());
-
- let fut = async move {
- let port = dst.port().map(|p| p.as_u16()).unwrap_or(443);
- let host = dst
- .host()
- .ok_or(super::SocksError::MissingHost)?
- .to_string();
-
- let conn = connecting.await.map_err(super::SocksError::Inner)?;
- config.execute(conn, host, port).await
- };
-
- Handshaking {
- fut: Box::pin(fut),
- _marker: Default::default(),
- }
- }
-}
-
-impl<F, T, E> Future for Handshaking<F, T, E>
-where
- F: Future<Output = Result<T, E>>,
-{
- type Output = Result<T, super::SocksError<E>>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().fut.poll(cx)
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/connect/proxy/tunnel.rs b/vendor/hyper-util/src/client/legacy/connect/proxy/tunnel.rs
deleted file mode 100644
index ad948596..00000000
--- a/vendor/hyper-util/src/client/legacy/connect/proxy/tunnel.rs
+++ /dev/null
@@ -1,258 +0,0 @@
-use std::error::Error as StdError;
-use std::future::Future;
-use std::marker::{PhantomData, Unpin};
-use std::pin::Pin;
-use std::task::{self, Poll};
-
-use futures_core::ready;
-use http::{HeaderMap, HeaderValue, Uri};
-use hyper::rt::{Read, Write};
-use pin_project_lite::pin_project;
-use tower_service::Service;
-
-/// Tunnel Proxy via HTTP CONNECT
-///
-/// This is a connector that can be used by the `legacy::Client`. It wraps
-/// another connector, and after getting an underlying connection, it creates
-/// an HTTP CONNECT tunnel over it.
-#[derive(Debug)]
-pub struct Tunnel<C> {
- headers: Headers,
- inner: C,
- proxy_dst: Uri,
-}
-
-#[derive(Clone, Debug)]
-enum Headers {
- Empty,
- Auth(HeaderValue),
- Extra(HeaderMap),
-}
-
-#[derive(Debug)]
-pub enum TunnelError {
- ConnectFailed(Box<dyn StdError + Send + Sync>),
- Io(std::io::Error),
- MissingHost,
- ProxyAuthRequired,
- ProxyHeadersTooLong,
- TunnelUnexpectedEof,
- TunnelUnsuccessful,
-}
-
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct Tunneling<F, T> {
- #[pin]
- fut: BoxTunneling<T>,
- _marker: PhantomData<F>,
- }
-}
-
-type BoxTunneling<T> = Pin<Box<dyn Future<Output = Result<T, TunnelError>> + Send>>;
-
-impl<C> Tunnel<C> {
- /// Create a new Tunnel service.
- ///
- /// This wraps an underlying connector, and stores the address of a
- /// tunneling proxy server.
- ///
- /// A `Tunnel` can then be called with any destination. The `dst` passed to
- /// `call` will not be used to create the underlying connection, but will
- /// be used in an HTTP CONNECT request sent to the proxy destination.
- pub fn new(proxy_dst: Uri, connector: C) -> Self {
- Self {
- headers: Headers::Empty,
- inner: connector,
- proxy_dst,
- }
- }
-
- /// Add `proxy-authorization` header value to the CONNECT request.
- pub fn with_auth(mut self, mut auth: HeaderValue) -> Self {
- // just in case the user forgot
- auth.set_sensitive(true);
- match self.headers {
- Headers::Empty => {
- self.headers = Headers::Auth(auth);
- }
- Headers::Auth(ref mut existing) => {
- *existing = auth;
- }
- Headers::Extra(ref mut extra) => {
- extra.insert(http::header::PROXY_AUTHORIZATION, auth);
- }
- }
-
- self
- }
-
- /// Add extra headers to be sent with the CONNECT request.
- ///
- /// If existing headers have been set, these will be merged.
- pub fn with_headers(mut self, mut headers: HeaderMap) -> Self {
- match self.headers {
- Headers::Empty => {
- self.headers = Headers::Extra(headers);
- }
- Headers::Auth(auth) => {
- headers
- .entry(http::header::PROXY_AUTHORIZATION)
- .or_insert(auth);
- self.headers = Headers::Extra(headers);
- }
- Headers::Extra(ref mut extra) => {
- extra.extend(headers);
- }
- }
-
- self
- }
-}
-
-impl<C> Service<Uri> for Tunnel<C>
-where
- C: Service<Uri>,
- C::Future: Send + 'static,
- C::Response: Read + Write + Unpin + Send + 'static,
- C::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Response = C::Response;
- type Error = TunnelError;
- type Future = Tunneling<C::Future, C::Response>;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- ready!(self.inner.poll_ready(cx)).map_err(|e| TunnelError::ConnectFailed(e.into()))?;
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- let connecting = self.inner.call(self.proxy_dst.clone());
- let headers = self.headers.clone();
-
- Tunneling {
- fut: Box::pin(async move {
- let conn = connecting
- .await
- .map_err(|e| TunnelError::ConnectFailed(e.into()))?;
- tunnel(
- conn,
- dst.host().ok_or(TunnelError::MissingHost)?,
- dst.port().map(|p| p.as_u16()).unwrap_or(443),
- &headers,
- )
- .await
- }),
- _marker: PhantomData,
- }
- }
-}
-
-impl<F, T, E> Future for Tunneling<F, T>
-where
- F: Future<Output = Result<T, E>>,
-{
- type Output = Result<T, TunnelError>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.project().fut.poll(cx)
- }
-}
-
-async fn tunnel<T>(mut conn: T, host: &str, port: u16, headers: &Headers) -> Result<T, TunnelError>
-where
- T: Read + Write + Unpin,
-{
- let mut buf = format!(
- "\
- CONNECT {host}:{port} HTTP/1.1\r\n\
- Host: {host}:{port}\r\n\
- "
- )
- .into_bytes();
-
- match headers {
- Headers::Auth(auth) => {
- buf.extend_from_slice(b"Proxy-Authorization: ");
- buf.extend_from_slice(auth.as_bytes());
- buf.extend_from_slice(b"\r\n");
- }
- Headers::Extra(extra) => {
- for (name, value) in extra {
- buf.extend_from_slice(name.as_str().as_bytes());
- buf.extend_from_slice(b": ");
- buf.extend_from_slice(value.as_bytes());
- buf.extend_from_slice(b"\r\n");
- }
- }
- Headers::Empty => (),
- }
-
- // headers end
- buf.extend_from_slice(b"\r\n");
-
- crate::rt::write_all(&mut conn, &buf)
- .await
- .map_err(TunnelError::Io)?;
-
- let mut buf = [0; 8192];
- let mut pos = 0;
-
- loop {
- let n = crate::rt::read(&mut conn, &mut buf[pos..])
- .await
- .map_err(TunnelError::Io)?;
-
- if n == 0 {
- return Err(TunnelError::TunnelUnexpectedEof);
- }
- pos += n;
-
- let recvd = &buf[..pos];
- if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200") {
- if recvd.ends_with(b"\r\n\r\n") {
- return Ok(conn);
- }
- if pos == buf.len() {
- return Err(TunnelError::ProxyHeadersTooLong);
- }
- // else read more
- } else if recvd.starts_with(b"HTTP/1.1 407") {
- return Err(TunnelError::ProxyAuthRequired);
- } else {
- return Err(TunnelError::TunnelUnsuccessful);
- }
- }
-}
-
-impl std::fmt::Display for TunnelError {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- f.write_str("tunnel error: ")?;
-
- f.write_str(match self {
- TunnelError::MissingHost => "missing destination host",
- TunnelError::ProxyAuthRequired => "proxy authorization required",
- TunnelError::ProxyHeadersTooLong => "proxy response headers too long",
- TunnelError::TunnelUnexpectedEof => "unexpected end of file",
- TunnelError::TunnelUnsuccessful => "unsuccessful",
- TunnelError::ConnectFailed(_) => "failed to create underlying connection",
- TunnelError::Io(_) => "io error establishing tunnel",
- })
- }
-}
-
-impl std::error::Error for TunnelError {
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- match self {
- TunnelError::Io(ref e) => Some(e),
- TunnelError::ConnectFailed(ref e) => Some(&**e),
- _ => None,
- }
- }
-}
diff --git a/vendor/hyper-util/src/client/legacy/mod.rs b/vendor/hyper-util/src/client/legacy/mod.rs
deleted file mode 100644
index 1649ae7e..00000000
--- a/vendor/hyper-util/src/client/legacy/mod.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-#[cfg(any(feature = "http1", feature = "http2"))]
-mod client;
-#[cfg(any(feature = "http1", feature = "http2"))]
-pub use client::{Builder, Client, Error, ResponseFuture};
-
-pub mod connect;
-#[doc(hidden)]
-// Publicly available, but just for legacy purposes. A better pool will be
-// designed.
-pub mod pool;
diff --git a/vendor/hyper-util/src/client/legacy/pool.rs b/vendor/hyper-util/src/client/legacy/pool.rs
deleted file mode 100644
index 727f54b2..00000000
--- a/vendor/hyper-util/src/client/legacy/pool.rs
+++ /dev/null
@@ -1,1093 +0,0 @@
-#![allow(dead_code)]
-
-use std::collections::{HashMap, HashSet, VecDeque};
-use std::convert::Infallible;
-use std::error::Error as StdError;
-use std::fmt::{self, Debug};
-use std::future::Future;
-use std::hash::Hash;
-use std::ops::{Deref, DerefMut};
-use std::pin::Pin;
-use std::sync::{Arc, Mutex, Weak};
-use std::task::{self, Poll};
-
-use std::time::{Duration, Instant};
-
-use futures_channel::oneshot;
-use futures_core::ready;
-use tracing::{debug, trace};
-
-use hyper::rt::Sleep;
-use hyper::rt::Timer as _;
-
-use crate::common::{exec, exec::Exec, timer::Timer};
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub struct Pool<T, K: Key> {
- // If the pool is disabled, this is None.
- inner: Option<Arc<Mutex<PoolInner<T, K>>>>,
-}
-
-// Before using a pooled connection, make sure the sender is not dead.
-//
-// This is a trait to allow the `client::pool::tests` to work for `i32`.
-//
-// See https://github.com/hyperium/hyper/issues/1429
-pub trait Poolable: Unpin + Send + Sized + 'static {
- fn is_open(&self) -> bool;
- /// Reserve this connection.
- ///
- /// Allows for HTTP/2 to return a shared reservation.
- fn reserve(self) -> Reservation<Self>;
- fn can_share(&self) -> bool;
-}
-
-pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}
-
-impl<T> Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}
-
-/// A marker to identify what version a pooled connection is.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-#[allow(dead_code)]
-pub enum Ver {
- Auto,
- Http2,
-}
-
-/// When checking out a pooled connection, it might be that the connection
-/// only supports a single reservation, or it might be usable for many.
-///
-/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be
-/// used for multiple requests.
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub enum Reservation<T> {
- /// This connection could be used multiple times, the first one will be
- /// reinserted into the `idle` pool, and the second will be given to
- /// the `Checkout`.
- #[cfg(feature = "http2")]
- Shared(T, T),
- /// This connection requires unique access. It will be returned after
- /// use is complete.
- Unique(T),
-}
-
-/// Simple type alias in case the key type needs to be adjusted.
-// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc<String>;
-
-struct PoolInner<T, K: Eq + Hash> {
- // A flag that a connection is being established, and the connection
- // should be shared. This prevents making multiple HTTP/2 connections
- // to the same host.
- connecting: HashSet<K>,
- // These are internal Conns sitting in the event loop in the KeepAlive
- // state, waiting to receive a new Request to send on the socket.
- idle: HashMap<K, Vec<Idle<T>>>,
- max_idle_per_host: usize,
- // These are outstanding Checkouts that are waiting for a socket to be
- // able to send a Request one. This is used when "racing" for a new
- // connection.
- //
- // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait
- // for the Pool to receive an idle Conn. When a Conn becomes idle,
- // this list is checked for any parked Checkouts, and tries to notify
- // them that the Conn could be used instead of waiting for a brand new
- // connection.
- waiters: HashMap<K, VecDeque<oneshot::Sender<T>>>,
- // A oneshot channel is used to allow the interval to be notified when
- // the Pool completely drops. That way, the interval can cancel immediately.
- idle_interval_ref: Option<oneshot::Sender<Infallible>>,
- exec: Exec,
- timer: Option<Timer>,
- timeout: Option<Duration>,
-}
-
-// This is because `Weak::new()` *allocates* space for `T`, even if it
-// doesn't need it!
-struct WeakOpt<T>(Option<Weak<T>>);
-
-#[derive(Clone, Copy, Debug)]
-pub struct Config {
- pub idle_timeout: Option<Duration>,
- pub max_idle_per_host: usize,
-}
-
-impl Config {
- pub fn is_enabled(&self) -> bool {
- self.max_idle_per_host > 0
- }
-}
-
-impl<T, K: Key> Pool<T, K> {
- pub fn new<E, M>(config: Config, executor: E, timer: Option<M>) -> Pool<T, K>
- where
- E: hyper::rt::Executor<exec::BoxSendFuture> + Send + Sync + Clone + 'static,
- M: hyper::rt::Timer + Send + Sync + Clone + 'static,
- {
- let exec = Exec::new(executor);
- let timer = timer.map(|t| Timer::new(t));
- let inner = if config.is_enabled() {
- Some(Arc::new(Mutex::new(PoolInner {
- connecting: HashSet::new(),
- idle: HashMap::new(),
- idle_interval_ref: None,
- max_idle_per_host: config.max_idle_per_host,
- waiters: HashMap::new(),
- exec,
- timer,
- timeout: config.idle_timeout,
- })))
- } else {
- None
- };
-
- Pool { inner }
- }
-
- pub(crate) fn is_enabled(&self) -> bool {
- self.inner.is_some()
- }
-
- #[cfg(test)]
- pub(super) fn no_timer(&self) {
- // Prevent an actual interval from being created for this pool...
- {
- let mut inner = self.inner.as_ref().unwrap().lock().unwrap();
- assert!(inner.idle_interval_ref.is_none(), "timer already spawned");
- let (tx, _) = oneshot::channel();
- inner.idle_interval_ref = Some(tx);
- }
- }
-}
-
-impl<T: Poolable, K: Key> Pool<T, K> {
- /// Returns a `Checkout` which is a future that resolves if an idle
- /// connection becomes available.
- pub fn checkout(&self, key: K) -> Checkout<T, K> {
- Checkout {
- key,
- pool: self.clone(),
- waiter: None,
- }
- }
-
- /// Ensure that there is only ever 1 connecting task for HTTP/2
- /// connections. This does nothing for HTTP/1.
- pub fn connecting(&self, key: &K, ver: Ver) -> Option<Connecting<T, K>> {
- if ver == Ver::Http2 {
- if let Some(ref enabled) = self.inner {
- let mut inner = enabled.lock().unwrap();
- return if inner.connecting.insert(key.clone()) {
- let connecting = Connecting {
- key: key.clone(),
- pool: WeakOpt::downgrade(enabled),
- };
- Some(connecting)
- } else {
- trace!("HTTP/2 connecting already in progress for {:?}", key);
- None
- };
- }
- }
-
- // else
- Some(Connecting {
- key: key.clone(),
- // in HTTP/1's case, there is never a lock, so we don't
- // need to do anything in Drop.
- pool: WeakOpt::none(),
- })
- }
-
- #[cfg(test)]
- fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner<T, K>> {
- self.inner.as_ref().expect("enabled").lock().expect("lock")
- }
-
- /* Used in client/tests.rs...
- #[cfg(test)]
- pub(super) fn h1_key(&self, s: &str) -> Key {
- Arc::new(s.to_string())
- }
-
- #[cfg(test)]
- pub(super) fn idle_count(&self, key: &Key) -> usize {
- self
- .locked()
- .idle
- .get(key)
- .map(|list| list.len())
- .unwrap_or(0)
- }
- */
-
- pub fn pooled(
- &self,
- #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting<T, K>,
- value: T,
- ) -> Pooled<T, K> {
- let (value, pool_ref) = if let Some(ref enabled) = self.inner {
- match value.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_insert, to_return) => {
- let mut inner = enabled.lock().unwrap();
- inner.put(connecting.key.clone(), to_insert, enabled);
- // Do this here instead of Drop for Connecting because we
- // already have a lock, no need to lock the mutex twice.
- inner.connected(&connecting.key);
- // prevent the Drop of Connecting from repeating inner.connected()
- connecting.pool = WeakOpt::none();
-
- // Shared reservations don't need a reference to the pool,
- // since the pool always keeps a copy.
- (to_return, WeakOpt::none())
- }
- Reservation::Unique(value) => {
- // Unique reservations must take a reference to the pool
- // since they hope to reinsert once the reservation is
- // completed
- (value, WeakOpt::downgrade(enabled))
- }
- }
- } else {
- // If pool is not enabled, skip all the things...
-
- // The Connecting should have had no pool ref
- debug_assert!(connecting.pool.upgrade().is_none());
-
- (value, WeakOpt::none())
- };
- Pooled {
- key: connecting.key.clone(),
- is_reused: false,
- pool: pool_ref,
- value: Some(value),
- }
- }
-
- fn reuse(&self, key: &K, value: T) -> Pooled<T, K> {
- debug!("reuse idle connection for {:?}", key);
- // TODO: unhack this
- // In Pool::pooled(), which is used for inserting brand new connections,
- // there's some code that adjusts the pool reference taken depending
- // on if the Reservation can be shared or is unique. By the time
- // reuse() is called, the reservation has already been made, and
- // we just have the final value, without knowledge of if this is
- // unique or shared. So, the hack is to just assume Ver::Http2 means
- // shared... :(
- let mut pool_ref = WeakOpt::none();
- if !value.can_share() {
- if let Some(ref enabled) = self.inner {
- pool_ref = WeakOpt::downgrade(enabled);
- }
- }
-
- Pooled {
- is_reused: true,
- key: key.clone(),
- pool: pool_ref,
- value: Some(value),
- }
- }
-}
-
-/// Pop off this list, looking for a usable connection that hasn't expired.
-struct IdlePopper<'a, T, K> {
- key: &'a K,
- list: &'a mut Vec<Idle<T>>,
-}
-
-impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> {
- fn pop(self, expiration: &Expiration) -> Option<Idle<T>> {
- while let Some(entry) = self.list.pop() {
- // If the connection has been closed, or is older than our idle
- // timeout, simply drop it and keep looking...
- if !entry.value.is_open() {
- trace!("removing closed connection for {:?}", self.key);
- continue;
- }
- // TODO: Actually, since the `idle` list is pushed to the end always,
- // that would imply that if *this* entry is expired, then anything
- // "earlier" in the list would *have* to be expired also... Right?
- //
- // In that case, we could just break out of the loop and drop the
- // whole list...
- if expiration.expires(entry.idle_at) {
- trace!("removing expired connection for {:?}", self.key);
- continue;
- }
-
- let value = match entry.value.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_reinsert, to_checkout) => {
- self.list.push(Idle {
- idle_at: Instant::now(),
- value: to_reinsert,
- });
- to_checkout
- }
- Reservation::Unique(unique) => unique,
- };
-
- return Some(Idle {
- idle_at: entry.idle_at,
- value,
- });
- }
-
- None
- }
-}
-
-impl<T: Poolable, K: Key> PoolInner<T, K> {
- fn put(&mut self, key: K, value: T, __pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {
- if value.can_share() && self.idle.contains_key(&key) {
- trace!("put; existing idle HTTP/2 connection for {:?}", key);
- return;
- }
- trace!("put; add idle connection for {:?}", key);
- let mut remove_waiters = false;
- let mut value = Some(value);
- if let Some(waiters) = self.waiters.get_mut(&key) {
- while let Some(tx) = waiters.pop_front() {
- if !tx.is_canceled() {
- let reserved = value.take().expect("value already sent");
- let reserved = match reserved.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_keep, to_send) => {
- value = Some(to_keep);
- to_send
- }
- Reservation::Unique(uniq) => uniq,
- };
- match tx.send(reserved) {
- Ok(()) => {
- if value.is_none() {
- break;
- } else {
- continue;
- }
- }
- Err(e) => {
- value = Some(e);
- }
- }
- }
-
- trace!("put; removing canceled waiter for {:?}", key);
- }
- remove_waiters = waiters.is_empty();
- }
- if remove_waiters {
- self.waiters.remove(&key);
- }
-
- match value {
- Some(value) => {
- // borrow-check scope...
- {
- let idle_list = self.idle.entry(key.clone()).or_default();
- if self.max_idle_per_host <= idle_list.len() {
- trace!("max idle per host for {:?}, dropping connection", key);
- return;
- }
-
- debug!("pooling idle connection for {:?}", key);
- idle_list.push(Idle {
- value,
- idle_at: Instant::now(),
- });
- }
-
- self.spawn_idle_interval(__pool_ref);
- }
- None => trace!("put; found waiter for {:?}", key),
- }
- }
-
- /// A `Connecting` task is complete. Not necessarily successfully,
- /// but the lock is going away, so clean up.
- fn connected(&mut self, key: &K) {
- let existed = self.connecting.remove(key);
- debug_assert!(existed, "Connecting dropped, key not in pool.connecting");
- // cancel any waiters. if there are any, it's because
- // this Connecting task didn't complete successfully.
- // those waiters would never receive a connection.
- self.waiters.remove(key);
- }
-
- fn spawn_idle_interval(&mut self, pool_ref: &Arc<Mutex<PoolInner<T, K>>>) {
- if self.idle_interval_ref.is_some() {
- return;
- }
- let dur = if let Some(dur) = self.timeout {
- dur
- } else {
- return;
- };
- let timer = if let Some(timer) = self.timer.clone() {
- timer
- } else {
- return;
- };
- let (tx, rx) = oneshot::channel();
- self.idle_interval_ref = Some(tx);
-
- let interval = IdleTask {
- timer: timer.clone(),
- duration: dur,
- deadline: Instant::now(),
- fut: timer.sleep_until(Instant::now()), // ready at first tick
- pool: WeakOpt::downgrade(pool_ref),
- pool_drop_notifier: rx,
- };
-
- self.exec.execute(interval);
- }
-}
-
-impl<T, K: Eq + Hash> PoolInner<T, K> {
- /// Any `FutureResponse`s that were created will have made a `Checkout`,
- /// and possibly inserted into the pool that it is waiting for an idle
- /// connection. If a user ever dropped that future, we need to clean out
- /// those parked senders.
- fn clean_waiters(&mut self, key: &K) {
- let mut remove_waiters = false;
- if let Some(waiters) = self.waiters.get_mut(key) {
- waiters.retain(|tx| !tx.is_canceled());
- remove_waiters = waiters.is_empty();
- }
- if remove_waiters {
- self.waiters.remove(key);
- }
- }
-}
-
-impl<T: Poolable, K: Key> PoolInner<T, K> {
- /// This should *only* be called by the IdleTask
- fn clear_expired(&mut self) {
- let dur = self.timeout.expect("interval assumes timeout");
-
- let now = Instant::now();
- //self.last_idle_check_at = now;
-
- self.idle.retain(|key, values| {
- values.retain(|entry| {
- if !entry.value.is_open() {
- trace!("idle interval evicting closed for {:?}", key);
- return false;
- }
-
- // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470.
- if now.saturating_duration_since(entry.idle_at) > dur {
- trace!("idle interval evicting expired for {:?}", key);
- return false;
- }
-
- // Otherwise, keep this value...
- true
- });
-
- // returning false evicts this key/val
- !values.is_empty()
- });
- }
-}
-
-impl<T, K: Key> Clone for Pool<T, K> {
- fn clone(&self) -> Pool<T, K> {
- Pool {
- inner: self.inner.clone(),
- }
- }
-}
-
-/// A wrapped poolable value that tries to reinsert to the Pool on Drop.
-// Note: The bounds `T: Poolable` is needed for the Drop impl.
-pub struct Pooled<T: Poolable, K: Key> {
- value: Option<T>,
- is_reused: bool,
- key: K,
- pool: WeakOpt<Mutex<PoolInner<T, K>>>,
-}
-
-impl<T: Poolable, K: Key> Pooled<T, K> {
- pub fn is_reused(&self) -> bool {
- self.is_reused
- }
-
- pub fn is_pool_enabled(&self) -> bool {
- self.pool.0.is_some()
- }
-
- fn as_ref(&self) -> &T {
- self.value.as_ref().expect("not dropped")
- }
-
- fn as_mut(&mut self) -> &mut T {
- self.value.as_mut().expect("not dropped")
- }
-}
-
-impl<T: Poolable, K: Key> Deref for Pooled<T, K> {
- type Target = T;
- fn deref(&self) -> &T {
- self.as_ref()
- }
-}
-
-impl<T: Poolable, K: Key> DerefMut for Pooled<T, K> {
- fn deref_mut(&mut self) -> &mut T {
- self.as_mut()
- }
-}
-
-impl<T: Poolable, K: Key> Drop for Pooled<T, K> {
- fn drop(&mut self) {
- if let Some(value) = self.value.take() {
- if !value.is_open() {
- // If we *already* know the connection is done here,
- // it shouldn't be re-inserted back into the pool.
- return;
- }
-
- if let Some(pool) = self.pool.upgrade() {
- if let Ok(mut inner) = pool.lock() {
- inner.put(self.key.clone(), value, &pool);
- }
- } else if !value.can_share() {
- trace!("pool dropped, dropping pooled ({:?})", self.key);
- }
- // Ver::Http2 is already in the Pool (or dead), so we wouldn't
- // have an actual reference to the Pool.
- }
- }
-}
-
-impl<T: Poolable, K: Key> fmt::Debug for Pooled<T, K> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Pooled").field("key", &self.key).finish()
- }
-}
-
-struct Idle<T> {
- idle_at: Instant,
- value: T,
-}
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub struct Checkout<T, K: Key> {
- key: K,
- pool: Pool<T, K>,
- waiter: Option<oneshot::Receiver<T>>,
-}
-
/// Error returned by a [`Checkout`] when no pooled value can be produced.
#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    /// The pool has no inner state to check out from.
    PoolDisabled,
    /// The waiter channel was canceled before a value arrived.
    CheckoutNoLongerWanted,
    /// A value was received from the pool but was no longer open.
    CheckedOutClosedValue,
}
-
-impl Error {
- pub(super) fn is_canceled(&self) -> bool {
- matches!(self, Error::CheckedOutClosedValue)
- }
-}
-
-impl fmt::Display for Error {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(match self {
- Error::PoolDisabled => "pool is disabled",
- Error::CheckedOutClosedValue => "checked out connection was closed",
- Error::CheckoutNoLongerWanted => "request was canceled",
- })
- }
-}
-
-impl StdError for Error {}
-
impl<T: Poolable, K: Key> Checkout<T, K> {
    /// Poll the registered waiter channel, if any.
    ///
    /// Returns `Ready(Some(Ok(..)))` when an open value arrived,
    /// `Ready(Some(Err(..)))` when a closed value arrived or the channel was
    /// canceled, `Ready(None)` when no waiter is registered, and `Pending`
    /// while a registered waiter is still empty.
    fn poll_waiter(
        &mut self,
        cx: &mut task::Context<'_>,
    ) -> Poll<Option<Result<Pooled<T, K>, Error>>> {
        if let Some(mut rx) = self.waiter.take() {
            match Pin::new(&mut rx).poll(cx) {
                Poll::Ready(Ok(value)) => {
                    // A connection was handed over, but it may have closed
                    // while sitting in the channel.
                    if value.is_open() {
                        Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value))))
                    } else {
                        Poll::Ready(Some(Err(Error::CheckedOutClosedValue)))
                    }
                }
                Poll::Pending => {
                    // Put the receiver back so the next poll keeps waiting.
                    self.waiter = Some(rx);
                    Poll::Pending
                }
                Poll::Ready(Err(_canceled)) => {
                    Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted)))
                }
            }
        } else {
            Poll::Ready(None)
        }
    }

    /// Try to take an idle connection for `self.key` right now.
    ///
    /// If none is available, registers this task as a waiter (at most one
    /// waiter per checkout) so it is woken when a connection is returned.
    /// Returns `None` when the pool is disabled or nothing idle matched.
    fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option<Pooled<T, K>> {
        let entry = {
            // Pool disabled (`inner` is None) short-circuits to None here.
            let mut inner = self.pool.inner.as_ref()?.lock().unwrap();
            let expiration = Expiration::new(inner.timeout);
            let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| {
                trace!("take? {:?}: expiration = {:?}", self.key, expiration.0);
                // A block to end the mutable borrow on list,
                // so the map below can check is_empty()
                {
                    let popper = IdlePopper {
                        key: &self.key,
                        list,
                    };
                    popper.pop(&expiration)
                }
                .map(|e| (e, list.is_empty()))
            });

            let (entry, empty) = if let Some((e, empty)) = maybe_entry {
                (Some(e), empty)
            } else {
                // No entry found means nuke the list for sure.
                (None, true)
            };
            if empty {
                //TODO: This could be done with the HashMap::entry API instead.
                inner.idle.remove(&self.key);
            }

            if entry.is_none() && self.waiter.is_none() {
                // Nothing idle and not yet waiting: enqueue a waiter while
                // still holding the pool lock, so no returned connection can
                // slip past between the idle check and registration.
                let (tx, mut rx) = oneshot::channel();
                trace!("checkout waiting for idle connection: {:?}", self.key);
                inner
                    .waiters
                    .entry(self.key.clone())
                    .or_insert_with(VecDeque::new)
                    .push_back(tx);

                // register the waker with this oneshot
                assert!(Pin::new(&mut rx).poll(cx).is_pending());
                self.waiter = Some(rx);
            }

            entry
        };

        entry.map(|e| self.pool.reuse(&self.key, e.value))
    }
}
-
impl<T: Poolable, K: Key> Future for Checkout<T, K> {
    type Output = Result<Pooled<T, K>, Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // Drain any registered waiter first; it may already hold a
        // connection returned since the last poll.
        if let Some(pooled) = ready!(self.poll_waiter(cx)?) {
            return Poll::Ready(Ok(pooled));
        }

        // Otherwise scan the idle list (this also registers a waiter when
        // nothing is available).
        if let Some(pooled) = self.checkout(cx) {
            Poll::Ready(Ok(pooled))
        } else if !self.pool.is_enabled() {
            Poll::Ready(Err(Error::PoolDisabled))
        } else {
            // There's a new waiter, already registered in self.checkout()
            debug_assert!(self.waiter.is_some());
            Poll::Pending
        }
    }
}
-
-impl<T, K: Key> Drop for Checkout<T, K> {
- fn drop(&mut self) {
- if self.waiter.take().is_some() {
- trace!("checkout dropped for {:?}", self.key);
- if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) {
- inner.clean_waiters(&self.key);
- }
- }
- }
-}
-
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
// An in-progress connection attempt for `key`. Its Drop impl notifies the
// pool (`connected`) so other checkouts for the key can proceed.
pub struct Connecting<T: Poolable, K: Key> {
    key: K,
    // Weak pool handle; `None` when already tracking an HTTP/2 connect
    // (see the debug_assert in `alpn_h2`).
    pool: WeakOpt<Mutex<PoolInner<T, K>>>,
}
-
impl<T: Poolable, K: Key> Connecting<T, K> {
    /// Convert this connect permit into an HTTP/2 one on the given pool.
    ///
    /// Must not be called when this permit already tracks an HTTP/2
    /// connect (debug-asserted via the empty pool handle).
    pub fn alpn_h2(self, pool: &Pool<T, K>) -> Option<Self> {
        debug_assert!(
            self.pool.0.is_none(),
            "Connecting::alpn_h2 but already Http2"
        );

        pool.connecting(&self.key, Ver::Http2)
    }
}
-
-impl<T: Poolable, K: Key> Drop for Connecting<T, K> {
- fn drop(&mut self) {
- if let Some(pool) = self.pool.upgrade() {
- // No need to panic on drop, that could abort!
- if let Ok(mut inner) = pool.lock() {
- inner.connected(&self.key);
- }
- }
- }
-}
-
// Optional idle timeout used to decide whether an idle entry is stale.
struct Expiration(Option<Duration>);

impl Expiration {
    fn new(dur: Option<Duration>) -> Expiration {
        Expiration(dur)
    }

    // True when `instant` is older than the configured timeout; with no
    // timeout configured, entries never expire.
    fn expires(&self, instant: Instant) -> bool {
        // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470.
        self.0
            .map(|timeout| Instant::now().saturating_duration_since(instant) > timeout)
            .unwrap_or(false)
    }
}
-
pin_project_lite::pin_project! {
    // Background task that periodically evicts expired idle connections.
    struct IdleTask<T, K: Key> {
        // Timer used to re-arm `fut` after each tick.
        timer: Timer,
        // Interval between expiry sweeps.
        duration: Duration,
        // Next scheduled sweep time.
        deadline: Instant,
        // The in-flight sleep until `deadline`.
        fut: Pin<Box<dyn Sleep>>,
        // Weak handle so this task doesn't keep the pool alive.
        pool: WeakOpt<Mutex<PoolInner<T, K>>>,
        // This allows the IdleTask to be notified as soon as the entire
        // Pool is fully dropped, and shutdown. This channel is never sent on,
        // but Err(Canceled) will be received when the Pool is dropped.
        #[pin]
        pool_drop_notifier: oneshot::Receiver<Infallible>,
    }
}
-
impl<T: Poolable + 'static, K: Key> Future for IdleTask<T, K> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        loop {
            // Exit as soon as the pool is gone. The channel carries
            // `Infallible`, so `Ok` is statically impossible.
            match this.pool_drop_notifier.as_mut().poll(cx) {
                Poll::Ready(Ok(n)) => match n {},
                Poll::Pending => (),
                Poll::Ready(Err(_canceled)) => {
                    trace!("pool closed, canceling idle interval");
                    return Poll::Ready(());
                }
            }

            ready!(Pin::new(&mut this.fut).poll(cx));
            // Set this task to run after the next deadline
            // If the poll missed the deadline by a lot, set the deadline
            // from the current time instead
            *this.deadline += *this.duration;
            if *this.deadline < Instant::now() - Duration::from_millis(5) {
                *this.deadline = Instant::now() + *this.duration;
            }
            *this.fut = this.timer.sleep_until(*this.deadline);

            if let Some(inner) = this.pool.upgrade() {
                if let Ok(mut inner) = inner.lock() {
                    trace!("idle interval checking for expired");
                    inner.clear_expired();
                    continue;
                }
            }
            // Pool gone (or its lock poisoned): nothing left to sweep.
            return Poll::Ready(());
        }
    }
}
-
-impl<T> WeakOpt<T> {
- fn none() -> Self {
- WeakOpt(None)
- }
-
- fn downgrade(arc: &Arc<T>) -> Self {
- WeakOpt(Some(Arc::downgrade(arc)))
- }
-
- fn upgrade(&self) -> Option<Arc<T>> {
- self.0.as_ref().and_then(Weak::upgrade)
- }
-}
-
#[cfg(all(test, not(miri)))]
mod tests {
    use std::fmt::Debug;
    use std::future::Future;
    use std::hash::Hash;
    use std::pin::Pin;
    use std::task::{self, Poll};
    use std::time::Duration;

    use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
    use crate::rt::{TokioExecutor, TokioTimer};

    use crate::common::timer;

    // A concrete pool key for tests: (scheme, authority).
    #[derive(Clone, Debug, PartialEq, Eq, Hash)]
    struct KeyImpl(http::uri::Scheme, http::uri::Authority);

    type KeyTuple = (http::uri::Scheme, http::uri::Authority);

    /// Test unique reservations.
    #[derive(Debug, PartialEq, Eq)]
    struct Uniq<T>(T);

    impl<T: Send + 'static + Unpin> Poolable for Uniq<T> {
        fn is_open(&self) -> bool {
            true
        }

        fn reserve(self) -> Reservation<Self> {
            Reservation::Unique(self)
        }

        fn can_share(&self) -> bool {
            false
        }
    }

    // Shorthand for a `Connecting` permit not attached to any pool.
    fn c<T: Poolable, K: Key>(key: K) -> Connecting<T, K> {
        Connecting {
            key,
            pool: WeakOpt::none(),
        }
    }

    fn host_key(s: &str) -> KeyImpl {
        KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key"))
    }

    fn pool_no_timer<T, K: Key>() -> Pool<T, K> {
        pool_max_idle_no_timer(usize::MAX)
    }

    // A pool with a 100ms idle timeout but no background sweep task, so
    // expiry is only observed at checkout time.
    fn pool_max_idle_no_timer<T, K: Key>(max_idle: usize) -> Pool<T, K> {
        let pool = Pool::new(
            super::Config {
                idle_timeout: Some(Duration::from_millis(100)),
                max_idle_per_host: max_idle,
            },
            TokioExecutor::new(),
            Option::<timer::Timer>::None,
        );
        pool.no_timer();
        pool
    }

    #[tokio::test]
    async fn test_pool_checkout_smoke() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        // Dropping an open `Pooled` reinserts it into the pool.
        drop(pooled);

        match pool.checkout(key).await {
            Ok(pooled) => assert_eq!(*pooled, Uniq(41)),
            Err(_) => panic!("not ready"),
        };
    }

    /// Helper to check if the future is ready after polling once.
    struct PollOnce<'a, F>(&'a mut F);

    impl<F, T, U> Future for PollOnce<'_, F>
    where
        F: Future<Output = Result<T, U>> + Unpin,
    {
        type Output = Option<()>;

        fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            match Pin::new(&mut self.0).poll(cx) {
                Poll::Ready(Ok(_)) => Poll::Ready(Some(())),
                Poll::Ready(Err(_)) => Poll::Ready(Some(())),
                Poll::Pending => Poll::Ready(None),
            }
        }
    }

    #[tokio::test]
    async fn test_pool_checkout_returns_none_if_expired() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        drop(pooled);
        // Sleep past the idle timeout so the stored entry is stale.
        tokio::time::sleep(pool.locked().timeout.unwrap()).await;
        let mut checkout = pool.checkout(key);
        let poll_once = PollOnce(&mut checkout);
        let is_not_ready = poll_once.await.is_none();
        assert!(is_not_ready);
    }

    #[tokio::test]
    async fn test_pool_checkout_removes_expired() {
        let pool = pool_no_timer();
        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(3)
        );
        tokio::time::sleep(pool.locked().timeout.unwrap()).await;

        let mut checkout = pool.checkout(key.clone());
        let poll_once = PollOnce(&mut checkout);
        // checkout.await should clean out the expired
        poll_once.await;
        assert!(!pool.locked().idle.contains_key(&key));
    }

    #[test]
    fn test_pool_max_idle_per_host() {
        let pool = pool_max_idle_no_timer(2);
        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        // pooled and dropped 3, max_idle should only allow 2
        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(2)
        );
    }

    #[tokio::test]
    async fn test_pool_timer_removes_expired() {
        // Unlike the pools above, this one has a real timer, so the
        // background IdleTask performs the eviction.
        let pool = Pool::new(
            super::Config {
                idle_timeout: Some(Duration::from_millis(10)),
                max_idle_per_host: usize::MAX,
            },
            TokioExecutor::new(),
            Some(TokioTimer::new()),
        );

        let key = host_key("foo");

        pool.pooled(c(key.clone()), Uniq(41));
        pool.pooled(c(key.clone()), Uniq(5));
        pool.pooled(c(key.clone()), Uniq(99));

        assert_eq!(
            pool.locked().idle.get(&key).map(|entries| entries.len()),
            Some(3)
        );

        // Let the timer tick passed the expiration...
        tokio::time::sleep(Duration::from_millis(30)).await;
        // Yield so the Interval can reap...
        tokio::task::yield_now().await;

        assert!(!pool.locked().idle.contains_key(&key));
    }

    #[tokio::test]
    async fn test_pool_checkout_task_unparked() {
        use futures_util::future::join;
        use futures_util::FutureExt;

        let pool = pool_no_timer();
        let key = host_key("foo");
        let pooled = pool.pooled(c(key.clone()), Uniq(41));

        let checkout = join(pool.checkout(key), async {
            // the checkout future will park first,
            // and then this lazy future will be polled, which will insert
            // the pooled back into the pool
            //
            // this test makes sure that doing so will unpark the checkout
            drop(pooled);
        })
        .map(|(entry, _)| entry);

        assert_eq!(*checkout.await.unwrap(), Uniq(41));
    }

    #[tokio::test]
    async fn test_pool_checkout_drop_cleans_up_waiters() {
        let pool = pool_no_timer::<Uniq<i32>, KeyImpl>();
        let key = host_key("foo");

        let mut checkout1 = pool.checkout(key.clone());
        let mut checkout2 = pool.checkout(key.clone());

        let poll_once1 = PollOnce(&mut checkout1);
        let poll_once2 = PollOnce(&mut checkout2);

        // first poll needed to get into Pool's parked
        poll_once1.await;
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
        poll_once2.await;
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);

        // on drop, clean up Pool
        drop(checkout1);
        assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);

        drop(checkout2);
        assert!(!pool.locked().waiters.contains_key(&key));
    }

    // A poolable value whose openness is controlled by a flag, used to
    // verify closed values are never reinserted.
    #[derive(Debug)]
    struct CanClose {
        #[allow(unused)]
        val: i32,
        closed: bool,
    }

    impl Poolable for CanClose {
        fn is_open(&self) -> bool {
            !self.closed
        }

        fn reserve(self) -> Reservation<Self> {
            Reservation::Unique(self)
        }

        fn can_share(&self) -> bool {
            false
        }
    }

    #[test]
    fn pooled_drop_if_closed_doesnt_reinsert() {
        let pool = pool_no_timer();
        let key = host_key("foo");
        pool.pooled(
            c(key.clone()),
            CanClose {
                val: 57,
                closed: true,
            },
        );

        assert!(!pool.locked().idle.contains_key(&key));
    }
}
diff --git a/vendor/hyper-util/src/client/mod.rs b/vendor/hyper-util/src/client/mod.rs
deleted file mode 100644
index 0d896030..00000000
--- a/vendor/hyper-util/src/client/mod.rs
+++ /dev/null
@@ -1,8 +0,0 @@
//! HTTP client utilities

/// Legacy implementations of `connect` module and `Client`
#[cfg(feature = "client-legacy")]
pub mod legacy;

/// Proxy utilities, including `NO_PROXY`-style matching rules.
#[cfg(feature = "client-proxy")]
pub mod proxy;
diff --git a/vendor/hyper-util/src/client/proxy/matcher.rs b/vendor/hyper-util/src/client/proxy/matcher.rs
deleted file mode 100644
index fd563bca..00000000
--- a/vendor/hyper-util/src/client/proxy/matcher.rs
+++ /dev/null
@@ -1,848 +0,0 @@
-//! Proxy matchers
-//!
-//! This module contains different matchers to configure rules for when a proxy
-//! should be used, and if so, with what arguments.
-//!
-//! A [`Matcher`] can be constructed either using environment variables, or
-//! a [`Matcher::builder()`].
-//!
-//! Once constructed, the `Matcher` can be asked if it intercepts a `Uri` by
-//! calling [`Matcher::intercept()`].
-//!
-//! An [`Intercept`] includes the destination for the proxy, and any parsed
-//! authentication to be used.
-
-use std::fmt;
-use std::net::IpAddr;
-
-use http::header::HeaderValue;
-use ipnet::IpNet;
-use percent_encoding::percent_decode_str;
-
-#[cfg(docsrs)]
-pub use self::builder::IntoValue;
-#[cfg(not(docsrs))]
-use self::builder::IntoValue;
-
/// A proxy matcher, usually built from environment variables.
pub struct Matcher {
    // Proxy for plain-HTTP destinations, if configured.
    http: Option<Intercept>,
    // Proxy for HTTPS destinations, if configured.
    https: Option<Intercept>,
    // Hosts that must never go through a proxy.
    no: NoProxy,
}
-
/// A matched proxy,
///
/// This is returned by a matcher if a proxy should be used.
#[derive(Clone)]
pub struct Intercept {
    // The proxy's own URI to connect to (path normalized to "/").
    uri: http::Uri,
    // Credentials parsed from the proxy URL's userinfo, if any.
    auth: Auth,
}
-
/// A builder to create a [`Matcher`].
///
/// Construct with [`Matcher::builder()`].
#[derive(Default)]
pub struct Builder {
    // Set when `REQUEST_METHOD` is present (a CGI environment); `build()`
    // then disables all proxies.
    is_cgi: bool,
    // Raw, unparsed proxy URL strings; the empty string means "unset".
    all: String,
    http: String,
    https: String,
    no: String,
}
-
// Credentials parsed from a proxy URL's userinfo component.
#[derive(Clone)]
enum Auth {
    // No credentials present.
    Empty,
    // Pre-encoded `Basic ...` header value (http/https proxies).
    Basic(http::header::HeaderValue),
    // Raw (username, password) pair (non-HTTP schemes such as SOCKS).
    Raw(String, String),
}
-
/// A filter for proxy matchers.
///
/// This type is based off the `NO_PROXY` rules used by curl.
#[derive(Clone, Debug, Default)]
struct NoProxy {
    ips: IpMatcher,
    domains: DomainMatcher,
}

// Domain-suffix rules parsed from a NO_PROXY list.
#[derive(Clone, Debug, Default)]
struct DomainMatcher(Vec<String>);

// IP / CIDR rules parsed from a NO_PROXY list.
#[derive(Clone, Debug, Default)]
struct IpMatcher(Vec<Ip>);

// One IP rule: an exact address or a network range.
#[derive(Clone, Debug)]
enum Ip {
    Address(IpAddr),
    Network(IpNet),
}
-
-// ===== impl Matcher =====
-
-impl Matcher {
- /// Create a matcher reading the current environment variables.
- ///
- /// This checks for values in the following variables, treating them the
- /// same as curl does:
- ///
- /// - `ALL_PROXY`/`all_proxy`
- /// - `HTTPS_PROXY`/`https_proxy`
- /// - `HTTP_PROXY`/`http_proxy`
- /// - `NO_PROXY`/`no_proxy`
- pub fn from_env() -> Self {
- Builder::from_env().build()
- }
-
- /// Create a matcher from the environment or system.
- ///
- /// This checks the same environment variables as `from_env()`, and if not
- /// set, checks the system configuration for values for the OS.
- ///
- /// This constructor is always available, but if the `client-proxy-system`
- /// feature is enabled, it will check more configuration. Use this
- /// constructor if you want to allow users to optionally enable more, or
- /// use `from_env` if you do not want the values to change based on an
- /// enabled feature.
- pub fn from_system() -> Self {
- Builder::from_system().build()
- }
-
- /// Start a builder to configure a matcher.
- pub fn builder() -> Builder {
- Builder::default()
- }
-
- /// Check if the destination should be intercepted by a proxy.
- ///
- /// If the proxy rules match the destination, a new `Uri` will be returned
- /// to connect to.
- pub fn intercept(&self, dst: &http::Uri) -> Option<Intercept> {
- // TODO(perf): don't need to check `no` if below doesn't match...
- if self.no.contains(dst.host()?) {
- return None;
- }
-
- match dst.scheme_str() {
- Some("http") => self.http.clone(),
- Some("https") => self.https.clone(),
- _ => None,
- }
- }
-}
-
-impl fmt::Debug for Matcher {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let mut b = f.debug_struct("Matcher");
-
- if let Some(ref http) = self.http {
- b.field("http", http);
- }
-
- if let Some(ref https) = self.https {
- b.field("https", https);
- }
-
- if !self.no.is_empty() {
- b.field("no", &self.no);
- }
- b.finish()
- }
-}
-
-// ===== impl Intercept =====
-
-impl Intercept {
- /// Get the `http::Uri` for the target proxy.
- pub fn uri(&self) -> &http::Uri {
- &self.uri
- }
-
- /// Get any configured basic authorization.
- ///
- /// This should usually be used with a `Proxy-Authorization` header, to
- /// send in Basic format.
- ///
- /// # Example
- ///
- /// ```rust
- /// # use hyper_util::client::proxy::matcher::Matcher;
- /// # let uri = http::Uri::from_static("https://hyper.rs");
- /// let m = Matcher::builder()
- /// .all("https://Aladdin:opensesame@localhost:8887")
- /// .build();
- ///
- /// let proxy = m.intercept(&uri).expect("example");
- /// let auth = proxy.basic_auth().expect("example");
- /// assert_eq!(auth, "Basic QWxhZGRpbjpvcGVuc2VzYW1l");
- /// ```
- pub fn basic_auth(&self) -> Option<&HeaderValue> {
- if let Auth::Basic(ref val) = self.auth {
- Some(val)
- } else {
- None
- }
- }
-
- /// Get any configured raw authorization.
- ///
- /// If not detected as another scheme, this is the username and password
- /// that should be sent with whatever protocol the proxy handshake uses.
- ///
- /// # Example
- ///
- /// ```rust
- /// # use hyper_util::client::proxy::matcher::Matcher;
- /// # let uri = http::Uri::from_static("https://hyper.rs");
- /// let m = Matcher::builder()
- /// .all("socks5h://Aladdin:opensesame@localhost:8887")
- /// .build();
- ///
- /// let proxy = m.intercept(&uri).expect("example");
- /// let auth = proxy.raw_auth().expect("example");
- /// assert_eq!(auth, ("Aladdin", "opensesame"));
- /// ```
- pub fn raw_auth(&self) -> Option<(&str, &str)> {
- if let Auth::Raw(ref u, ref p) = self.auth {
- Some((u.as_str(), p.as_str()))
- } else {
- None
- }
- }
-}
-
-impl fmt::Debug for Intercept {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Intercept")
- .field("uri", &self.uri)
- // dont output auth, its sensitive
- .finish()
- }
-}
-
// ===== impl Builder =====

impl Builder {
    // Snapshot all proxy-related environment variables (uppercase checked
    // before lowercase, matching curl).
    fn from_env() -> Self {
        Builder {
            // `REQUEST_METHOD` is set by CGI servers; `build()` disables
            // all proxies in that case.
            is_cgi: std::env::var_os("REQUEST_METHOD").is_some(),
            all: get_first_env(&["ALL_PROXY", "all_proxy"]),
            http: get_first_env(&["HTTP_PROXY", "http_proxy"]),
            https: get_first_env(&["HTTPS_PROXY", "https_proxy"]),
            no: get_first_env(&["NO_PROXY", "no_proxy"]),
        }
    }

    // Environment variables win; OS-level settings only fill in blanks.
    fn from_system() -> Self {
        #[allow(unused_mut)]
        let mut builder = Self::from_env();

        #[cfg(all(feature = "client-proxy-system", target_os = "macos"))]
        mac::with_system(&mut builder);

        #[cfg(all(feature = "client-proxy-system", windows))]
        win::with_system(&mut builder);

        builder
    }

    /// Set the target proxy for all destinations.
    pub fn all<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.all = val.into_value();
        self
    }

    /// Set the target proxy for HTTP destinations.
    pub fn http<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.http = val.into_value();
        self
    }

    /// Set the target proxy for HTTPS destinations.
    pub fn https<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.https = val.into_value();
        self
    }

    /// Set the "no" proxy filter.
    ///
    /// The rules are as follows:
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
    ///   for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
    ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
    ///
    /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
    /// (and therefore would bypass the proxy):
    /// * `http://google.com/`
    /// * `http://www.google.com/`
    /// * `http://192.168.1.42/`
    ///
    /// The URL `http://notgoogle.com/` would not match.
    pub fn no<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.no = val.into_value();
        self
    }

    /// Construct a [`Matcher`] using the configured values.
    pub fn build(self) -> Matcher {
        if self.is_cgi {
            // In CGI, proxies are disabled entirely — NOTE(review): this
            // mirrors the usual `HTTP_PROXY`-vs-`Proxy:`-header (httpoxy)
            // mitigation; confirm before relying on that rationale.
            return Matcher {
                http: None,
                https: None,
                no: NoProxy::empty(),
            };
        }

        let all = parse_env_uri(&self.all);

        Matcher {
            // Scheme-specific settings override the `all` fallback.
            http: parse_env_uri(&self.http).or_else(|| all.clone()),
            https: parse_env_uri(&self.https).or(all),
            no: NoProxy::from_string(&self.no),
        }
    }
}
-
// Return the value of the first environment variable in `names` that is
// set (and valid Unicode), or an empty string if none are.
fn get_first_env(names: &[&str]) -> String {
    names
        .iter()
        .find_map(|name| std::env::var(name).ok())
        .unwrap_or_default()
}
-
// Parse a proxy URL string (from the environment or builder) into an
// `Intercept`. Accepts http/https/socks5/socks5h schemes (no scheme
// defaults to http), extracts userinfo credentials into `Auth`, and
// normalizes the path to "/". Returns `None` for unusable values.
fn parse_env_uri(val: &str) -> Option<Intercept> {
    let uri = val.parse::<http::Uri>().ok()?;
    let mut builder = http::Uri::builder();
    let mut is_httpish = false;
    let mut auth = Auth::Empty;

    builder = builder.scheme(match uri.scheme() {
        Some(s) => {
            if s == &http::uri::Scheme::HTTP || s == &http::uri::Scheme::HTTPS {
                is_httpish = true;
                s.clone()
            } else if s.as_str() == "socks5" || s.as_str() == "socks5h" {
                s.clone()
            } else {
                // can't use this proxy scheme
                return None;
            }
        }
        // if no scheme provided, assume they meant 'http'
        None => {
            is_httpish = true;
            http::uri::Scheme::HTTP
        }
    });

    let authority = uri.authority()?;

    if let Some((userinfo, host_port)) = authority.as_str().split_once('@') {
        // Userinfo must be `user:pass`; components may be percent-encoded.
        let (user, pass) = userinfo.split_once(':')?;
        let user = percent_decode_str(user).decode_utf8_lossy();
        let pass = percent_decode_str(pass).decode_utf8_lossy();
        if is_httpish {
            // HTTP(S) proxies take a ready-made Basic header value.
            auth = Auth::Basic(encode_basic_auth(&user, Some(&pass)));
        } else {
            // Other schemes (SOCKS) keep the raw credentials.
            auth = Auth::Raw(user.into(), pass.into());
        }
        builder = builder.authority(host_port);
    } else {
        builder = builder.authority(authority.clone());
    }

    // removing any path, but we MUST specify one or the builder errors
    builder = builder.path_and_query("/");

    let dst = builder.build().ok()?;

    Some(Intercept { uri: dst, auth })
}
-
// Build a `Basic <base64(user:pass)>` header value, marked sensitive so it
// is redacted from `Debug` output.
fn encode_basic_auth(user: &str, pass: Option<&str>) -> HeaderValue {
    use base64::prelude::BASE64_STANDARD;
    use base64::write::EncoderWriter;
    use std::io::Write;

    let mut buf = b"Basic ".to_vec();
    {
        // Scope the encoder so it flushes its final base64 block into `buf`
        // before we read the buffer below.
        let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD);
        let _ = write!(encoder, "{user}:");
        if let Some(password) = pass {
            let _ = write!(encoder, "{password}");
        }
    }
    let mut header = HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue");
    header.set_sensitive(true);
    header
}
-
impl NoProxy {
    /*
    fn from_env() -> NoProxy {
        let raw = std::env::var("NO_PROXY")
            .or_else(|_| std::env::var("no_proxy"))
            .unwrap_or_default();

        Self::from_string(&raw)
    }
    */

    // A filter that matches nothing (used when proxies are disabled).
    fn empty() -> NoProxy {
        NoProxy {
            ips: IpMatcher(Vec::new()),
            domains: DomainMatcher(Vec::new()),
        }
    }

    /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
    /// are set)
    /// The rules are as follows:
    /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
    /// * If neither environment variable is set, `None` is returned
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
    ///   for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
    ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
    ///
    /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
    /// (and therefore would bypass the proxy):
    /// * `http://google.com/`
    /// * `http://www.google.com/`
    /// * `http://192.168.1.42/`
    ///
    /// The URL `http://notgoogle.com/` would not match.
    pub fn from_string(no_proxy_list: &str) -> Self {
        let mut ips = Vec::new();
        let mut domains = Vec::new();
        let parts = no_proxy_list.split(',').map(str::trim);
        for part in parts {
            match part.parse::<IpNet>() {
                // If we can parse an IP net or address, then use it, otherwise, assume it is a domain
                Ok(ip) => ips.push(Ip::Network(ip)),
                Err(_) => match part.parse::<IpAddr>() {
                    Ok(addr) => ips.push(Ip::Address(addr)),
                    Err(_) => {
                        // Skip empty entries (e.g. trailing commas).
                        if !part.trim().is_empty() {
                            domains.push(part.to_owned())
                        }
                    }
                },
            }
        }
        NoProxy {
            ips: IpMatcher(ips),
            domains: DomainMatcher(domains),
        }
    }

    /// Return true if this matches the host (domain or IP).
    pub fn contains(&self, host: &str) -> bool {
        // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off
        // the end in order to parse correctly
        let host = if host.starts_with('[') {
            let x: &[_] = &['[', ']'];
            host.trim_matches(x)
        } else {
            host
        };
        match host.parse::<IpAddr>() {
            // If we can parse an IP addr, then use it, otherwise, assume it is a domain
            Ok(ip) => self.ips.contains(ip),
            Err(_) => self.domains.contains(host),
        }
    }

    // True when no rules are configured at all (used by Matcher's Debug).
    fn is_empty(&self) -> bool {
        self.ips.0.is_empty() && self.domains.0.is_empty()
    }
}
-
-impl IpMatcher {
- fn contains(&self, addr: IpAddr) -> bool {
- for ip in &self.0 {
- match ip {
- Ip::Address(address) => {
- if &addr == address {
- return true;
- }
- }
- Ip::Network(net) => {
- if net.contains(&addr) {
- return true;
- }
- }
- }
- }
- false
- }
-}
-
impl DomainMatcher {
    // The following links may be useful to understand the origin of these rules:
    // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html
    // * https://github.com/curl/curl/issues/1208
    //
    // A rule matches the exact domain or any subdomain of it; a leading dot
    // on the rule is optional. The single rule "*" matches every host.
    fn contains(&self, domain: &str) -> bool {
        let domain_len = domain.len();
        for d in &self.0 {
            // Exact match, with or without the rule's leading dot.
            if d == domain || d.strip_prefix('.') == Some(domain) {
                return true;
            } else if domain.ends_with(d) {
                if d.starts_with('.') {
                    // If the first character of d is a dot, that means the first character of domain
                    // must also be a dot, so we are looking at a subdomain of d and that matches
                    return true;
                } else if domain.as_bytes().get(domain_len - d.len() - 1) == Some(&b'.') {
                    // Given that d is a suffix of domain, if the prior character in domain is a dot
                    // then that means we must be matching a subdomain of d, and that matches
                    return true;
                }
            } else if d == "*" {
                return true;
            }
        }
        false
    }
}
-
mod builder {
    /// A type that can be used as a `Builder` value.
    ///
    /// Private and sealed, only visible in docs.
    pub trait IntoValue {
        #[doc(hidden)]
        fn into_value(self) -> String;
    }

    impl IntoValue for String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self
        }
    }

    impl IntoValue for &String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.clone()
        }
    }

    impl IntoValue for &str {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.to_owned()
        }
    }
}
-
#[cfg(feature = "client-proxy-system")]
#[cfg(target_os = "macos")]
mod mac {
    use system_configuration::core_foundation::base::{CFType, TCFType, TCFTypeRef};
    use system_configuration::core_foundation::dictionary::CFDictionary;
    use system_configuration::core_foundation::number::CFNumber;
    use system_configuration::core_foundation::string::{CFString, CFStringRef};
    use system_configuration::dynamic_store::SCDynamicStoreBuilder;
    use system_configuration::sys::schema_definitions::{
        kSCPropNetProxiesHTTPEnable, kSCPropNetProxiesHTTPPort, kSCPropNetProxiesHTTPProxy,
        kSCPropNetProxiesHTTPSEnable, kSCPropNetProxiesHTTPSPort, kSCPropNetProxiesHTTPSProxy,
    };

    // Fill any *unset* http/https proxy values from the macOS dynamic
    // store; values already set (from the environment) win.
    pub(super) fn with_system(builder: &mut super::Builder) {
        let store = SCDynamicStoreBuilder::new("").build();

        let proxies_map = if let Some(proxies_map) = store.get_proxies() {
            proxies_map
        } else {
            return;
        };

        if builder.http.is_empty() {
            // The `unsafe` blocks read extern static key constants exported
            // by the SystemConfiguration framework.
            let http_proxy_config = parse_setting_from_dynamic_store(
                &proxies_map,
                unsafe { kSCPropNetProxiesHTTPEnable },
                unsafe { kSCPropNetProxiesHTTPProxy },
                unsafe { kSCPropNetProxiesHTTPPort },
            );
            if let Some(http) = http_proxy_config {
                builder.http = http;
            }
        }

        if builder.https.is_empty() {
            let https_proxy_config = parse_setting_from_dynamic_store(
                &proxies_map,
                unsafe { kSCPropNetProxiesHTTPSEnable },
                unsafe { kSCPropNetProxiesHTTPSProxy },
                unsafe { kSCPropNetProxiesHTTPSPort },
            );

            if let Some(https) = https_proxy_config {
                builder.https = https;
            }
        }
    }

    // Read one proxy setting ("host:port", or just host) from the system
    // proxies dictionary, returning None when disabled or host is missing.
    fn parse_setting_from_dynamic_store(
        proxies_map: &CFDictionary<CFString, CFType>,
        enabled_key: CFStringRef,
        host_key: CFStringRef,
        port_key: CFStringRef,
    ) -> Option<String> {
        let proxy_enabled = proxies_map
            .find(enabled_key)
            .and_then(|flag| flag.downcast::<CFNumber>())
            .and_then(|flag| flag.to_i32())
            .unwrap_or(0)
            == 1;

        if proxy_enabled {
            let proxy_host = proxies_map
                .find(host_key)
                .and_then(|host| host.downcast::<CFString>())
                .map(|host| host.to_string());
            let proxy_port = proxies_map
                .find(port_key)
                .and_then(|port| port.downcast::<CFNumber>())
                .and_then(|port| port.to_i32());

            return match (proxy_host, proxy_port) {
                (Some(proxy_host), Some(proxy_port)) => Some(format!("{proxy_host}:{proxy_port}")),
                (Some(proxy_host), None) => Some(proxy_host),
                // A port without a host is unusable.
                (None, Some(_)) => None,
                (None, None) => None,
            };
        }

        None
    }
}
-
#[cfg(feature = "client-proxy-system")]
#[cfg(windows)]
mod win {
    // Fill any *unset* proxy values from the Windows registry (the
    // per-user WinINET settings); environment values win.
    pub(super) fn with_system(builder: &mut super::Builder) {
        let settings = if let Ok(settings) = windows_registry::CURRENT_USER
            .open("Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings")
        {
            settings
        } else {
            return;
        };

        // ProxyEnable == 0 means the user has proxying switched off.
        if settings.get_u32("ProxyEnable").unwrap_or(0) == 0 {
            return;
        }

        if let Ok(val) = settings.get_string("ProxyServer") {
            if builder.http.is_empty() {
                builder.http = val.clone();
            }
            if builder.https.is_empty() {
                builder.https = val;
            }
        }

        if builder.no.is_empty() {
            if let Ok(val) = settings.get_string("ProxyOverride") {
                // ProxyOverride is semicolon-separated and may use `*.`
                // wildcards; normalize to the comma-separated NO_PROXY form.
                builder.no = val
                    .split(';')
                    .map(|s| s.trim())
                    .collect::<Vec<&str>>()
                    .join(",")
                    .replace("*.", "");
            }
        }
    }
}
-
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_domain_matcher() {
        let domains = vec![".foo.bar".into(), "bar.foo".into()];
        let matcher = DomainMatcher(domains);

        // domains match with leading `.`
        assert!(matcher.contains("foo.bar"));
        // subdomains match with leading `.`
        assert!(matcher.contains("www.foo.bar"));

        // domains match with no leading `.`
        assert!(matcher.contains("bar.foo"));
        // subdomains match with no leading `.`
        assert!(matcher.contains("www.bar.foo"));

        // non-subdomain string prefixes don't match
        assert!(!matcher.contains("notfoo.bar"));
        assert!(!matcher.contains("notbar.foo"));
    }

    #[test]
    fn test_no_proxy_wildcard() {
        let no_proxy = NoProxy::from_string("*");
        assert!(no_proxy.contains("any.where"));
    }

    #[test]
    fn test_no_proxy_ip_ranges() {
        let no_proxy =
            NoProxy::from_string(".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17");

        let should_not_match = [
            // random url, not in no_proxy
            "hyper.rs",
            // make sure that random non-subdomain string prefixes don't match
            "notfoo.bar",
            // make sure that random non-subdomain string prefixes don't match
            "notbar.baz",
            // ipv4 address out of range
            "10.43.1.1",
            // ipv4 address out of range
            "10.124.7.7",
            // ipv6 address out of range
            "[ffff:db8:a0b:12f0::1]",
            // ipv6 address out of range
            "[2005:db8:a0b:12f0::1]",
        ];

        for host in &should_not_match {
            assert!(!no_proxy.contains(host), "should not contain {host:?}");
        }

        let should_match = [
            // make sure subdomains (with leading .) match
            "hello.foo.bar",
            // make sure exact matches (without leading .) match (also makes sure spaces between entries work)
            "bar.baz",
            // make sure subdomains (without leading . in no_proxy) match
            "foo.bar.baz",
            // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
            "foo.bar",
            // ipv4 address match within range
            "10.42.1.100",
            // ipv6 address exact match
            "[::1]",
            // ipv6 address match within range
            "[2001:db8:a0b:12f0::1]",
            // ipv4 address exact match
            "10.124.7.8",
        ];

        for host in &should_match {
            assert!(no_proxy.contains(host), "should contain {host:?}");
        }
    }

    // Build a Matcher directly from raw builder fields, bypassing the env.
    macro_rules! p {
        ($($n:ident = $v:expr,)*) => ({Builder {
            $($n: $v.into(),)*
            ..Builder::default()
        }.build()});
    }

    // Intercept `u` with matcher `p`, panicking if no proxy matched.
    fn intercept(p: &Matcher, u: &str) -> Intercept {
        p.intercept(&u.parse().unwrap()).unwrap()
    }

    #[test]
    fn test_all_proxy() {
        let p = p! {
            all = "http://om.nom",
        };

        assert_eq!("http://om.nom", intercept(&p, "http://example.com").uri());

        assert_eq!("http://om.nom", intercept(&p, "https://example.com").uri());
    }

    #[test]
    fn test_specific_overrides_all() {
        let p = p! {
            all = "http://no.pe",
            http = "http://y.ep",
        };

        assert_eq!("http://no.pe", intercept(&p, "https://example.com").uri());

        // the http rule is "more specific" than the all rule
        assert_eq!("http://y.ep", intercept(&p, "http://example.com").uri());
    }

    #[test]
    fn test_parse_no_scheme_defaults_to_http() {
        let p = p! {
            https = "y.ep",
            http = "127.0.0.1:8887",
        };

        assert_eq!(intercept(&p, "https://example.local").uri(), "http://y.ep");
        assert_eq!(
            intercept(&p, "http://example.local").uri(),
            "http://127.0.0.1:8887"
        );
    }

    #[test]
    fn test_parse_http_auth() {
        let p = p! {
            all = "http://Aladdin:opensesame@y.ep",
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }

    #[test]
    fn test_parse_http_auth_without_scheme() {
        let p = p! {
            all = "Aladdin:opensesame@y.ep",
        };

        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }

    #[test]
    fn test_dont_parse_http_when_is_cgi() {
        let mut builder = Matcher::builder();
        builder.is_cgi = true;
        builder.http = "http://never.gonna.let.you.go".into();
        let m = builder.build();

        assert!(m.intercept(&"http://rick.roll".parse().unwrap()).is_none());
    }
}
diff --git a/vendor/hyper-util/src/client/proxy/mod.rs b/vendor/hyper-util/src/client/proxy/mod.rs
deleted file mode 100644
index 59c8e46d..00000000
--- a/vendor/hyper-util/src/client/proxy/mod.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-//! Proxy utilities
-
-pub mod matcher;
diff --git a/vendor/hyper-util/src/client/service.rs b/vendor/hyper-util/src/client/service.rs
deleted file mode 100644
index 580fb105..00000000
--- a/vendor/hyper-util/src/client/service.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-struct ConnectingPool<C, P> {
- connector: C,
- pool: P,
-}
-
-struct PoolableSvc<S>(S);
-
-
diff --git a/vendor/hyper-util/src/common/exec.rs b/vendor/hyper-util/src/common/exec.rs
deleted file mode 100644
index 40860ee1..00000000
--- a/vendor/hyper-util/src/common/exec.rs
+++ /dev/null
@@ -1,53 +0,0 @@
-#![allow(dead_code)]
-
-use hyper::rt::Executor;
-use std::fmt;
-use std::future::Future;
-use std::pin::Pin;
-use std::sync::Arc;
-
-pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
-
-// Either the user provides an executor for background tasks, or we use
-// `tokio::spawn`.
-#[derive(Clone)]
-pub(crate) enum Exec {
- Executor(Arc<dyn Executor<BoxSendFuture> + Send + Sync>),
-}
-
-// ===== impl Exec =====
-
-impl Exec {
- pub(crate) fn new<E>(inner: E) -> Self
- where
- E: Executor<BoxSendFuture> + Send + Sync + 'static,
- {
- Exec::Executor(Arc::new(inner))
- }
-
- pub(crate) fn execute<F>(&self, fut: F)
- where
- F: Future<Output = ()> + Send + 'static,
- {
- match *self {
- Exec::Executor(ref e) => {
- e.execute(Box::pin(fut));
- }
- }
- }
-}
-
-impl fmt::Debug for Exec {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Exec").finish()
- }
-}
-
-impl<F> hyper::rt::Executor<F> for Exec
-where
- F: Future<Output = ()> + Send + 'static,
-{
- fn execute(&self, fut: F) {
- Exec::execute(self, fut);
- }
-}
diff --git a/vendor/hyper-util/src/common/future.rs b/vendor/hyper-util/src/common/future.rs
deleted file mode 100644
index 47897f24..00000000
--- a/vendor/hyper-util/src/common/future.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-// TODO: replace with `std::future::poll_fn` once MSRV >= 1.64
-pub(crate) fn poll_fn<T, F>(f: F) -> PollFn<F>
-where
- F: FnMut(&mut Context<'_>) -> Poll<T>,
-{
- PollFn { f }
-}
-
-pub(crate) struct PollFn<F> {
- f: F,
-}
-
-impl<F> Unpin for PollFn<F> {}
-
-impl<T, F> Future for PollFn<F>
-where
- F: FnMut(&mut Context<'_>) -> Poll<T>,
-{
- type Output = T;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- (self.f)(cx)
- }
-}
diff --git a/vendor/hyper-util/src/common/lazy.rs b/vendor/hyper-util/src/common/lazy.rs
deleted file mode 100644
index 7ec09bbe..00000000
--- a/vendor/hyper-util/src/common/lazy.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-use pin_project_lite::pin_project;
-
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{self, Poll};
-
-pub(crate) trait Started: Future {
- fn started(&self) -> bool;
-}
-
-pub(crate) fn lazy<F, R>(func: F) -> Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future + Unpin,
-{
- Lazy {
- inner: Inner::Init { func },
- }
-}
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub(crate) struct Lazy<F, R> {
- #[pin]
- inner: Inner<F, R>,
- }
-}
-
-pin_project! {
- #[project = InnerProj]
- #[project_replace = InnerProjReplace]
- enum Inner<F, R> {
- Init { func: F },
- Fut { #[pin] fut: R },
- Empty,
- }
-}
-
-impl<F, R> Started for Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future,
-{
- fn started(&self) -> bool {
- match self.inner {
- Inner::Init { .. } => false,
- Inner::Fut { .. } | Inner::Empty => true,
- }
- }
-}
-
-impl<F, R> Future for Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future,
-{
- type Output = R::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- if let InnerProj::Fut { fut } = this.inner.as_mut().project() {
- return fut.poll(cx);
- }
-
- match this.inner.as_mut().project_replace(Inner::Empty) {
- InnerProjReplace::Init { func } => {
- this.inner.set(Inner::Fut { fut: func() });
- if let InnerProj::Fut { fut } = this.inner.project() {
- return fut.poll(cx);
- }
- unreachable!()
- }
- _ => unreachable!("lazy state wrong"),
- }
- }
-}
diff --git a/vendor/hyper-util/src/common/mod.rs b/vendor/hyper-util/src/common/mod.rs
deleted file mode 100644
index b45cd0b2..00000000
--- a/vendor/hyper-util/src/common/mod.rs
+++ /dev/null
@@ -1,19 +0,0 @@
-#![allow(missing_docs)]
-
-pub(crate) mod exec;
-#[cfg(feature = "client")]
-mod lazy;
-pub(crate) mod rewind;
-#[cfg(feature = "client")]
-mod sync;
-pub(crate) mod timer;
-
-#[cfg(feature = "client")]
-pub(crate) use exec::Exec;
-
-#[cfg(feature = "client")]
-pub(crate) use lazy::{lazy, Started as Lazy};
-#[cfg(feature = "client")]
-pub(crate) use sync::SyncWrapper;
-
-pub(crate) mod future;
diff --git a/vendor/hyper-util/src/common/rewind.rs b/vendor/hyper-util/src/common/rewind.rs
deleted file mode 100644
index 760d7966..00000000
--- a/vendor/hyper-util/src/common/rewind.rs
+++ /dev/null
@@ -1,137 +0,0 @@
-use std::{cmp, io};
-
-use bytes::{Buf, Bytes};
-use hyper::rt::{Read, ReadBufCursor, Write};
-
-use std::{
- pin::Pin,
- task::{self, Poll},
-};
-
-/// Combine a buffer with an IO, rewinding reads to use the buffer.
-#[derive(Debug)]
-pub(crate) struct Rewind<T> {
- pub(crate) pre: Option<Bytes>,
- pub(crate) inner: T,
-}
-
-impl<T> Rewind<T> {
- #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
- pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self {
- Rewind {
- pre: Some(buf),
- inner: io,
- }
- }
-}
-
-impl<T> Read for Rewind<T>
-where
- T: Read + Unpin,
-{
- fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- mut buf: ReadBufCursor<'_>,
- ) -> Poll<io::Result<()>> {
- if let Some(mut prefix) = self.pre.take() {
- // If there are no remaining bytes, let the bytes get dropped.
- if !prefix.is_empty() {
- let copy_len = cmp::min(prefix.len(), buf.remaining());
- buf.put_slice(&prefix[..copy_len]);
- prefix.advance(copy_len);
- // Put back what's left
- if !prefix.is_empty() {
- self.pre = Some(prefix);
- }
-
- return Poll::Ready(Ok(()));
- }
- }
- Pin::new(&mut self.inner).poll_read(cx, buf)
- }
-}
-
-impl<T> Write for Rewind<T>
-where
- T: Write + Unpin,
-{
- fn poll_write(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &[u8],
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_write(cx, buf)
- }
-
- fn poll_write_vectored(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- bufs: &[io::IoSlice<'_>],
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
- }
-
- fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
- Pin::new(&mut self.inner).poll_flush(cx)
- }
-
- fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
- Pin::new(&mut self.inner).poll_shutdown(cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- self.inner.is_write_vectored()
- }
-}
-
-/*
-#[cfg(test)]
-mod tests {
- use super::Rewind;
- use bytes::Bytes;
- use tokio::io::AsyncReadExt;
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn partial_rewind() {
- let underlying = [104, 101, 108, 108, 111];
-
- let mock = tokio_test::io::Builder::new().read(&underlying).build();
-
- let mut stream = Rewind::new(mock);
-
- // Read off some bytes, ensure we filled o1
- let mut buf = [0; 2];
- stream.read_exact(&mut buf).await.expect("read1");
-
- // Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::copy_from_slice(&buf[..]));
-
- let mut buf = [0; 5];
- stream.read_exact(&mut buf).await.expect("read1");
-
- // At this point we should have read everything that was in the MockStream
- assert_eq!(&buf, &underlying);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn full_rewind() {
- let underlying = [104, 101, 108, 108, 111];
-
- let mock = tokio_test::io::Builder::new().read(&underlying).build();
-
- let mut stream = Rewind::new(mock);
-
- let mut buf = [0; 5];
- stream.read_exact(&mut buf).await.expect("read1");
-
- // Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::copy_from_slice(&buf[..]));
-
- let mut buf = [0; 5];
- stream.read_exact(&mut buf).await.expect("read1");
- }
-}
-*/
diff --git a/vendor/hyper-util/src/common/sync.rs b/vendor/hyper-util/src/common/sync.rs
deleted file mode 100644
index 2755fd05..00000000
--- a/vendor/hyper-util/src/common/sync.rs
+++ /dev/null
@@ -1,67 +0,0 @@
-pub(crate) struct SyncWrapper<T>(T);
-
-impl<T> SyncWrapper<T> {
- /// Creates a new SyncWrapper containing the given value.
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let wrapped = SyncWrapper::new(42);
- /// ```
- pub(crate) fn new(value: T) -> Self {
- Self(value)
- }
-
- /// Acquires a reference to the protected value.
- ///
- /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method
- /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which
- /// returns an error if another thread panicked while holding the lock. It is not recommended
- /// to send an exclusive reference to a potentially damaged value to another thread for further
- /// processing.
- ///
- /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let mut wrapped = SyncWrapper::new(42);
- /// let value = wrapped.get_mut();
- /// *value = 0;
- /// assert_eq!(*wrapped.get_mut(), 0);
- /// ```
- pub(crate) fn get_mut(&mut self) -> &mut T {
- &mut self.0
- }
-
- /// Consumes this wrapper, returning the underlying data.
- ///
- /// This is safe because it requires ownership of the wrapper, aherefore this method will neither
- /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which
- /// returns an error if another thread panicked while holding the lock. It is not recommended
- /// to send an exclusive reference to a potentially damaged value to another thread for further
- /// processing.
- ///
- /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let mut wrapped = SyncWrapper::new(42);
- /// assert_eq!(wrapped.into_inner(), 42);
- /// ```
- #[allow(dead_code)]
- pub(crate) fn into_inner(self) -> T {
- self.0
- }
-}
-
-// this is safe because the only operations permitted on this data structure require exclusive
-// access or ownership
-unsafe impl<T: Send> Sync for SyncWrapper<T> {}
diff --git a/vendor/hyper-util/src/common/timer.rs b/vendor/hyper-util/src/common/timer.rs
deleted file mode 100644
index 390be3b0..00000000
--- a/vendor/hyper-util/src/common/timer.rs
+++ /dev/null
@@ -1,38 +0,0 @@
-#![allow(dead_code)]
-
-use std::fmt;
-use std::pin::Pin;
-use std::sync::Arc;
-use std::time::Duration;
-use std::time::Instant;
-
-use hyper::rt::Sleep;
-
-#[derive(Clone)]
-pub(crate) struct Timer(Arc<dyn hyper::rt::Timer + Send + Sync>);
-
-// =====impl Timer=====
-impl Timer {
- pub(crate) fn new<T>(inner: T) -> Self
- where
- T: hyper::rt::Timer + Send + Sync + 'static,
- {
- Self(Arc::new(inner))
- }
-}
-
-impl fmt::Debug for Timer {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Timer").finish()
- }
-}
-
-impl hyper::rt::Timer for Timer {
- fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
- self.0.sleep(duration)
- }
-
- fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
- self.0.sleep_until(deadline)
- }
-}
diff --git a/vendor/hyper-util/src/error.rs b/vendor/hyper-util/src/error.rs
deleted file mode 100644
index d1894495..00000000
--- a/vendor/hyper-util/src/error.rs
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
-use std::error::Error;
-
-pub(crate) fn find<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> {
- let mut err = Some(top);
- while let Some(src) = err {
- if src.is::<E>() {
- return src.downcast_ref();
- }
- err = src.source();
- }
- None
-}
-*/
diff --git a/vendor/hyper-util/src/lib.rs b/vendor/hyper-util/src/lib.rs
deleted file mode 100644
index ac8f89b1..00000000
--- a/vendor/hyper-util/src/lib.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-#![deny(missing_docs)]
-#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))]
-
-//! Utilities for working with hyper.
-//!
-//! This crate is less-stable than [`hyper`](https://docs.rs/hyper). However,
-//! does respect Rust's semantic version regarding breaking changes.
-
-#[cfg(feature = "client")]
-pub mod client;
-mod common;
-pub mod rt;
-#[cfg(feature = "server")]
-pub mod server;
-#[cfg(any(feature = "service", feature = "client-legacy"))]
-pub mod service;
-
-mod error;
diff --git a/vendor/hyper-util/src/rt/io.rs b/vendor/hyper-util/src/rt/io.rs
deleted file mode 100644
index 888756f6..00000000
--- a/vendor/hyper-util/src/rt/io.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-use std::marker::Unpin;
-use std::pin::Pin;
-use std::task::Poll;
-
-use futures_core::ready;
-use hyper::rt::{Read, ReadBuf, Write};
-
-use crate::common::future::poll_fn;
-
-pub(crate) async fn read<T>(io: &mut T, buf: &mut [u8]) -> Result<usize, std::io::Error>
-where
- T: Read + Unpin,
-{
- poll_fn(move |cx| {
- let mut buf = ReadBuf::new(buf);
- ready!(Pin::new(&mut *io).poll_read(cx, buf.unfilled()))?;
- Poll::Ready(Ok(buf.filled().len()))
- })
- .await
-}
-
-pub(crate) async fn write_all<T>(io: &mut T, buf: &[u8]) -> Result<(), std::io::Error>
-where
- T: Write + Unpin,
-{
- let mut n = 0;
- poll_fn(move |cx| {
- while n < buf.len() {
- n += ready!(Pin::new(&mut *io).poll_write(cx, &buf[n..])?);
- }
- Poll::Ready(Ok(()))
- })
- .await
-}
diff --git a/vendor/hyper-util/src/rt/mod.rs b/vendor/hyper-util/src/rt/mod.rs
deleted file mode 100644
index 71363ccd..00000000
--- a/vendor/hyper-util/src/rt/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-//! Runtime utilities
-
-#[cfg(feature = "client-legacy")]
-mod io;
-#[cfg(feature = "client-legacy")]
-pub(crate) use self::io::{read, write_all};
-
-#[cfg(feature = "tokio")]
-pub mod tokio;
-
-#[cfg(feature = "tokio")]
-pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer};
diff --git a/vendor/hyper-util/src/rt/tokio.rs b/vendor/hyper-util/src/rt/tokio.rs
deleted file mode 100644
index 46ffeba8..00000000
--- a/vendor/hyper-util/src/rt/tokio.rs
+++ /dev/null
@@ -1,339 +0,0 @@
-//! [`tokio`] runtime components integration for [`hyper`].
-//!
-//! [`hyper::rt`] exposes a set of traits to allow hyper to be agnostic to
-//! its underlying asynchronous runtime. This submodule provides glue for
-//! [`tokio`] users to bridge those types to [`hyper`]'s interfaces.
-//!
-//! # IO
-//!
-//! [`hyper`] abstracts over asynchronous readers and writers using [`Read`]
-//! and [`Write`], while [`tokio`] abstracts over this using [`AsyncRead`]
-//! and [`AsyncWrite`]. This submodule provides a collection of IO adaptors
-//! to bridge these two IO ecosystems together: [`TokioIo<I>`],
-//! [`WithHyperIo<I>`], and [`WithTokioIo<I>`].
-//!
-//! To compare and constrast these IO adaptors and to help explain which
-//! is the proper choice for your needs, here is a table showing which IO
-//! traits these implement, given two types `T` and `H` which implement
-//! Tokio's and Hyper's corresponding IO traits:
-//!
-//! | | [`AsyncRead`] | [`AsyncWrite`] | [`Read`] | [`Write`] |
-//! |--------------------|------------------|-------------------|--------------|--------------|
-//! | `T` | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** |
-//! | `H` | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** |
-//! | [`TokioIo<T>`] | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** |
-//! | [`TokioIo<H>`] | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** |
-//! | [`WithHyperIo<T>`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** |
-//! | [`WithHyperIo<H>`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** |
-//! | [`WithTokioIo<T>`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** |
-//! | [`WithTokioIo<H>`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** |
-//!
-//! For most situations, [`TokioIo<I>`] is the proper choice. This should be
-//! constructed, wrapping some underlying [`hyper`] or [`tokio`] IO, at the
-//! call-site of a function like [`hyper::client::conn::http1::handshake`].
-//!
-//! [`TokioIo<I>`] switches across these ecosystems, but notably does not
-//! preserve the existing IO trait implementations of its underlying IO. If
-//! one wishes to _extend_ IO with additional implementations,
-//! [`WithHyperIo<I>`] and [`WithTokioIo<I>`] are the correct choice.
-//!
-//! For example, a Tokio reader/writer can be wrapped in [`WithHyperIo<I>`].
-//! That will implement _both_ sets of IO traits. Conversely,
-//! [`WithTokioIo<I>`] will implement both sets of IO traits given a
-//! reader/writer that implements Hyper's [`Read`] and [`Write`].
-//!
-//! See [`tokio::io`] and ["_Asynchronous IO_"][tokio-async-docs] for more
-//! information.
-//!
-//! [`AsyncRead`]: tokio::io::AsyncRead
-//! [`AsyncWrite`]: tokio::io::AsyncWrite
-//! [`Read`]: hyper::rt::Read
-//! [`Write`]: hyper::rt::Write
-//! [tokio-async-docs]: https://docs.rs/tokio/latest/tokio/#asynchronous-io
-
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
- time::{Duration, Instant},
-};
-
-use hyper::rt::{Executor, Sleep, Timer};
-use pin_project_lite::pin_project;
-
-#[cfg(feature = "tracing")]
-use tracing::instrument::Instrument;
-
-pub use self::{with_hyper_io::WithHyperIo, with_tokio_io::WithTokioIo};
-
-mod with_hyper_io;
-mod with_tokio_io;
-
-/// Future executor that utilises `tokio` threads.
-#[non_exhaustive]
-#[derive(Default, Debug, Clone)]
-pub struct TokioExecutor {}
-
-pin_project! {
- /// A wrapper that implements Tokio's IO traits for an inner type that
- /// implements hyper's IO traits, or vice versa (implements hyper's IO
- /// traits for a type that implements Tokio's IO traits).
- #[derive(Debug)]
- pub struct TokioIo<T> {
- #[pin]
- inner: T,
- }
-}
-
-/// A Timer that uses the tokio runtime.
-#[non_exhaustive]
-#[derive(Default, Clone, Debug)]
-pub struct TokioTimer;
-
-// Use TokioSleep to get tokio::time::Sleep to implement Unpin.
-// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
-pin_project! {
- #[derive(Debug)]
- struct TokioSleep {
- #[pin]
- inner: tokio::time::Sleep,
- }
-}
-
-// ===== impl TokioExecutor =====
-
-impl<Fut> Executor<Fut> for TokioExecutor
-where
- Fut: Future + Send + 'static,
- Fut::Output: Send + 'static,
-{
- fn execute(&self, fut: Fut) {
- #[cfg(feature = "tracing")]
- tokio::spawn(fut.in_current_span());
-
- #[cfg(not(feature = "tracing"))]
- tokio::spawn(fut);
- }
-}
-
-impl TokioExecutor {
- /// Create new executor that relies on [`tokio::spawn`] to execute futures.
- pub fn new() -> Self {
- Self {}
- }
-}
-
-// ==== impl TokioIo =====
-
-impl<T> TokioIo<T> {
- /// Wrap a type implementing Tokio's or hyper's IO traits.
- pub fn new(inner: T) -> Self {
- Self { inner }
- }
-
- /// Borrow the inner type.
- pub fn inner(&self) -> &T {
- &self.inner
- }
-
- /// Mut borrow the inner type.
- pub fn inner_mut(&mut self) -> &mut T {
- &mut self.inner
- }
-
- /// Consume this wrapper and get the inner type.
- pub fn into_inner(self) -> T {
- self.inner
- }
-}
-
-impl<T> hyper::rt::Read for TokioIo<T>
-where
- T: tokio::io::AsyncRead,
-{
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- mut buf: hyper::rt::ReadBufCursor<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- let n = unsafe {
- let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
- match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) {
- Poll::Ready(Ok(())) => tbuf.filled().len(),
- other => return other,
- }
- };
-
- unsafe {
- buf.advance(n);
- }
- Poll::Ready(Ok(()))
- }
-}
-
-impl<T> hyper::rt::Write for TokioIo<T>
-where
- T: tokio::io::AsyncWrite,
-{
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf)
- }
-
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- tokio::io::AsyncWrite::poll_flush(self.project().inner, cx)
- }
-
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- tokio::io::AsyncWrite::is_write_vectored(&self.inner)
- }
-
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs)
- }
-}
-
-impl<T> tokio::io::AsyncRead for TokioIo<T>
-where
- T: hyper::rt::Read,
-{
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- tbuf: &mut tokio::io::ReadBuf<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- //let init = tbuf.initialized().len();
- let filled = tbuf.filled().len();
- let sub_filled = unsafe {
- let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut());
-
- match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) {
- Poll::Ready(Ok(())) => buf.filled().len(),
- other => return other,
- }
- };
-
- let n_filled = filled + sub_filled;
- // At least sub_filled bytes had to have been initialized.
- let n_init = sub_filled;
- unsafe {
- tbuf.assume_init(n_init);
- tbuf.set_filled(n_filled);
- }
-
- Poll::Ready(Ok(()))
- }
-}
-
-impl<T> tokio::io::AsyncWrite for TokioIo<T>
-where
- T: hyper::rt::Write,
-{
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write(self.project().inner, cx, buf)
- }
-
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_flush(self.project().inner, cx)
- }
-
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_shutdown(self.project().inner, cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- hyper::rt::Write::is_write_vectored(&self.inner)
- }
-
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
- }
-}
-
-// ==== impl TokioTimer =====
-
-impl Timer for TokioTimer {
- fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
- Box::pin(TokioSleep {
- inner: tokio::time::sleep(duration),
- })
- }
-
- fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
- Box::pin(TokioSleep {
- inner: tokio::time::sleep_until(deadline.into()),
- })
- }
-
- fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
- if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
- sleep.reset(new_deadline)
- }
- }
-}
-
-impl TokioTimer {
- /// Create a new TokioTimer
- pub fn new() -> Self {
- Self {}
- }
-}
-
-impl Future for TokioSleep {
- type Output = ();
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().inner.poll(cx)
- }
-}
-
-impl Sleep for TokioSleep {}
-
-impl TokioSleep {
- fn reset(self: Pin<&mut Self>, deadline: Instant) {
- self.project().inner.as_mut().reset(deadline.into());
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::rt::TokioExecutor;
- use hyper::rt::Executor;
- use tokio::sync::oneshot;
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn simple_execute() -> Result<(), Box<dyn std::error::Error>> {
- let (tx, rx) = oneshot::channel();
- let executor = TokioExecutor::new();
- executor.execute(async move {
- tx.send(()).unwrap();
- });
- rx.await.map_err(Into::into)
- }
-}
diff --git a/vendor/hyper-util/src/rt/tokio/with_hyper_io.rs b/vendor/hyper-util/src/rt/tokio/with_hyper_io.rs
deleted file mode 100644
index 9c5072d4..00000000
--- a/vendor/hyper-util/src/rt/tokio/with_hyper_io.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-
-pin_project! {
- /// Extends an underlying [`tokio`] I/O with [`hyper`] I/O implementations.
- ///
- /// This implements [`Read`] and [`Write`] given an inner type that implements [`AsyncRead`]
- /// and [`AsyncWrite`], respectively.
- #[derive(Debug)]
- pub struct WithHyperIo<I> {
- #[pin]
- inner: I,
- }
-}
-
-// ==== impl WithHyperIo =====
-
-impl<I> WithHyperIo<I> {
- /// Wraps the inner I/O in an [`WithHyperIo<I>`]
- pub fn new(inner: I) -> Self {
- Self { inner }
- }
-
- /// Returns a reference to the inner type.
- pub fn inner(&self) -> &I {
- &self.inner
- }
-
- /// Returns a mutable reference to the inner type.
- pub fn inner_mut(&mut self) -> &mut I {
- &mut self.inner
- }
-
- /// Consumes this wrapper and returns the inner type.
- pub fn into_inner(self) -> I {
- self.inner
- }
-}
-
-/// [`WithHyperIo<I>`] is [`Read`] if `I` is [`AsyncRead`].
-///
-/// [`AsyncRead`]: tokio::io::AsyncRead
-/// [`Read`]: hyper::rt::Read
-impl<I> hyper::rt::Read for WithHyperIo<I>
-where
- I: tokio::io::AsyncRead,
-{
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- mut buf: hyper::rt::ReadBufCursor<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- let n = unsafe {
- let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
- match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) {
- Poll::Ready(Ok(())) => tbuf.filled().len(),
- other => return other,
- }
- };
-
- unsafe {
- buf.advance(n);
- }
- Poll::Ready(Ok(()))
- }
-}
-
-/// [`WithHyperIo<I>`] is [`Write`] if `I` is [`AsyncWrite`].
-///
-/// [`AsyncWrite`]: tokio::io::AsyncWrite
-/// [`Write`]: hyper::rt::Write
-impl<I> hyper::rt::Write for WithHyperIo<I>
-where
- I: tokio::io::AsyncWrite,
-{
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf)
- }
-
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- tokio::io::AsyncWrite::poll_flush(self.project().inner, cx)
- }
-
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- tokio::io::AsyncWrite::is_write_vectored(&self.inner)
- }
-
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs)
- }
-}
-
-/// [`WithHyperIo<I>`] exposes its inner `I`'s [`AsyncRead`] implementation.
-///
-/// [`AsyncRead`]: tokio::io::AsyncRead
-impl<I> tokio::io::AsyncRead for WithHyperIo<I>
-where
- I: tokio::io::AsyncRead,
-{
- #[inline]
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &mut tokio::io::ReadBuf<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_read(cx, buf)
- }
-}
-
-/// [`WithHyperIo<I>`] exposes its inner `I`'s [`AsyncWrite`] implementation.
-///
-/// [`AsyncWrite`]: tokio::io::AsyncWrite
-impl<I> tokio::io::AsyncWrite for WithHyperIo<I>
-where
- I: tokio::io::AsyncWrite,
-{
- #[inline]
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- self.project().inner.poll_write(cx, buf)
- }
-
- #[inline]
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_flush(cx)
- }
-
- #[inline]
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_shutdown(cx)
- }
-
- #[inline]
- fn is_write_vectored(&self) -> bool {
- self.inner.is_write_vectored()
- }
-
- #[inline]
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- self.project().inner.poll_write_vectored(cx, bufs)
- }
-}
diff --git a/vendor/hyper-util/src/rt/tokio/with_tokio_io.rs b/vendor/hyper-util/src/rt/tokio/with_tokio_io.rs
deleted file mode 100644
index 223e0ed3..00000000
--- a/vendor/hyper-util/src/rt/tokio/with_tokio_io.rs
+++ /dev/null
@@ -1,178 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
- pin::Pin,
- task::{Context, Poll},
-};
-
-pin_project! {
- /// Extends an underlying [`hyper`] I/O with [`tokio`] I/O implementations.
- ///
- /// This implements [`AsyncRead`] and [`AsyncWrite`] given an inner type that implements
- /// [`Read`] and [`Write`], respectively.
- #[derive(Debug)]
- pub struct WithTokioIo<I> {
- #[pin]
- inner: I,
- }
-}
-
-// ==== impl WithTokioIo =====
-
-/// [`WithTokioIo<I>`] is [`AsyncRead`] if `I` is [`Read`].
-///
-/// [`AsyncRead`]: tokio::io::AsyncRead
-/// [`Read`]: hyper::rt::Read
-impl<I> tokio::io::AsyncRead for WithTokioIo<I>
-where
- I: hyper::rt::Read,
-{
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- tbuf: &mut tokio::io::ReadBuf<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- //let init = tbuf.initialized().len();
- let filled = tbuf.filled().len();
- let sub_filled = unsafe {
- let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut());
-
- match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) {
- Poll::Ready(Ok(())) => buf.filled().len(),
- other => return other,
- }
- };
-
- let n_filled = filled + sub_filled;
- // At least sub_filled bytes had to have been initialized.
- let n_init = sub_filled;
- unsafe {
- tbuf.assume_init(n_init);
- tbuf.set_filled(n_filled);
- }
-
- Poll::Ready(Ok(()))
- }
-}
-
-/// [`WithTokioIo<I>`] is [`AsyncWrite`] if `I` is [`Write`].
-///
-/// [`AsyncWrite`]: tokio::io::AsyncWrite
-/// [`Write`]: hyper::rt::Write
-impl<I> tokio::io::AsyncWrite for WithTokioIo<I>
-where
- I: hyper::rt::Write,
-{
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write(self.project().inner, cx, buf)
- }
-
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_flush(self.project().inner, cx)
- }
-
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_shutdown(self.project().inner, cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- hyper::rt::Write::is_write_vectored(&self.inner)
- }
-
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
- }
-}
-
-/// [`WithTokioIo<I>`] exposes its inner `I`'s [`Write`] implementation.
-///
-/// [`Write`]: hyper::rt::Write
-impl<I> hyper::rt::Write for WithTokioIo<I>
-where
- I: hyper::rt::Write,
-{
- #[inline]
- fn poll_write(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- self.project().inner.poll_write(cx, buf)
- }
-
- #[inline]
- fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_flush(cx)
- }
-
- #[inline]
- fn poll_shutdown(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_shutdown(cx)
- }
-
- #[inline]
- fn is_write_vectored(&self) -> bool {
- self.inner.is_write_vectored()
- }
-
- #[inline]
- fn poll_write_vectored(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- self.project().inner.poll_write_vectored(cx, bufs)
- }
-}
-
-impl<I> WithTokioIo<I> {
- /// Wraps the inner I/O in an [`WithTokioIo<I>`]
- pub fn new(inner: I) -> Self {
- Self { inner }
- }
-
- /// Returns a reference to the inner type.
- pub fn inner(&self) -> &I {
- &self.inner
- }
-
- /// Returns a mutable reference to the inner type.
- pub fn inner_mut(&mut self) -> &mut I {
- &mut self.inner
- }
-
- /// Consumes this wrapper and returns the inner type.
- pub fn into_inner(self) -> I {
- self.inner
- }
-}
-
-/// [`WithTokioIo<I>`] exposes its inner `I`'s [`Read`] implementation.
-///
-/// [`Read`]: hyper::rt::Read
-impl<I> hyper::rt::Read for WithTokioIo<I>
-where
- I: hyper::rt::Read,
-{
- #[inline]
- fn poll_read(
- self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: hyper::rt::ReadBufCursor<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- self.project().inner.poll_read(cx, buf)
- }
-}
diff --git a/vendor/hyper-util/src/server/conn/auto/mod.rs b/vendor/hyper-util/src/server/conn/auto/mod.rs
deleted file mode 100644
index b2fc6556..00000000
--- a/vendor/hyper-util/src/server/conn/auto/mod.rs
+++ /dev/null
@@ -1,1304 +0,0 @@
-//! Http1 or Http2 connection.
-
-pub mod upgrade;
-
-use hyper::service::HttpService;
-use std::future::Future;
-use std::marker::PhantomPinned;
-use std::mem::MaybeUninit;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use std::{error::Error as StdError, io, time::Duration};
-
-use bytes::Bytes;
-use futures_core::ready;
-use http::{Request, Response};
-use http_body::Body;
-use hyper::{
- body::Incoming,
- rt::{Read, ReadBuf, Timer, Write},
- service::Service,
-};
-
-#[cfg(feature = "http1")]
-use hyper::server::conn::http1;
-
-#[cfg(feature = "http2")]
-use hyper::{rt::bounds::Http2ServerConnExec, server::conn::http2};
-
-#[cfg(any(not(feature = "http2"), not(feature = "http1")))]
-use std::marker::PhantomData;
-
-use pin_project_lite::pin_project;
-
-use crate::common::rewind::Rewind;
-
-type Error = Box<dyn std::error::Error + Send + Sync>;
-
-type Result<T> = std::result::Result<T, Error>;
-
-const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
-
-/// Exactly equivalent to [`Http2ServerConnExec`].
-#[cfg(feature = "http2")]
-pub trait HttpServerConnExec<A, B: Body>: Http2ServerConnExec<A, B> {}
-
-#[cfg(feature = "http2")]
-impl<A, B: Body, T: Http2ServerConnExec<A, B>> HttpServerConnExec<A, B> for T {}
-
-/// Exactly equivalent to [`Http2ServerConnExec`].
-#[cfg(not(feature = "http2"))]
-pub trait HttpServerConnExec<A, B: Body> {}
-
-#[cfg(not(feature = "http2"))]
-impl<A, B: Body, T> HttpServerConnExec<A, B> for T {}
-
-/// Http1 or Http2 connection builder.
-#[derive(Clone, Debug)]
-pub struct Builder<E> {
- #[cfg(feature = "http1")]
- http1: http1::Builder,
- #[cfg(feature = "http2")]
- http2: http2::Builder<E>,
- #[cfg(any(feature = "http1", feature = "http2"))]
- version: Option<Version>,
- #[cfg(not(feature = "http2"))]
- _executor: E,
-}
-
-impl<E: Default> Default for Builder<E> {
- fn default() -> Self {
- Self::new(E::default())
- }
-}
-
-impl<E> Builder<E> {
- /// Create a new auto connection builder.
- ///
- /// `executor` parameter should be a type that implements
- /// [`Executor`](hyper::rt::Executor) trait.
- ///
- /// # Example
- ///
- /// ```
- /// use hyper_util::{
- /// rt::TokioExecutor,
- /// server::conn::auto,
- /// };
- ///
- /// auto::Builder::new(TokioExecutor::new());
- /// ```
- pub fn new(executor: E) -> Self {
- Self {
- #[cfg(feature = "http1")]
- http1: http1::Builder::new(),
- #[cfg(feature = "http2")]
- http2: http2::Builder::new(executor),
- #[cfg(any(feature = "http1", feature = "http2"))]
- version: None,
- #[cfg(not(feature = "http2"))]
- _executor: executor,
- }
- }
-
- /// Http1 configuration.
- #[cfg(feature = "http1")]
- pub fn http1(&mut self) -> Http1Builder<'_, E> {
- Http1Builder { inner: self }
- }
-
- /// Http2 configuration.
- #[cfg(feature = "http2")]
- pub fn http2(&mut self) -> Http2Builder<'_, E> {
- Http2Builder { inner: self }
- }
-
- /// Only accepts HTTP/2
- ///
- /// Does not do anything if used with [`serve_connection_with_upgrades`]
- ///
- /// [`serve_connection_with_upgrades`]: Builder::serve_connection_with_upgrades
- #[cfg(feature = "http2")]
- pub fn http2_only(mut self) -> Self {
- assert!(self.version.is_none());
- self.version = Some(Version::H2);
- self
- }
-
- /// Only accepts HTTP/1
- ///
- /// Does not do anything if used with [`serve_connection_with_upgrades`]
- ///
- /// [`serve_connection_with_upgrades`]: Builder::serve_connection_with_upgrades
- #[cfg(feature = "http1")]
- pub fn http1_only(mut self) -> Self {
- assert!(self.version.is_none());
- self.version = Some(Version::H1);
- self
- }
-
- /// Returns `true` if this builder can serve an HTTP/1.1-based connection.
- pub fn is_http1_available(&self) -> bool {
- match self.version {
- #[cfg(feature = "http1")]
- Some(Version::H1) => true,
- #[cfg(feature = "http2")]
- Some(Version::H2) => false,
- #[cfg(any(feature = "http1", feature = "http2"))]
- _ => true,
- }
- }
-
- /// Returns `true` if this builder can serve an HTTP/2-based connection.
- pub fn is_http2_available(&self) -> bool {
- match self.version {
- #[cfg(feature = "http1")]
- Some(Version::H1) => false,
- #[cfg(feature = "http2")]
- Some(Version::H2) => true,
- #[cfg(any(feature = "http1", feature = "http2"))]
- _ => true,
- }
- }
-
- /// Bind a connection together with a [`Service`].
- pub fn serve_connection<I, S, B>(&self, io: I, service: S) -> Connection<'_, I, S, E>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- let state = match self.version {
- #[cfg(feature = "http1")]
- Some(Version::H1) => {
- let io = Rewind::new_buffered(io, Bytes::new());
- let conn = self.http1.serve_connection(io, service);
- ConnState::H1 { conn }
- }
- #[cfg(feature = "http2")]
- Some(Version::H2) => {
- let io = Rewind::new_buffered(io, Bytes::new());
- let conn = self.http2.serve_connection(io, service);
- ConnState::H2 { conn }
- }
- #[cfg(any(feature = "http1", feature = "http2"))]
- _ => ConnState::ReadVersion {
- read_version: read_version(io),
- builder: Cow::Borrowed(self),
- service: Some(service),
- },
- };
-
- Connection { state }
- }
-
- /// Bind a connection together with a [`Service`], with the ability to
- /// handle HTTP upgrades. This requires that the IO object implements
- /// `Send`.
- ///
- /// Note that if you ever want to use [`hyper::upgrade::Upgraded::downcast`]
- /// with this crate, you'll need to use [`hyper_util::server::conn::auto::upgrade::downcast`]
- /// instead. See the documentation of the latter to understand why.
- ///
- /// [`hyper_util::server::conn::auto::upgrade::downcast`]: crate::server::conn::auto::upgrade::downcast
- pub fn serve_connection_with_upgrades<I, S, B>(
- &self,
- io: I,
- service: S,
- ) -> UpgradeableConnection<'_, I, S, E>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + Send + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- UpgradeableConnection {
- state: UpgradeableConnState::ReadVersion {
- read_version: read_version(io),
- builder: Cow::Borrowed(self),
- service: Some(service),
- },
- }
- }
-}
-
-#[derive(Copy, Clone, Debug)]
-enum Version {
- H1,
- H2,
-}
-
-impl Version {
- #[must_use]
- #[cfg(any(not(feature = "http2"), not(feature = "http1")))]
- pub fn unsupported(self) -> Error {
- match self {
- Version::H1 => Error::from("HTTP/1 is not supported"),
- Version::H2 => Error::from("HTTP/2 is not supported"),
- }
- }
-}
-
-fn read_version<I>(io: I) -> ReadVersion<I>
-where
- I: Read + Unpin,
-{
- ReadVersion {
- io: Some(io),
- buf: [MaybeUninit::uninit(); 24],
- filled: 0,
- version: Version::H2,
- cancelled: false,
- _pin: PhantomPinned,
- }
-}
-
-pin_project! {
- struct ReadVersion<I> {
- io: Option<I>,
- buf: [MaybeUninit<u8>; 24],
- // the amount of `buf` thats been filled
- filled: usize,
- version: Version,
- cancelled: bool,
- // Make this future `!Unpin` for compatibility with async trait methods.
- #[pin]
- _pin: PhantomPinned,
- }
-}
-
-impl<I> ReadVersion<I> {
- pub fn cancel(self: Pin<&mut Self>) {
- *self.project().cancelled = true;
- }
-}
-
-impl<I> Future for ReadVersion<I>
-where
- I: Read + Unpin,
-{
- type Output = io::Result<(Version, Rewind<I>)>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- let this = self.project();
- if *this.cancelled {
- return Poll::Ready(Err(io::Error::new(io::ErrorKind::Interrupted, "Cancelled")));
- }
-
- let mut buf = ReadBuf::uninit(&mut *this.buf);
- // SAFETY: `this.filled` tracks how many bytes have been read (and thus initialized) and
- // we're only advancing by that many.
- unsafe {
- buf.unfilled().advance(*this.filled);
- };
-
- // We start as H2 and switch to H1 as soon as we don't have the preface.
- while buf.filled().len() < H2_PREFACE.len() {
- let len = buf.filled().len();
- ready!(Pin::new(this.io.as_mut().unwrap()).poll_read(cx, buf.unfilled()))?;
- *this.filled = buf.filled().len();
-
- // We starts as H2 and switch to H1 when we don't get the preface.
- if buf.filled().len() == len
- || buf.filled()[len..] != H2_PREFACE[len..buf.filled().len()]
- {
- *this.version = Version::H1;
- break;
- }
- }
-
- let io = this.io.take().unwrap();
- let buf = buf.filled().to_vec();
- Poll::Ready(Ok((
- *this.version,
- Rewind::new_buffered(io, Bytes::from(buf)),
- )))
- }
-}
-
-pin_project! {
- /// A [`Future`](core::future::Future) representing an HTTP/1 connection, returned from
- /// [`Builder::serve_connection`](struct.Builder.html#method.serve_connection).
- ///
- /// To drive HTTP on this connection this future **must be polled**, typically with
- /// `.await`. If it isn't polled, no progress will be made on this connection.
- #[must_use = "futures do nothing unless polled"]
- pub struct Connection<'a, I, S, E>
- where
- S: HttpService<Incoming>,
- {
- #[pin]
- state: ConnState<'a, I, S, E>,
- }
-}
-
-// A custom COW, since the libstd is has ToOwned bounds that are too eager.
-enum Cow<'a, T> {
- Borrowed(&'a T),
- Owned(T),
-}
-
-impl<T> std::ops::Deref for Cow<'_, T> {
- type Target = T;
- fn deref(&self) -> &T {
- match self {
- Cow::Borrowed(t) => &*t,
- Cow::Owned(ref t) => t,
- }
- }
-}
-
-#[cfg(feature = "http1")]
-type Http1Connection<I, S> = hyper::server::conn::http1::Connection<Rewind<I>, S>;
-
-#[cfg(not(feature = "http1"))]
-type Http1Connection<I, S> = (PhantomData<I>, PhantomData<S>);
-
-#[cfg(feature = "http2")]
-type Http2Connection<I, S, E> = hyper::server::conn::http2::Connection<Rewind<I>, S, E>;
-
-#[cfg(not(feature = "http2"))]
-type Http2Connection<I, S, E> = (PhantomData<I>, PhantomData<S>, PhantomData<E>);
-
-pin_project! {
- #[project = ConnStateProj]
- enum ConnState<'a, I, S, E>
- where
- S: HttpService<Incoming>,
- {
- ReadVersion {
- #[pin]
- read_version: ReadVersion<I>,
- builder: Cow<'a, Builder<E>>,
- service: Option<S>,
- },
- H1 {
- #[pin]
- conn: Http1Connection<I, S>,
- },
- H2 {
- #[pin]
- conn: Http2Connection<I, S, E>,
- },
- }
-}
-
-impl<I, S, E, B> Connection<'_, I, S, E>
-where
- S: HttpService<Incoming, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: HttpServerConnExec<S::Future, B>,
-{
- /// Start a graceful shutdown process for this connection.
- ///
- /// This `Connection` should continue to be polled until shutdown can finish.
- ///
- /// # Note
- ///
- /// This should only be called while the `Connection` future is still pending. If called after
- /// `Connection::poll` has resolved, this does nothing.
- pub fn graceful_shutdown(self: Pin<&mut Self>) {
- match self.project().state.project() {
- ConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(),
- #[cfg(feature = "http1")]
- ConnStateProj::H1 { conn } => conn.graceful_shutdown(),
- #[cfg(feature = "http2")]
- ConnStateProj::H2 { conn } => conn.graceful_shutdown(),
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- }
- }
-
- /// Make this Connection static, instead of borrowing from Builder.
- pub fn into_owned(self) -> Connection<'static, I, S, E>
- where
- Builder<E>: Clone,
- {
- Connection {
- state: match self.state {
- ConnState::ReadVersion {
- read_version,
- builder,
- service,
- } => ConnState::ReadVersion {
- read_version,
- service,
- builder: Cow::Owned(builder.clone()),
- },
- #[cfg(feature = "http1")]
- ConnState::H1 { conn } => ConnState::H1 { conn },
- #[cfg(feature = "http2")]
- ConnState::H2 { conn } => ConnState::H2 { conn },
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- },
- }
- }
-}
-
-impl<I, S, E, B> Future for Connection<'_, I, S, E>
-where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + 'static,
- E: HttpServerConnExec<S::Future, B>,
-{
- type Output = Result<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- loop {
- let mut this = self.as_mut().project();
-
- match this.state.as_mut().project() {
- ConnStateProj::ReadVersion {
- read_version,
- builder,
- service,
- } => {
- let (version, io) = ready!(read_version.poll(cx))?;
- let service = service.take().unwrap();
- match version {
- #[cfg(feature = "http1")]
- Version::H1 => {
- let conn = builder.http1.serve_connection(io, service);
- this.state.set(ConnState::H1 { conn });
- }
- #[cfg(feature = "http2")]
- Version::H2 => {
- let conn = builder.http2.serve_connection(io, service);
- this.state.set(ConnState::H2 { conn });
- }
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => return Poll::Ready(Err(version.unsupported())),
- }
- }
- #[cfg(feature = "http1")]
- ConnStateProj::H1 { conn } => {
- return conn.poll(cx).map_err(Into::into);
- }
- #[cfg(feature = "http2")]
- ConnStateProj::H2 { conn } => {
- return conn.poll(cx).map_err(Into::into);
- }
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- }
- }
- }
-}
-
-pin_project! {
- /// An upgradable [`Connection`], returned by
- /// [`Builder::serve_upgradable_connection`](struct.Builder.html#method.serve_connection_with_upgrades).
- ///
- /// To drive HTTP on this connection this future **must be polled**, typically with
- /// `.await`. If it isn't polled, no progress will be made on this connection.
- #[must_use = "futures do nothing unless polled"]
- pub struct UpgradeableConnection<'a, I, S, E>
- where
- S: HttpService<Incoming>,
- {
- #[pin]
- state: UpgradeableConnState<'a, I, S, E>,
- }
-}
-
-#[cfg(feature = "http1")]
-type Http1UpgradeableConnection<I, S> = hyper::server::conn::http1::UpgradeableConnection<I, S>;
-
-#[cfg(not(feature = "http1"))]
-type Http1UpgradeableConnection<I, S> = (PhantomData<I>, PhantomData<S>);
-
-pin_project! {
- #[project = UpgradeableConnStateProj]
- enum UpgradeableConnState<'a, I, S, E>
- where
- S: HttpService<Incoming>,
- {
- ReadVersion {
- #[pin]
- read_version: ReadVersion<I>,
- builder: Cow<'a, Builder<E>>,
- service: Option<S>,
- },
- H1 {
- #[pin]
- conn: Http1UpgradeableConnection<Rewind<I>, S>,
- },
- H2 {
- #[pin]
- conn: Http2Connection<I, S, E>,
- },
- }
-}
-
-impl<I, S, E, B> UpgradeableConnection<'_, I, S, E>
-where
- S: HttpService<Incoming, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: HttpServerConnExec<S::Future, B>,
-{
- /// Start a graceful shutdown process for this connection.
- ///
- /// This `UpgradeableConnection` should continue to be polled until shutdown can finish.
- ///
- /// # Note
- ///
- /// This should only be called while the `Connection` future is still nothing. pending. If
- /// called after `UpgradeableConnection::poll` has resolved, this does nothing.
- pub fn graceful_shutdown(self: Pin<&mut Self>) {
- match self.project().state.project() {
- UpgradeableConnStateProj::ReadVersion { read_version, .. } => read_version.cancel(),
- #[cfg(feature = "http1")]
- UpgradeableConnStateProj::H1 { conn } => conn.graceful_shutdown(),
- #[cfg(feature = "http2")]
- UpgradeableConnStateProj::H2 { conn } => conn.graceful_shutdown(),
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- }
- }
-
- /// Make this Connection static, instead of borrowing from Builder.
- pub fn into_owned(self) -> UpgradeableConnection<'static, I, S, E>
- where
- Builder<E>: Clone,
- {
- UpgradeableConnection {
- state: match self.state {
- UpgradeableConnState::ReadVersion {
- read_version,
- builder,
- service,
- } => UpgradeableConnState::ReadVersion {
- read_version,
- service,
- builder: Cow::Owned(builder.clone()),
- },
- #[cfg(feature = "http1")]
- UpgradeableConnState::H1 { conn } => UpgradeableConnState::H1 { conn },
- #[cfg(feature = "http2")]
- UpgradeableConnState::H2 { conn } => UpgradeableConnState::H2 { conn },
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- },
- }
- }
-}
-
-impl<I, S, E, B> Future for UpgradeableConnection<'_, I, S, E>
-where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + Send + 'static,
- E: HttpServerConnExec<S::Future, B>,
-{
- type Output = Result<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- loop {
- let mut this = self.as_mut().project();
-
- match this.state.as_mut().project() {
- UpgradeableConnStateProj::ReadVersion {
- read_version,
- builder,
- service,
- } => {
- let (version, io) = ready!(read_version.poll(cx))?;
- let service = service.take().unwrap();
- match version {
- #[cfg(feature = "http1")]
- Version::H1 => {
- let conn = builder.http1.serve_connection(io, service).with_upgrades();
- this.state.set(UpgradeableConnState::H1 { conn });
- }
- #[cfg(feature = "http2")]
- Version::H2 => {
- let conn = builder.http2.serve_connection(io, service);
- this.state.set(UpgradeableConnState::H2 { conn });
- }
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => return Poll::Ready(Err(version.unsupported())),
- }
- }
- #[cfg(feature = "http1")]
- UpgradeableConnStateProj::H1 { conn } => {
- return conn.poll(cx).map_err(Into::into);
- }
- #[cfg(feature = "http2")]
- UpgradeableConnStateProj::H2 { conn } => {
- return conn.poll(cx).map_err(Into::into);
- }
- #[cfg(any(not(feature = "http1"), not(feature = "http2")))]
- _ => unreachable!(),
- }
- }
- }
-}
-
-/// Http1 part of builder.
-#[cfg(feature = "http1")]
-pub struct Http1Builder<'a, E> {
- inner: &'a mut Builder<E>,
-}
-
-#[cfg(feature = "http1")]
-impl<E> Http1Builder<'_, E> {
- /// Http2 configuration.
- #[cfg(feature = "http2")]
- pub fn http2(&mut self) -> Http2Builder<'_, E> {
- Http2Builder { inner: self.inner }
- }
-
- /// Set whether the `date` header should be included in HTTP responses.
- ///
- /// Note that including the `date` header is recommended by RFC 7231.
- ///
- /// Default is true.
- pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self {
- self.inner.http1.auto_date_header(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections should support half-closures.
- ///
- /// Clients can chose to shutdown their write-side while waiting
- /// for the server to respond. Setting this to `true` will
- /// prevent closing the connection immediately if `read`
- /// detects an EOF in the middle of a request.
- ///
- /// Default is `false`.
- pub fn half_close(&mut self, val: bool) -> &mut Self {
- self.inner.http1.half_close(val);
- self
- }
-
- /// Enables or disables HTTP/1 keep-alive.
- ///
- /// Default is true.
- pub fn keep_alive(&mut self, val: bool) -> &mut Self {
- self.inner.http1.keep_alive(val);
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
- self.inner.http1.title_case_headers(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections will silently ignored malformed header lines.
- ///
- /// If this is enabled and a header line does not start with a valid header
- /// name, or does not include a colon at all, the line will be silently ignored
- /// and no error will be reported.
- ///
- /// Default is false.
- pub fn ignore_invalid_headers(&mut self, enabled: bool) -> &mut Self {
- self.inner.http1.ignore_invalid_headers(enabled);
- self
- }
-
- /// Set whether to support preserving original header cases.
- ///
- /// Currently, this will record the original cases received, and store them
- /// in a private extension on the `Request`. It will also look for and use
- /// such an extension in any provided `Response`.
- ///
- /// Since the relevant extension is still private, there is no way to
- /// interact with the original cases. The only effect this can have now is
- /// to forward the cases in a proxy-like fashion.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
- self.inner.http1.preserve_header_case(enabled);
- self
- }
-
- /// Set the maximum number of headers.
- ///
- /// When a request is received, the parser will reserve a buffer to store headers for optimal
- /// performance.
- ///
- /// If server receives more headers than the buffer size, it responds to the client with
- /// "431 Request Header Fields Too Large".
- ///
- /// The headers is allocated on the stack by default, which has higher performance. After
- /// setting this value, headers will be allocated in heap memory, that is, heap memory
- /// allocation will occur for each request, and there will be a performance drop of about 5%.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is 100.
- pub fn max_headers(&mut self, val: usize) -> &mut Self {
- self.inner.http1.max_headers(val);
- self
- }
-
- /// Set a timeout for reading client request headers. If a client does not
- /// transmit the entire header within this time, the connection is closed.
- ///
- /// Requires a [`Timer`] set by [`Http1Builder::timer`] to take effect. Panics if `header_read_timeout` is configured
- /// without a [`Timer`].
- ///
- /// Pass `None` to disable.
- ///
- /// Default is currently 30 seconds, but do not depend on that.
- pub fn header_read_timeout(&mut self, read_timeout: impl Into<Option<Duration>>) -> &mut Self {
- self.inner.http1.header_read_timeout(read_timeout);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- pub fn writev(&mut self, val: bool) -> &mut Self {
- self.inner.http1.writev(val);
- self
- }
-
- /// Set the maximum buffer size for the connection.
- ///
- /// Default is ~400kb.
- ///
- /// # Panics
- ///
- /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
- self.inner.http1.max_buf_size(max);
- self
- }
-
- /// Aggregates flushes to better support pipelined responses.
- ///
- /// Experimental, may have bugs.
- ///
- /// Default is false.
- pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
- self.inner.http1.pipeline_flush(enabled);
- self
- }
-
- /// Set the timer used in background tasks.
- pub fn timer<M>(&mut self, timer: M) -> &mut Self
- where
- M: Timer + Send + Sync + 'static,
- {
- self.inner.http1.timer(timer);
- self
- }
-
- /// Bind a connection together with a [`Service`].
- #[cfg(feature = "http2")]
- pub async fn serve_connection<I, S, B>(&self, io: I, service: S) -> Result<()>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- self.inner.serve_connection(io, service).await
- }
-
- /// Bind a connection together with a [`Service`].
- #[cfg(not(feature = "http2"))]
- pub async fn serve_connection<I, S, B>(&self, io: I, service: S) -> Result<()>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + 'static,
- {
- self.inner.serve_connection(io, service).await
- }
-
- /// Bind a connection together with a [`Service`], with the ability to
- /// handle HTTP upgrades. This requires that the IO object implements
- /// `Send`.
- #[cfg(feature = "http2")]
- pub fn serve_connection_with_upgrades<I, S, B>(
- &self,
- io: I,
- service: S,
- ) -> UpgradeableConnection<'_, I, S, E>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + Send + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- self.inner.serve_connection_with_upgrades(io, service)
- }
-}
-
-/// Http2 part of builder.
-#[cfg(feature = "http2")]
-pub struct Http2Builder<'a, E> {
- inner: &'a mut Builder<E>,
-}
-
-#[cfg(feature = "http2")]
-impl<E> Http2Builder<'_, E> {
- #[cfg(feature = "http1")]
- /// Http1 configuration.
- pub fn http1(&mut self) -> Http1Builder<'_, E> {
- Http1Builder { inner: self.inner }
- }
-
- /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent.
- ///
- /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2).
- /// As of v0.4.0, it is 20.
- ///
- /// See <https://github.com/hyperium/hyper/issues/2877> for more information.
- pub fn max_pending_accept_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
- self.inner.http2.max_pending_accept_reset_streams(max);
- self
- }
-
- /// Configures the maximum number of local reset streams allowed before a GOAWAY will be sent.
- ///
- /// If not set, hyper will use a default, currently of 1024.
- ///
- /// If `None` is supplied, hyper will not apply any limit.
- /// This is not advised, as it can potentially expose servers to DOS vulnerabilities.
- ///
- /// See <https://rustsec.org/advisories/RUSTSEC-2024-0003.html> for more information.
- pub fn max_local_error_reset_streams(&mut self, max: impl Into<Option<usize>>) -> &mut Self {
- self.inner.http2.max_local_error_reset_streams(max);
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.inner.http2.initial_stream_window_size(sz);
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.inner.http2.initial_connection_window_size(sz);
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
- self.inner.http2.adaptive_window(enabled);
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.inner.http2.max_frame_size(sz);
- self
- }
-
- /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
- /// connections.
- ///
- /// Default is 200. Passing `None` will remove any limit.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
- pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
- self.inner.http2.max_concurrent_streams(max);
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
- self.inner.http2.keep_alive_interval(interval);
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.inner.http2.keep_alive_timeout(timeout);
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently ~400KB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
- self.inner.http2.max_send_buf_size(max);
- self
- }
-
- /// Enables the [extended CONNECT protocol].
- ///
- /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
- pub fn enable_connect_protocol(&mut self) -> &mut Self {
- self.inner.http2.enable_connect_protocol();
- self
- }
-
- /// Sets the max size of received header frames.
- ///
- /// Default is currently ~16MB, but may change.
- pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
- self.inner.http2.max_header_list_size(max);
- self
- }
-
- /// Set the timer used in background tasks.
- pub fn timer<M>(&mut self, timer: M) -> &mut Self
- where
- M: Timer + Send + Sync + 'static,
- {
- self.inner.http2.timer(timer);
- self
- }
-
- /// Set whether the `date` header should be included in HTTP responses.
- ///
- /// Note that including the `date` header is recommended by RFC 7231.
- ///
- /// Default is true.
- pub fn auto_date_header(&mut self, enabled: bool) -> &mut Self {
- self.inner.http2.auto_date_header(enabled);
- self
- }
-
- /// Bind a connection together with a [`Service`].
- pub async fn serve_connection<I, S, B>(&self, io: I, service: S) -> Result<()>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- self.inner.serve_connection(io, service).await
- }
-
- /// Bind a connection together with a [`Service`], with the ability to
- /// handle HTTP upgrades. This requires that the IO object implements
- /// `Send`.
- pub fn serve_connection_with_upgrades<I, S, B>(
- &self,
- io: I,
- service: S,
- ) -> UpgradeableConnection<'_, I, S, E>
- where
- S: Service<Request<Incoming>, Response = Response<B>>,
- S::Future: 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: Read + Write + Unpin + Send + 'static,
- E: HttpServerConnExec<S::Future, B>,
- {
- self.inner.serve_connection_with_upgrades(io, service)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::{
- rt::{TokioExecutor, TokioIo},
- server::conn::auto,
- };
- use http::{Request, Response};
- use http_body::Body;
- use http_body_util::{BodyExt, Empty, Full};
- use hyper::{body, body::Bytes, client, service::service_fn};
- use std::{convert::Infallible, error::Error as StdError, net::SocketAddr, time::Duration};
- use tokio::{
- net::{TcpListener, TcpStream},
- pin,
- };
-
- const BODY: &[u8] = b"Hello, world!";
-
- #[test]
- fn configuration() {
- // One liner.
- auto::Builder::new(TokioExecutor::new())
- .http1()
- .keep_alive(true)
- .http2()
- .keep_alive_interval(None);
- // .serve_connection(io, service);
-
- // Using variable.
- let mut builder = auto::Builder::new(TokioExecutor::new());
-
- builder.http1().keep_alive(true);
- builder.http2().keep_alive_interval(None);
- // builder.serve_connection(io, service);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http1() {
- let addr = start_server(false, false).await;
- let mut sender = connect_h1(addr).await;
-
- let response = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .unwrap();
-
- let body = response.into_body().collect().await.unwrap().to_bytes();
-
- assert_eq!(body, BODY);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http2() {
- let addr = start_server(false, false).await;
- let mut sender = connect_h2(addr).await;
-
- let response = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .unwrap();
-
- let body = response.into_body().collect().await.unwrap().to_bytes();
-
- assert_eq!(body, BODY);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http2_only() {
- let addr = start_server(false, true).await;
- let mut sender = connect_h2(addr).await;
-
- let response = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .unwrap();
-
- let body = response.into_body().collect().await.unwrap().to_bytes();
-
- assert_eq!(body, BODY);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http2_only_fail_if_client_is_http1() {
- let addr = start_server(false, true).await;
- let mut sender = connect_h1(addr).await;
-
- let _ = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .expect_err("should fail");
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http1_only() {
- let addr = start_server(true, false).await;
- let mut sender = connect_h1(addr).await;
-
- let response = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .unwrap();
-
- let body = response.into_body().collect().await.unwrap().to_bytes();
-
- assert_eq!(body, BODY);
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn http1_only_fail_if_client_is_http2() {
- let addr = start_server(true, false).await;
- let mut sender = connect_h2(addr).await;
-
- let _ = sender
- .send_request(Request::new(Empty::<Bytes>::new()))
- .await
- .expect_err("should fail");
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn graceful_shutdown() {
- let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
- .await
- .unwrap();
-
- let listener_addr = listener.local_addr().unwrap();
-
- // Spawn the task in background so that we can connect there
- let listen_task = tokio::spawn(async move { listener.accept().await.unwrap() });
- // Only connect a stream, do not send headers or anything
- let _stream = TcpStream::connect(listener_addr).await.unwrap();
-
- let (stream, _) = listen_task.await.unwrap();
- let stream = TokioIo::new(stream);
- let builder = auto::Builder::new(TokioExecutor::new());
- let connection = builder.serve_connection(stream, service_fn(hello));
-
- pin!(connection);
-
- connection.as_mut().graceful_shutdown();
-
- let connection_error = tokio::time::timeout(Duration::from_millis(200), connection)
- .await
- .expect("Connection should have finished in a timely manner after graceful shutdown.")
- .expect_err("Connection should have been interrupted.");
-
- let connection_error = connection_error
- .downcast_ref::<std::io::Error>()
- .expect("The error should have been `std::io::Error`.");
- assert_eq!(connection_error.kind(), std::io::ErrorKind::Interrupted);
- }
-
- async fn connect_h1<B>(addr: SocketAddr) -> client::conn::http1::SendRequest<B>
- where
- B: Body + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- {
- let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap());
- let (sender, connection) = client::conn::http1::handshake(stream).await.unwrap();
-
- tokio::spawn(connection);
-
- sender
- }
-
- async fn connect_h2<B>(addr: SocketAddr) -> client::conn::http2::SendRequest<B>
- where
- B: Body + Unpin + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- {
- let stream = TokioIo::new(TcpStream::connect(addr).await.unwrap());
- let (sender, connection) = client::conn::http2::Builder::new(TokioExecutor::new())
- .handshake(stream)
- .await
- .unwrap();
-
- tokio::spawn(connection);
-
- sender
- }
-
- async fn start_server(h1_only: bool, h2_only: bool) -> SocketAddr {
- let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
- let listener = TcpListener::bind(addr).await.unwrap();
-
- let local_addr = listener.local_addr().unwrap();
-
- tokio::spawn(async move {
- loop {
- let (stream, _) = listener.accept().await.unwrap();
- let stream = TokioIo::new(stream);
- tokio::task::spawn(async move {
- let mut builder = auto::Builder::new(TokioExecutor::new());
- if h1_only {
- builder = builder.http1_only();
- builder.serve_connection(stream, service_fn(hello)).await
- } else if h2_only {
- builder = builder.http2_only();
- builder.serve_connection(stream, service_fn(hello)).await
- } else {
- builder
- .http2()
- .max_header_list_size(4096)
- .serve_connection_with_upgrades(stream, service_fn(hello))
- .await
- }
- .unwrap();
- });
- }
- });
-
- local_addr
- }
-
- async fn hello(_req: Request<body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
- Ok(Response::new(Full::new(Bytes::from(BODY))))
- }
-}
diff --git a/vendor/hyper-util/src/server/conn/auto/upgrade.rs b/vendor/hyper-util/src/server/conn/auto/upgrade.rs
deleted file mode 100644
index 8d94c409..00000000
--- a/vendor/hyper-util/src/server/conn/auto/upgrade.rs
+++ /dev/null
@@ -1,68 +0,0 @@
-//! Upgrade utilities.
-
-use bytes::{Bytes, BytesMut};
-use hyper::{
- rt::{Read, Write},
- upgrade::Upgraded,
-};
-
-use crate::common::rewind::Rewind;
-
-/// Tries to downcast the internal trait object to the type passed.
-///
-/// On success, returns the downcasted parts. On error, returns the Upgraded back.
-/// This is a kludge to work around the fact that the machinery provided by
-/// [`hyper_util::server::conn::auto`] wraps the inner `T` with a private type
-/// that is not reachable from outside the crate.
-///
-/// [`hyper_util::server::conn::auto`]: crate::server::conn::auto
-///
-/// This kludge will be removed when this machinery is added back to the main
-/// `hyper` code.
-pub fn downcast<T>(upgraded: Upgraded) -> Result<Parts<T>, Upgraded>
-where
- T: Read + Write + Unpin + 'static,
-{
- let hyper::upgrade::Parts {
- io: rewind,
- mut read_buf,
- ..
- } = upgraded.downcast::<Rewind<T>>()?;
-
- if let Some(pre) = rewind.pre {
- read_buf = if read_buf.is_empty() {
- pre
- } else {
- let mut buf = BytesMut::from(read_buf);
-
- buf.extend_from_slice(&pre);
-
- buf.freeze()
- };
- }
-
- Ok(Parts {
- io: rewind.inner,
- read_buf,
- })
-}
-
-/// The deconstructed parts of an [`Upgraded`] type.
-///
-/// Includes the original IO type, and a read buffer of bytes that the
-/// HTTP state machine may have already read before completing an upgrade.
-#[derive(Debug)]
-#[non_exhaustive]
-pub struct Parts<T> {
- /// The original IO object used before the upgrade.
- pub io: T,
- /// A buffer of bytes that have been read but not processed as HTTP.
- ///
- /// For instance, if the `Connection` is used for an HTTP upgrade request,
- /// it is possible the server sent back the first bytes of the new protocol
- /// along with the response upgrade.
- ///
- /// You will want to check for any existing bytes if you plan to continue
- /// communicating on the IO object.
- pub read_buf: Bytes,
-}
diff --git a/vendor/hyper-util/src/server/conn/mod.rs b/vendor/hyper-util/src/server/conn/mod.rs
deleted file mode 100644
index b23503a1..00000000
--- a/vendor/hyper-util/src/server/conn/mod.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-//! Connection utilities.
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-pub mod auto;
diff --git a/vendor/hyper-util/src/server/graceful.rs b/vendor/hyper-util/src/server/graceful.rs
deleted file mode 100644
index b367fc8a..00000000
--- a/vendor/hyper-util/src/server/graceful.rs
+++ /dev/null
@@ -1,488 +0,0 @@
-//! Utility to gracefully shutdown a server.
-//!
-//! This module provides a [`GracefulShutdown`] type,
-//! which can be used to gracefully shutdown a server.
-//!
-//! See <https://github.com/hyperium/hyper-util/blob/master/examples/server_graceful.rs>
-//! for an example of how to use this.
-
-use std::{
- fmt::{self, Debug},
- future::Future,
- pin::Pin,
- task::{self, Poll},
-};
-
-use pin_project_lite::pin_project;
-use tokio::sync::watch;
-
-/// A graceful shutdown utility
-// Purposefully not `Clone`, see `watcher()` method for why.
-pub struct GracefulShutdown {
- tx: watch::Sender<()>,
-}
-
-/// A watcher side of the graceful shutdown.
-///
-/// This type can only watch a connection, it cannot trigger a shutdown.
-///
-/// Call [`GracefulShutdown::watcher()`] to construct one of these.
-pub struct Watcher {
- rx: watch::Receiver<()>,
-}
-
-impl GracefulShutdown {
- /// Create a new graceful shutdown helper.
- pub fn new() -> Self {
- let (tx, _) = watch::channel(());
- Self { tx }
- }
-
- /// Wrap a future for graceful shutdown watching.
- pub fn watch<C: GracefulConnection>(&self, conn: C) -> impl Future<Output = C::Output> {
- self.watcher().watch(conn)
- }
-
- /// Create an owned type that can watch a connection.
- ///
- /// This method allows created an owned type that can be sent onto another
- /// task before calling [`Watcher::watch()`].
- // Internal: this function exists because `Clone` allows footguns.
- // If the `tx` were cloned (or the `rx`), race conditions can happens where
- // one task starting a shutdown is scheduled and interwined with a task
- // starting to watch a connection, and the "watch version" is one behind.
- pub fn watcher(&self) -> Watcher {
- let rx = self.tx.subscribe();
- Watcher { rx }
- }
-
- /// Signal shutdown for all watched connections.
- ///
- /// This returns a `Future` which will complete once all watched
- /// connections have shutdown.
- pub async fn shutdown(self) {
- let Self { tx } = self;
-
- // signal all the watched futures about the change
- let _ = tx.send(());
- // and then wait for all of them to complete
- tx.closed().await;
- }
-
- /// Returns the number of the watching connections.
- pub fn count(&self) -> usize {
- self.tx.receiver_count()
- }
-}
-
-impl Debug for GracefulShutdown {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("GracefulShutdown").finish()
- }
-}
-
-impl Default for GracefulShutdown {
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl Watcher {
- /// Wrap a future for graceful shutdown watching.
- pub fn watch<C: GracefulConnection>(self, conn: C) -> impl Future<Output = C::Output> {
- let Watcher { mut rx } = self;
- GracefulConnectionFuture::new(conn, async move {
- let _ = rx.changed().await;
- // hold onto the rx until the watched future is completed
- rx
- })
- }
-}
-
-impl Debug for Watcher {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("GracefulWatcher").finish()
- }
-}
-
-pin_project! {
- struct GracefulConnectionFuture<C, F: Future> {
- #[pin]
- conn: C,
- #[pin]
- cancel: F,
- #[pin]
- // If cancelled, this is held until the inner conn is done.
- cancelled_guard: Option<F::Output>,
- }
-}
-
-impl<C, F: Future> GracefulConnectionFuture<C, F> {
- fn new(conn: C, cancel: F) -> Self {
- Self {
- conn,
- cancel,
- cancelled_guard: None,
- }
- }
-}
-
-impl<C, F: Future> Debug for GracefulConnectionFuture<C, F> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("GracefulConnectionFuture").finish()
- }
-}
-
-impl<C, F> Future for GracefulConnectionFuture<C, F>
-where
- C: GracefulConnection,
- F: Future,
-{
- type Output = C::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
- if this.cancelled_guard.is_none() {
- if let Poll::Ready(guard) = this.cancel.poll(cx) {
- this.cancelled_guard.set(Some(guard));
- this.conn.as_mut().graceful_shutdown();
- }
- }
- this.conn.poll(cx)
- }
-}
-
-/// An internal utility trait as an umbrella target for all (hyper) connection
-/// types that the [`GracefulShutdown`] can watch.
-pub trait GracefulConnection: Future<Output = Result<(), Self::Error>> + private::Sealed {
- /// The error type returned by the connection when used as a future.
- type Error;
-
- /// Start a graceful shutdown process for this connection.
- fn graceful_shutdown(self: Pin<&mut Self>);
-}
-
-#[cfg(feature = "http1")]
-impl<I, B, S> GracefulConnection for hyper::server::conn::http1::Connection<I, S>
-where
- S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
-{
- type Error = hyper::Error;
-
- fn graceful_shutdown(self: Pin<&mut Self>) {
- hyper::server::conn::http1::Connection::graceful_shutdown(self);
- }
-}
-
-#[cfg(feature = "http2")]
-impl<I, B, S, E> GracefulConnection for hyper::server::conn::http2::Connection<I, S, E>
-where
- S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
-{
- type Error = hyper::Error;
-
- fn graceful_shutdown(self: Pin<&mut Self>) {
- hyper::server::conn::http2::Connection::graceful_shutdown(self);
- }
-}
-
-#[cfg(feature = "server-auto")]
-impl<I, B, S, E> GracefulConnection for crate::server::conn::auto::Connection<'_, I, S, E>
-where
- S: hyper::service::Service<http::Request<hyper::body::Incoming>, Response = http::Response<B>>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- S::Future: 'static,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
-{
- type Error = Box<dyn std::error::Error + Send + Sync>;
-
- fn graceful_shutdown(self: Pin<&mut Self>) {
- crate::server::conn::auto::Connection::graceful_shutdown(self);
- }
-}
-
-#[cfg(feature = "server-auto")]
-impl<I, B, S, E> GracefulConnection
- for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E>
-where
- S: hyper::service::Service<http::Request<hyper::body::Incoming>, Response = http::Response<B>>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- S::Future: 'static,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
-{
- type Error = Box<dyn std::error::Error + Send + Sync>;
-
- fn graceful_shutdown(self: Pin<&mut Self>) {
- crate::server::conn::auto::UpgradeableConnection::graceful_shutdown(self);
- }
-}
-
-mod private {
- pub trait Sealed {}
-
- #[cfg(feature = "http1")]
- impl<I, B, S> Sealed for hyper::server::conn::http1::Connection<I, S>
- where
- S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- {
- }
-
- #[cfg(feature = "http1")]
- impl<I, B, S> Sealed for hyper::server::conn::http1::UpgradeableConnection<I, S>
- where
- S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- {
- }
-
- #[cfg(feature = "http2")]
- impl<I, B, S, E> Sealed for hyper::server::conn::http2::Connection<I, S, E>
- where
- S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
- {
- }
-
- #[cfg(feature = "server-auto")]
- impl<I, B, S, E> Sealed for crate::server::conn::auto::Connection<'_, I, S, E>
- where
- S: hyper::service::Service<
- http::Request<hyper::body::Incoming>,
- Response = http::Response<B>,
- >,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- S::Future: 'static,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
- {
- }
-
- #[cfg(feature = "server-auto")]
- impl<I, B, S, E> Sealed for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E>
- where
- S: hyper::service::Service<
- http::Request<hyper::body::Incoming>,
- Response = http::Response<B>,
- >,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- S::Future: 'static,
- I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
- B: hyper::body::Body + 'static,
- B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
- {
- }
-}
-
-#[cfg(test)]
-mod test {
- use super::*;
- use pin_project_lite::pin_project;
- use std::sync::atomic::{AtomicUsize, Ordering};
- use std::sync::Arc;
-
- pin_project! {
- #[derive(Debug)]
- struct DummyConnection<F> {
- #[pin]
- future: F,
- shutdown_counter: Arc<AtomicUsize>,
- }
- }
-
- impl<F> private::Sealed for DummyConnection<F> {}
-
- impl<F: Future> GracefulConnection for DummyConnection<F> {
- type Error = ();
-
- fn graceful_shutdown(self: Pin<&mut Self>) {
- self.shutdown_counter.fetch_add(1, Ordering::SeqCst);
- }
- }
-
- impl<F: Future> Future for DummyConnection<F> {
- type Output = Result<(), ()>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.project().future.poll(cx) {
- Poll::Ready(_) => Poll::Ready(Ok(())),
- Poll::Pending => Poll::Pending,
- }
- }
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn test_graceful_shutdown_ok() {
- let graceful = GracefulShutdown::new();
- let shutdown_counter = Arc::new(AtomicUsize::new(0));
- let (dummy_tx, _) = tokio::sync::broadcast::channel(1);
-
- for i in 1..=3 {
- let mut dummy_rx = dummy_tx.subscribe();
- let shutdown_counter = shutdown_counter.clone();
-
- let future = async move {
- tokio::time::sleep(std::time::Duration::from_millis(i * 10)).await;
- let _ = dummy_rx.recv().await;
- };
- let dummy_conn = DummyConnection {
- future,
- shutdown_counter,
- };
- let conn = graceful.watch(dummy_conn);
- tokio::spawn(async move {
- conn.await.unwrap();
- });
- }
-
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
- let _ = dummy_tx.send(());
-
- tokio::select! {
- _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => {
- panic!("timeout")
- },
- _ = graceful.shutdown() => {
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
- }
- }
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn test_graceful_shutdown_delayed_ok() {
- let graceful = GracefulShutdown::new();
- let shutdown_counter = Arc::new(AtomicUsize::new(0));
-
- for i in 1..=3 {
- let shutdown_counter = shutdown_counter.clone();
-
- //tokio::time::sleep(std::time::Duration::from_millis(i * 5)).await;
- let future = async move {
- tokio::time::sleep(std::time::Duration::from_millis(i * 50)).await;
- };
- let dummy_conn = DummyConnection {
- future,
- shutdown_counter,
- };
- let conn = graceful.watch(dummy_conn);
- tokio::spawn(async move {
- conn.await.unwrap();
- });
- }
-
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
-
- tokio::select! {
- _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => {
- panic!("timeout")
- },
- _ = graceful.shutdown() => {
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
- }
- }
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn test_graceful_shutdown_multi_per_watcher_ok() {
- let graceful = GracefulShutdown::new();
- let shutdown_counter = Arc::new(AtomicUsize::new(0));
-
- for i in 1..=3 {
- let shutdown_counter = shutdown_counter.clone();
-
- let mut futures = Vec::new();
- for u in 1..=i {
- let future = tokio::time::sleep(std::time::Duration::from_millis(u * 50));
- let dummy_conn = DummyConnection {
- future,
- shutdown_counter: shutdown_counter.clone(),
- };
- let conn = graceful.watch(dummy_conn);
- futures.push(conn);
- }
- tokio::spawn(async move {
- futures_util::future::join_all(futures).await;
- });
- }
-
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
-
- tokio::select! {
- _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => {
- panic!("timeout")
- },
- _ = graceful.shutdown() => {
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 6);
- }
- }
- }
-
- #[cfg(not(miri))]
- #[tokio::test]
- async fn test_graceful_shutdown_timeout() {
- let graceful = GracefulShutdown::new();
- let shutdown_counter = Arc::new(AtomicUsize::new(0));
-
- for i in 1..=3 {
- let shutdown_counter = shutdown_counter.clone();
-
- let future = async move {
- if i == 1 {
- std::future::pending::<()>().await
- } else {
- std::future::ready(()).await
- }
- };
- let dummy_conn = DummyConnection {
- future,
- shutdown_counter,
- };
- let conn = graceful.watch(dummy_conn);
- tokio::spawn(async move {
- conn.await.unwrap();
- });
- }
-
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
-
- tokio::select! {
- _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => {
- assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
- },
- _ = graceful.shutdown() => {
- panic!("shutdown should not be completed: as not all our conns finish")
- }
- }
- }
-}
diff --git a/vendor/hyper-util/src/server/mod.rs b/vendor/hyper-util/src/server/mod.rs
deleted file mode 100644
index a4838ac5..00000000
--- a/vendor/hyper-util/src/server/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-//! Server utilities.
-
-pub mod conn;
-
-#[cfg(feature = "server-graceful")]
-pub mod graceful;
diff --git a/vendor/hyper-util/src/service/glue.rs b/vendor/hyper-util/src/service/glue.rs
deleted file mode 100644
index ceff86f5..00000000
--- a/vendor/hyper-util/src/service/glue.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use pin_project_lite::pin_project;
-use std::{
- future::Future,
- pin::Pin,
- task::{Context, Poll},
-};
-
-use super::Oneshot;
-
-/// A tower [`Service`][tower-svc] converted into a hyper [`Service`][hyper-svc].
-///
-/// This wraps an inner tower service `S` in a [`hyper::service::Service`] implementation. See
-/// the module-level documentation of [`service`][crate::service] for more information about using
-/// [`tower`][tower] services and middleware with [`hyper`].
-///
-/// [hyper-svc]: hyper::service::Service
-/// [tower]: https://docs.rs/tower/latest/tower/
-/// [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html
-#[derive(Debug, Copy, Clone)]
-pub struct TowerToHyperService<S> {
- service: S,
-}
-
-impl<S> TowerToHyperService<S> {
- /// Create a new [`TowerToHyperService`] from a tower service.
- pub fn new(tower_service: S) -> Self {
- Self {
- service: tower_service,
- }
- }
-}
-
-impl<S, R> hyper::service::Service<R> for TowerToHyperService<S>
-where
- S: tower_service::Service<R> + Clone,
-{
- type Response = S::Response;
- type Error = S::Error;
- type Future = TowerToHyperServiceFuture<S, R>;
-
- fn call(&self, req: R) -> Self::Future {
- TowerToHyperServiceFuture {
- future: Oneshot::new(self.service.clone(), req),
- }
- }
-}
-
-pin_project! {
- /// Response future for [`TowerToHyperService`].
- ///
- /// This future is acquired by [`call`][hyper::service::Service::call]ing a
- /// [`TowerToHyperService`].
- pub struct TowerToHyperServiceFuture<S, R>
- where
- S: tower_service::Service<R>,
- {
- #[pin]
- future: Oneshot<S, R>,
- }
-}
-
-impl<S, R> Future for TowerToHyperServiceFuture<S, R>
-where
- S: tower_service::Service<R>,
-{
- type Output = Result<S::Response, S::Error>;
-
- #[inline]
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.project().future.poll(cx)
- }
-}
diff --git a/vendor/hyper-util/src/service/mod.rs b/vendor/hyper-util/src/service/mod.rs
deleted file mode 100644
index 34796431..00000000
--- a/vendor/hyper-util/src/service/mod.rs
+++ /dev/null
@@ -1,32 +0,0 @@
-//! Service utilities.
-//!
-//! [`hyper::service`] provides a [`Service`][hyper-svc] trait, representing an asynchronous
-//! function from a `Request` to a `Response`. This provides an interface allowing middleware for
-//! network application to be written in a modular and reusable way.
-//!
-//! This submodule provides an assortment of utilities for working with [`Service`][hyper-svc]s.
-//! See the module-level documentation of [`hyper::service`] for more information.
-//!
-//! # Tower
-//!
-//! While [`hyper`] uses its own notion of a [`Service`][hyper-svc] internally, many other
-//! libraries use a library such as [`tower`][tower] to provide the fundamental model of an
-//! asynchronous function.
-//!
-//! The [`TowerToHyperService`] type provided by this submodule can be used to bridge these
-//! ecosystems together. By wrapping a [`tower::Service`][tower-svc] in [`TowerToHyperService`],
-//! it can be passed into [`hyper`] interfaces that expect a [`hyper::service::Service`].
-//!
-//! [hyper-svc]: hyper::service::Service
-//! [tower]: https://docs.rs/tower/latest/tower/
-//! [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html
-
-#[cfg(feature = "service")]
-mod glue;
-#[cfg(any(feature = "client-legacy", feature = "service"))]
-mod oneshot;
-
-#[cfg(feature = "service")]
-pub use self::glue::{TowerToHyperService, TowerToHyperServiceFuture};
-#[cfg(any(feature = "client-legacy", feature = "service"))]
-pub(crate) use self::oneshot::Oneshot;
diff --git a/vendor/hyper-util/src/service/oneshot.rs b/vendor/hyper-util/src/service/oneshot.rs
deleted file mode 100644
index 2cc3e6e9..00000000
--- a/vendor/hyper-util/src/service/oneshot.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use futures_core::ready;
-use pin_project_lite::pin_project;
-use std::future::Future;
-use std::pin::Pin;
-use std::task::{Context, Poll};
-use tower_service::Service;
-
-// Vendored from tower::util to reduce dependencies, the code is small enough.
-
-// Not really pub, but used in a trait for bounds
-pin_project! {
- #[project = OneshotProj]
- #[derive(Debug)]
- pub enum Oneshot<S: Service<Req>, Req> {
- NotReady {
- svc: S,
- req: Option<Req>,
- },
- Called {
- #[pin]
- fut: S::Future,
- },
- Done,
- }
-}
-
-impl<S, Req> Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- pub(crate) const fn new(svc: S, req: Req) -> Self {
- Oneshot::NotReady {
- svc,
- req: Some(req),
- }
- }
-}
-
-impl<S, Req> Future for Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- type Output = Result<S::Response, S::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- loop {
- let this = self.as_mut().project();
- match this {
- OneshotProj::NotReady { svc, req } => {
- ready!(svc.poll_ready(cx))?;
- let fut = svc.call(req.take().expect("already called"));
- self.set(Oneshot::Called { fut });
- }
- OneshotProj::Called { fut } => {
- let res = ready!(fut.poll(cx))?;
- self.set(Oneshot::Done);
- return Poll::Ready(Ok(res));
- }
- OneshotProj::Done => panic!("polled after complete"),
- }
- }
- }
-}
diff --git a/vendor/hyper-util/tests/legacy_client.rs b/vendor/hyper-util/tests/legacy_client.rs
deleted file mode 100644
index bdac5e0e..00000000
--- a/vendor/hyper-util/tests/legacy_client.rs
+++ /dev/null
@@ -1,1488 +0,0 @@
-mod test_utils;
-
-use std::io::{Read, Write};
-use std::net::{SocketAddr, TcpListener};
-use std::pin::Pin;
-use std::sync::atomic::Ordering;
-use std::sync::Arc;
-use std::task::Poll;
-use std::thread;
-use std::time::Duration;
-
-use futures_channel::{mpsc, oneshot};
-use futures_util::future::{self, FutureExt, TryFutureExt};
-use futures_util::stream::StreamExt;
-use futures_util::{self, Stream};
-use http_body_util::BodyExt;
-use http_body_util::{Empty, Full, StreamBody};
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
-use hyper::body::Bytes;
-use hyper::body::Frame;
-use hyper::Request;
-use hyper_util::client::legacy::connect::{capture_connection, HttpConnector};
-use hyper_util::client::legacy::Client;
-use hyper_util::rt::{TokioExecutor, TokioIo};
-
-use test_utils::{DebugConnector, DebugStream};
-
-pub fn runtime() -> tokio::runtime::Runtime {
- tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .expect("new rt")
-}
-
-fn s(buf: &[u8]) -> &str {
- std::str::from_utf8(buf).expect("from_utf8")
-}
-
-#[cfg(not(miri))]
-#[test]
-fn drop_body_before_eof_closes_connection() {
- // https://github.com/hyperium/hyper/issues/1353
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
- let (closes_tx, closes) = mpsc::channel::<()>(10);
- let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(
- DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx),
- );
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- let body = vec![b'x'; 1024 * 128];
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n",
- body.len()
- )
- .expect("write head");
- let _ = sock.write_all(&body);
- let _ = tx1.send(());
- });
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
- let rx = rx1;
- rt.block_on(async move {
- let (res, _) = future::join(res, rx).await;
- res.unwrap();
- tokio::time::sleep(Duration::from_secs(1)).await;
- });
- rt.block_on(closes.into_future()).0.expect("closes");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn drop_client_closes_idle_connections() {
- let _ = pretty_env_logger::try_init();
-
- let server = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, mut closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
-
- let t1 = tokio::spawn(async move {
- let mut sock = server.accept().await.unwrap().0;
- let mut buf = [0; 4096];
- sock.read(&mut buf).await.expect("read 1");
- let body = [b'x'; 64];
- let headers = format!("HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n", body.len());
- sock.write_all(headers.as_bytes())
- .await
- .expect("write head");
- sock.write_all(&body).await.expect("write body");
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- if let Ok(n) = sock.read(&mut buf).await {
- assert_eq!(n, 0);
- }
- });
-
- let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
- let rx = rx1;
- let (res, _) = future::join(res, rx).await;
- res.unwrap();
-
- // not closed yet, just idle
- future::poll_fn(|ctx| {
- assert!(Pin::new(&mut closes).poll_next(ctx).is_pending());
- Poll::Ready(())
- })
- .await;
-
- // drop to start the connections closing
- drop(client);
-
- // and wait a few ticks for the connections to close
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- t1.await.unwrap();
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn drop_response_future_closes_in_progress_connection() {
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- // we never write a response head
- // simulates a slow server operation
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = client_drop_rx.recv();
- });
-
- let res = {
- let client = Client::builder(TokioExecutor::new()).build(
- DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx),
- );
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- client.request(req).map(|_| unreachable!())
- };
-
- future::select(res, rx1).await;
-
- // res now dropped
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn drop_response_body_closes_in_progress_connection() {
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n"
- )
- .expect("write head");
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = client_drop_rx.recv();
- });
-
- let rx = rx1;
- let res = {
- let client = Client::builder(TokioExecutor::new()).build(
- DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx),
- );
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- // notably, haven't read body yet
- client.request(req)
- };
-
- let (res, _) = future::join(res, rx).await;
- // drop the body
- res.unwrap();
-
- // and wait a few ticks to see the connection drop
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn no_keep_alive_closes_connection() {
- // https://github.com/hyperium/hyper/issues/1383
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_tx2, rx2) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .unwrap();
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = rx2.recv();
- });
-
- let client = Client::builder(TokioExecutor::new())
- .pool_max_idle_per_host(0)
- .build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
- let rx = rx1;
- let (res, _) = future::join(res, rx).await;
- res.unwrap();
-
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(close, t).await;
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn socket_disconnect_closes_idle_conn() {
- // notably when keep-alive is enabled
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .unwrap();
- let _ = tx1.send(());
- });
-
- let client = Client::builder(TokioExecutor::new()).build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
-
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
- let rx = rx1;
-
- let (res, _) = future::join(res, rx).await;
- res.unwrap();
-
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
-}
-
-#[cfg(not(miri))]
-#[test]
-fn connect_call_is_lazy() {
- // We especially don't want connects() triggered if there's
- // idle connections that the Checkout would have found
- let _ = pretty_env_logger::try_init();
-
- let _rt = runtime();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
- let req = Request::builder()
- .uri("http://hyper.local/a")
- .body(Empty::<Bytes>::new())
- .unwrap();
- let _fut = client.request(req);
- // internal Connect::connect should have been lazy, and not
- // triggered an actual connect yet.
- assert_eq!(connects.load(Ordering::Relaxed), 0);
-}
-
-#[cfg(not(miri))]
-#[test]
-fn client_keep_alive_0() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
-
- let n2 = sock.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0);
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let rx = rx1;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::SeqCst), 1);
-
- // sleep real quick to let the threadpool put connection in ready
- // state and back into client pool
- thread::sleep(Duration::from_millis(50));
-
- let rx = rx2;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/b"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 1,
- "second request should still only have 1 connect"
- );
- drop(client);
-}
-
-#[cfg(not(miri))]
-#[test]
-fn client_keep_alive_extra_body() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
-
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello")
- .expect("write 1");
- // the body "hello", while ignored because its a HEAD request, should mean the connection
- // cannot be put back in the pool
- let _ = tx1.send(());
-
- let mut sock2 = server.accept().unwrap().0;
- let n2 = sock2.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0);
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock2
- .write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
-
- let rx = rx1;
- let req = Request::builder()
- .method("HEAD")
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::Relaxed), 1);
-
- let rx = rx2;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/b"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::Relaxed), 2);
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn client_keep_alive_when_response_before_request_body_ends() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
-
- let (closes_tx, mut closes) = mpsc::channel::<()>(10);
- let connector = DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx);
- let connects = connector.connects.clone();
- let client = Client::builder(TokioExecutor::new()).build(connector.clone());
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- let (_tx3, rx3) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- // after writing the response, THEN stream the body
- let _ = tx1.send(());
-
- sock.read(&mut buf).expect("read 2");
- let _ = tx2.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = rx3.recv();
- });
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
-
- let delayed_body = rx1
- .then(|_| Box::pin(tokio::time::sleep(Duration::from_millis(200))))
- .map(|_| Ok::<_, ()>(Frame::data(&b"hello a"[..])))
- .map_err(|_| -> hyper::Error { panic!("rx1") })
- .into_stream();
-
- let req = Request::builder()
- .method("POST")
- .uri(&*format!("http://{addr}/a"))
- .body(StreamBody::new(delayed_body))
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
-
- future::join(res, rx2).await.0.unwrap();
- future::poll_fn(|ctx| {
- assert!(Pin::new(&mut closes).poll_next(ctx).is_pending());
- Poll::Ready(())
- })
- .await;
-
- assert_eq!(connects.load(Ordering::Relaxed), 1);
-
- drop(client);
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn client_keep_alive_eager_when_chunked() {
- // If a response body has been read to completion, with completion
- // determined by some other factor, like decompression, and thus
- // it is in't polled a final time to clear the final 0-len chunk,
- // try to eagerly clear it so the connection can still be used.
-
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(
- b"\
- HTTP/1.1 200 OK\r\n\
- transfer-encoding: chunked\r\n\
- \r\n\
- 5\r\n\
- hello\r\n\
- 0\r\n\r\n\
- ",
- )
- .expect("write 1");
- let _ = tx1.send(());
-
- let n2 = sock.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0, "bytes of second request");
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let rx = rx1;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let fut = client.request(req);
-
- let resp = future::join(fut, rx).map(|r| r.0).await.unwrap();
- assert_eq!(connects.load(Ordering::SeqCst), 1);
- assert_eq!(resp.status(), 200);
- assert_eq!(resp.headers()["transfer-encoding"], "chunked");
-
- // Read the "hello" chunk...
- let chunk = resp.collect().await.unwrap().to_bytes();
- assert_eq!(chunk, "hello");
-
- // sleep real quick to let the threadpool put connection in ready
- // state and back into client pool
- tokio::time::sleep(Duration::from_millis(50)).await;
-
- let rx = rx2;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/b"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let fut = client.request(req);
- future::join(fut, rx).map(|r| r.0).await.unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 1,
- "second request should still only have 1 connect"
- );
- drop(client);
-}
-
-#[cfg(not(miri))]
-#[test]
-fn connect_proxy_sends_absolute_uri() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
- let connector = DebugConnector::new().proxy();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- let n = sock.read(&mut buf).expect("read 1");
- let expected = format!("GET http://{addr}/foo/bar HTTP/1.1\r\nhost: {addr}\r\n\r\n");
- assert_eq!(s(&buf[..n]), expected);
-
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
- });
-
- let rx = rx1;
- let req = Request::builder()
- .uri(&*format!("http://{addr}/foo/bar"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-}
-
-#[cfg(not(miri))]
-#[test]
-fn connect_proxy_http_connect_sends_authority_form() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
- let connector = DebugConnector::new().proxy();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- let n = sock.read(&mut buf).expect("read 1");
- let expected = format!("CONNECT {addr} HTTP/1.1\r\nhost: {addr}\r\n\r\n");
- assert_eq!(s(&buf[..n]), expected);
-
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
- });
-
- let rx = rx1;
- let req = Request::builder()
- .method("CONNECT")
- .uri(&*format!("http://{addr}/useless/path"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-}
-
-#[cfg(not(miri))]
-#[test]
-fn client_upgrade() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = runtime();
-
- let connector = DebugConnector::new();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(
- b"\
- HTTP/1.1 101 Switching Protocols\r\n\
- Upgrade: foobar\r\n\
- \r\n\
- foobar=ready\
- ",
- )
- .unwrap();
- let _ = tx1.send(());
-
- let n = sock.read(&mut buf).expect("read 2");
- assert_eq!(&buf[..n], b"foo=bar");
- sock.write_all(b"bar=foo").expect("write 2");
- });
-
- let rx = rx1;
-
- let req = Request::builder()
- .method("GET")
- .uri(&*format!("http://{addr}/up"))
- .body(Empty::<Bytes>::new())
- .unwrap();
-
- let res = client.request(req);
- let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(res.status(), 101);
- let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade");
-
- let parts = upgraded.downcast::<DebugStream>().unwrap();
- assert_eq!(s(&parts.read_buf), "foobar=ready");
-
- let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
- let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
- assert_eq!(vec, b"bar=foo");
-}
-
-#[cfg(not(miri))]
-#[test]
-fn client_http2_upgrade() {
- use http::{Method, Response, Version};
- use hyper::service::service_fn;
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
- use tokio::net::TcpListener;
-
- let _ = pretty_env_logger::try_init();
- let rt = runtime();
- let server = rt
- .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))))
- .unwrap();
- let addr = server.local_addr().unwrap();
- let mut connector = DebugConnector::new();
- connector.alpn_h2 = true;
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- rt.spawn(async move {
- let (stream, _) = server.accept().await.expect("accept");
- let stream = TokioIo::new(stream);
- let mut builder = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
- // IMPORTANT: This is required to advertise our support for HTTP/2 websockets to the client.
- builder.http2().enable_connect_protocol();
- builder
- .serve_connection_with_upgrades(
- stream,
- service_fn(|req| async move {
- assert_eq!(req.headers().get("host"), None);
- assert_eq!(req.version(), Version::HTTP_2);
- assert_eq!(
- req.headers().get(http::header::SEC_WEBSOCKET_VERSION),
- Some(&http::header::HeaderValue::from_static("13"))
- );
- assert_eq!(
- req.extensions().get::<hyper::ext::Protocol>(),
- Some(&hyper::ext::Protocol::from_static("websocket"))
- );
-
- let on_upgrade = hyper::upgrade::on(req);
- tokio::spawn(async move {
- let upgraded = on_upgrade.await.unwrap();
- let mut io = TokioIo::new(upgraded);
-
- let mut vec = vec![];
- io.read_buf(&mut vec).await.unwrap();
- assert_eq!(vec, b"foo=bar");
- io.write_all(b"bar=foo").await.unwrap();
- });
-
- Ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new()))
- }),
- )
- .await
- .expect("server");
- });
-
- let req = Request::builder()
- .method(Method::CONNECT)
- .uri(&*format!("http://{addr}/up"))
- .header(http::header::SEC_WEBSOCKET_VERSION, "13")
- .version(Version::HTTP_2)
- .extension(hyper::ext::Protocol::from_static("websocket"))
- .body(Empty::<Bytes>::new())
- .unwrap();
-
- let res = client.request(req);
- let res = rt.block_on(res).unwrap();
-
- assert_eq!(res.status(), http::StatusCode::OK);
- assert_eq!(res.version(), Version::HTTP_2);
-
- let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade");
- let mut io = TokioIo::new(upgraded);
-
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
- let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
- assert_eq!(vec, b"bar=foo");
-}
-
-#[cfg(not(miri))]
-#[test]
-fn alpn_h2() {
- use http::Response;
- use hyper::service::service_fn;
- use tokio::net::TcpListener;
-
- let _ = pretty_env_logger::try_init();
- let rt = runtime();
- let listener = rt
- .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))))
- .unwrap();
- let addr = listener.local_addr().unwrap();
- let mut connector = DebugConnector::new();
- connector.alpn_h2 = true;
- let connects = connector.connects.clone();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- rt.spawn(async move {
- let (stream, _) = listener.accept().await.expect("accept");
- let stream = TokioIo::new(stream);
- hyper::server::conn::http2::Builder::new(TokioExecutor::new())
- .serve_connection(
- stream,
- service_fn(|req| async move {
- assert_eq!(req.headers().get("host"), None);
- Ok::<_, hyper::Error>(Response::new(Full::<Bytes>::from("Hello, world")))
- }),
- )
- .await
- .expect("server");
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let url = format!("http://{addr}/a").parse::<::hyper::Uri>().unwrap();
- let res1 = client.get(url.clone());
- let res2 = client.get(url.clone());
- let res3 = client.get(url.clone());
- rt.block_on(future::try_join3(res1, res2, res3)).unwrap();
-
- // Since the client doesn't know it can ALPN at first, it will have
- // started 3 connections. But, the server above will only handle 1,
- // so the unwrapped responses futures show it still worked.
- assert_eq!(connects.load(Ordering::SeqCst), 3);
-
- let res4 = client.get(url.clone());
- rt.block_on(res4).unwrap();
-
- // HTTP/2 request allowed
- let res5 = client.request(
- Request::builder()
- .uri(url)
- .version(hyper::Version::HTTP_2)
- .body(Empty::<Bytes>::new())
- .unwrap(),
- );
- rt.block_on(res5).unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 3,
- "after ALPN, no more connects"
- );
- drop(client);
-}
-
-#[cfg(not(miri))]
-#[test]
-fn capture_connection_on_client() {
- let _ = pretty_env_logger::try_init();
-
- let rt = runtime();
- let connector = DebugConnector::new();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- });
- let mut req = Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap();
- let captured_conn = capture_connection(&mut req);
- rt.block_on(client.request(req)).expect("200 OK");
- assert!(captured_conn.connection_metadata().is_some());
-}
-
-#[cfg(not(miri))]
-#[test]
-fn connection_poisoning() {
- use std::sync::atomic::AtomicUsize;
-
- let _ = pretty_env_logger::try_init();
-
- let rt = runtime();
- let connector = DebugConnector::new();
-
- let client = Client::builder(TokioExecutor::new()).build(connector);
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let num_conns: Arc<AtomicUsize> = Default::default();
- let num_requests: Arc<AtomicUsize> = Default::default();
- let num_requests_tracker = num_requests.clone();
- let num_conns_tracker = num_conns.clone();
- thread::spawn(move || loop {
- let mut sock = server.accept().unwrap().0;
- num_conns_tracker.fetch_add(1, Ordering::Relaxed);
- let num_requests_tracker = num_requests_tracker.clone();
- thread::spawn(move || {
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- loop {
- if sock.read(&mut buf).expect("read 1") > 0 {
- num_requests_tracker.fetch_add(1, Ordering::Relaxed);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- }
- }
- });
- });
- let make_request = || {
- Request::builder()
- .uri(&*format!("http://{addr}/a"))
- .body(Empty::<Bytes>::new())
- .unwrap()
- };
- let mut req = make_request();
- let captured_conn = capture_connection(&mut req);
- rt.block_on(client.request(req)).expect("200 OK");
- assert_eq!(num_conns.load(Ordering::SeqCst), 1);
- assert_eq!(num_requests.load(Ordering::SeqCst), 1);
-
- rt.block_on(client.request(make_request())).expect("200 OK");
- rt.block_on(client.request(make_request())).expect("200 OK");
- // Before poisoning the connection is reused
- assert_eq!(num_conns.load(Ordering::SeqCst), 1);
- assert_eq!(num_requests.load(Ordering::SeqCst), 3);
- captured_conn
- .connection_metadata()
- .as_ref()
- .unwrap()
- .poison();
-
- rt.block_on(client.request(make_request())).expect("200 OK");
-
- // After poisoning, a new connection is established
- assert_eq!(num_conns.load(Ordering::SeqCst), 2);
- assert_eq!(num_requests.load(Ordering::SeqCst), 4);
-
- rt.block_on(client.request(make_request())).expect("200 OK");
- // another request can still reuse:
- assert_eq!(num_conns.load(Ordering::SeqCst), 2);
- assert_eq!(num_requests.load(Ordering::SeqCst), 5);
-}
-
-// -------------------------------------------------------
-// Below is our custom code for testing hyper legacy-client behavior with mock connections for PR #184
-// We use fully qualified paths for all types and identifiers to make this code
-// copy/paste-able without relying on external 'use' statements. Detailed inline
-// comments explain the purpose and logic of each section.
-
-//XXX: can manually run like this:
-// $ cargo test --features="http1,http2,server,client-legacy" --test legacy_client -- test_connection_error_propagation test_incomplete_message_error --nocapture
-// $ cargo test --all-features --test legacy_client -- --nocapture
-// $ cargo test --all-features --test legacy_client
-
-use std::error::Error; // needed for .source() eg. error[E0599]: no method named `source` found for struct `hyper_util::client::legacy::Error` in the current scope
-
-// Helper function to debug byte slices by attempting to interpret them as UTF-8.
-// If the bytes are valid UTF-8, they are printed as a string; otherwise, they are
-// printed as a raw byte array. This aids in debugging tokio_test::io::Mock mismatches.
-fn debug_bytes(bytes: &[u8], label: &str) {
- // Try to convert the byte slice to a UTF-8 string.
- // If successful, print it with the provided label for context.
- if let Ok(s) = std::str::from_utf8(bytes) {
- eprintln!("{}: {}", label, s);
- } else {
- // If the bytes are not valid UTF-8, print them as a raw byte array.
- eprintln!("{}: {:?}", label, bytes);
- }
-}
-
-// Struct representing a mock connection for testing hyper client behavior.
-// Implements hyper::rt::Read, hyper::rt::Write, and hyper_util::client::legacy::connect::Connection
-// traits to simulate I/O operations. Uses tokio_test::io::Mock for controlled I/O behavior.
-struct MockConnection {
- // The underlying mock I/O object, wrapped in hyper_util::rt::TokioIo for compatibility.
- inner: hyper_util::rt::TokioIo<tokio_test::io::Mock>,
- // Atomic flag to signal a connection failure, controlling poll_read behavior.
- failed: std::sync::Arc<std::sync::atomic::AtomicBool>,
- // The error to return when failed=true, simulating an I/O failure.
- error: std::sync::Arc<std::io::Error>,
- // Optional channel to signal unexpected writes, used for debugging.
- error_tx: Option<tokio::sync::mpsc::Sender<()>>,
- // Tracks total bytes written, for logging and verification.
- bytes_written: usize,
-}
-
-impl MockConnection {
- // Constructor for MockConnection, initializing all fields.
- // Takes a mock I/O object, failure flag, error, and optional error channel.
- fn new(
- mock: tokio_test::io::Mock,
- failed: std::sync::Arc<std::sync::atomic::AtomicBool>,
- error: std::sync::Arc<std::io::Error>,
- error_tx: Option<tokio::sync::mpsc::Sender<()>>,
- ) -> Self {
- MockConnection {
- inner: hyper_util::rt::TokioIo::new(mock),
- failed,
- error,
- error_tx,
- bytes_written: 0,
- }
- }
-}
-
-// Implement hyper::rt::Read trait to handle read operations on the mock connection.
-// Controls whether an error or mock I/O data is returned based on the failed flag.
-impl hyper::rt::Read for MockConnection {
- // Polls the connection for reading, filling the provided buffer.
- // If failed=true, returns the stored error; otherwise, delegates to the mock I/O.
- fn poll_read(
- mut self: std::pin::Pin<&mut Self>,
- cx: &mut std::task::Context<'_>,
- buf: hyper::rt::ReadBufCursor<'_>,
- ) -> std::task::Poll<std::result::Result<(), std::io::Error>> {
- // Log the current state of the failed flag for debugging.
- eprintln!(
- "poll_read: failed={}",
- self.failed.load(std::sync::atomic::Ordering::SeqCst)
- );
- // Check if the connection is marked as failed.
- // If true, return the stored error immediately to simulate a connection failure.
- if self.failed.load(std::sync::atomic::Ordering::SeqCst) {
- // Log the error being returned for traceability.
- eprintln!("poll_read: returning error: {}", self.error);
- // Create a new io::Error with the same kind and message as the stored error.
- return std::task::Poll::Ready(std::result::Result::Err(std::io::Error::new(
- self.error.kind(),
- self.error.to_string(),
- )));
- }
- // If not failed, delegate to the mock I/O to simulate normal read behavior.
- // This may return EOF (Poll::Ready(Ok(0))) for empty IoBuilder.
- let inner = std::pin::Pin::new(&mut self.inner);
- inner.poll_read(cx, buf)
- }
-}
-
-// Implement hyper::rt::Write trait to handle write operations on the mock connection.
-// Logs writes and signals unexpected writes via error_tx.
-impl hyper::rt::Write for MockConnection {
- // Polls the connection for writing, sending the provided buffer.
- // Logs the write operation and tracks total bytes written.
- fn poll_write(
- mut self: std::pin::Pin<&mut Self>,
- cx: &mut std::task::Context<'_>,
- buf: &[u8],
- ) -> std::task::Poll<std::result::Result<usize, std::io::Error>> {
- // Log the size of the buffer being written for debugging.
- eprintln!("poll_write: {} bytes", buf.len());
- // Debug the buffer contents as UTF-8 or raw bytes.
- debug_bytes(buf, "poll_write buffer");
- // Delegate the write to the mock I/O object.
- let inner = std::pin::Pin::new(&mut self.inner);
- match inner.poll_write(cx, buf) {
- // If the write succeeds, update the bytes_written counter and log the result.
- std::task::Poll::Ready(std::result::Result::Ok(bytes)) => {
- // Increment the total bytes written for tracking.
- self.bytes_written += bytes;
- // Log the number of bytes written and the running total.
- eprintln!(
- "poll_write: wrote {} bytes, total={}",
- bytes, self.bytes_written
- );
- // If error_tx is present, signal an unexpected write (used in error tests).
- // This helps detect writes when the connection should fail early.
- if let Some(tx) = self.error_tx.take() {
- // Log that an unexpected write is being signaled.
- eprintln!("poll_write: signaling unexpected write");
- // Send a message through the channel, ignoring errors if the receiver is closed.
- let _ = tx.try_send(());
- }
- // Return the successful write result.
- std::task::Poll::Ready(std::result::Result::Ok(bytes))
- }
- // For pending or error results, propagate them directly.
- other => other,
- }
- }
-
- // Polls the connection to flush any buffered data.
- // Delegates to the mock I/O object.
- fn poll_flush(
- mut self: std::pin::Pin<&mut Self>,
- cx: &mut std::task::Context<'_>,
- ) -> std::task::Poll<std::result::Result<(), std::io::Error>> {
- // Log the flush operation for debugging.
- eprintln!("poll_flush");
- // Delegate the flush to the mock I/O object.
- let inner = std::pin::Pin::new(&mut self.inner);
- inner.poll_flush(cx)
- }
-
- // Polls the connection to shut down the write side.
- // Delegates to the mock I/O object.
- fn poll_shutdown(
- mut self: std::pin::Pin<&mut Self>,
- cx: &mut std::task::Context<'_>,
- ) -> std::task::Poll<std::result::Result<(), std::io::Error>> {
- // Log the shutdown operation for debugging.
- eprintln!("poll_shutdown");
- // Delegate the shutdown to the mock I/O object.
- let inner = std::pin::Pin::new(&mut self.inner);
- inner.poll_shutdown(cx)
- }
-}
-
-// Implement hyper_util::client::legacy::connect::Connection trait to provide connection metadata.
-// Required for hyper to use MockConnection as a valid connection.
-impl hyper_util::client::legacy::connect::Connection for MockConnection {
- // Returns metadata about the connection.
- // In this case, a default Connected object indicating a new connection.
- fn connected(&self) -> hyper_util::client::legacy::connect::Connected {
- hyper_util::client::legacy::connect::Connected::new()
- }
-}
-
-// Struct representing a mock connector for creating MockConnection instances.
-// Implements tower_service::Service to integrate with hyper’s client.
-#[derive(Clone)]
-struct MockConnector {
- // The IoBuilder used to create mock I/O objects for each connection.
- io_builder: tokio_test::io::Builder,
- // Optional error to simulate a connection failure, passed to MockConnection.
- conn_error: Option<std::sync::Arc<std::io::Error>>,
-}
-
-impl MockConnector {
- // Constructor for MockConnector, initializing the IoBuilder and optional error.
- fn new(
- io_builder: tokio_test::io::Builder,
- conn_error: Option<std::sync::Arc<std::io::Error>>,
- ) -> Self {
- MockConnector {
- io_builder,
- conn_error,
- }
- }
-}
-
-// Implement tower_service::Service for MockConnector to create MockConnection instances.
-// Takes a hyper::Uri and returns a future resolving to a MockConnection.
-impl tower_service::Service<hyper::Uri> for MockConnector {
- type Response = crate::MockConnection;
- type Error = std::io::Error;
- type Future = std::pin::Pin<
- Box<
- dyn futures_util::Future<Output = std::result::Result<Self::Response, Self::Error>>
- + Send,
- >,
- >;
-
- // Polls the connector to check if it’s ready to handle a request.
- // Always ready, as we don’t have resource constraints.
- fn poll_ready(
- &mut self,
- _cx: &mut std::task::Context<'_>,
- ) -> std::task::Poll<std::result::Result<(), Self::Error>> {
- std::task::Poll::Ready(std::result::Result::Ok(()))
- }
-
- // Creates a new MockConnection for the given URI.
- // Configures the connection based on io_builder and conn_error.
- fn call(&mut self, _req: hyper::Uri) -> Self::Future {
- // Clone the IoBuilder to create a fresh mock I/O object.
- let mut io_builder = self.io_builder.clone();
- // Clone the optional connection error for this call.
- let conn_error = self.conn_error.clone();
- // Return a pinned future that creates the MockConnection.
- Box::pin(async move {
- // Build the mock I/O object from the IoBuilder.
- // This defines the I/O behavior (e.g., EOF for empty builder).
- let mock = io_builder.build();
- // Create an atomic flag to track connection failure, initially false.
- let failed = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
- // Set the default error for non-failure cases.
- // Used when conn_error is None, simulating a clean EOF or connection close.
- let error = if let Some(ref err) = conn_error {
- err.clone()
- } else {
- std::sync::Arc::new(std::io::Error::new(
- std::io::ErrorKind::BrokenPipe,
- "connection closed",
- ))
- };
- // Create an mpsc channel for signaling unexpected writes, if conn_error is set.
- // This helps debug cases where writes occur despite an expected failure.
- let error_tx = if conn_error.is_some() {
- // Create a channel with a buffer of 1 for signaling writes.
- let (tx, mut rx) = tokio::sync::mpsc::channel::<()>(1);
- // Spawn a task to log unexpected writes when received.
- tokio::spawn(async move {
- // Wait for a message indicating a write occurred.
- if rx.recv().await.is_some() {
- // Log the unexpected write for debugging.
- eprintln!("Unexpected write occurred");
- }
- });
- Some(tx)
- } else {
- None
- };
- // If a connection error is provided, mark the connection as failed.
- // This causes poll_read to return the error immediately.
- if let Some(err_clone) = conn_error {
- // Set the failed flag to true atomically.
- failed.store(true, std::sync::atomic::Ordering::SeqCst);
- // Log the simulated error for traceability.
- eprintln!("Simulated conn task error: {}", err_clone);
- }
- // Create and return the MockConnection with all configured components.
- std::result::Result::Ok(crate::MockConnection::new(mock, failed, error, error_tx))
- })
- }
-}
-
-// Test for connection error propagation with PR #184.
-// Simulates a connection failure by setting failed=true and returning a custom io::Error.
-// Verifies the error propagates through hyper’s client as a hyper::Error(Io, ...).
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_connection_error_propagation_pr184() {
- // Define the error message for the simulated connection failure.
- // Reused for creating the error and verifying the result.
- let err_str = "mock connection failure";
- // Create an io::Error with Other kind and the custom message.
- // Wrapped in Arc for sharing across threads and MockConnection.
- let io_error = std::sync::Arc::new(std::io::Error::new(std::io::ErrorKind::Other, err_str));
- // Create an empty IoBuilder, as no I/O is expected.
- // The error triggers before any reads or writes occur.
- let io_builder = tokio_test::io::Builder::new();
- // Create a MockConnector with the error to simulate a failed connection.
- // The error will set failed=true in MockConnection.
- let connector = crate::MockConnector::new(io_builder, Some(io_error.clone()));
- // Build the hyper client with TokioExecutor and our connector.
- // pool_max_idle_per_host(0) disables connection pooling for a fresh connection.
- let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())
- .pool_max_idle_per_host(0)
- .build::<_, http_body_util::Empty<hyper::body::Bytes>>(connector);
- // Build a GET request to a mock URI with custom headers.
- // Uses mixed-case headers to match your style, ensuring case-insensitive handling.
- let request = hyper::Request::builder()
- .uri("http://mocked")
- .header("hoSt", "mocked")
- .header("conNection", "close")
- .body(http_body_util::Empty::<hyper::body::Bytes>::new())
- .expect("failed to build request");
- // Send the request and capture the result.
- // Expect it to fail due to the simulated connection error.
- let result = client.request(request).await;
- // Extract the error, as the request should fail.
- let err = result.expect_err("expected request to fail");
- // Log the full error for debugging, including its structure.
- // Matches your detailed logging style for traceability.
- eprintln!("Actually gotten error is: {:?}", err);
- // Downcast the error to a hyper::Error to verify its type.
- // Expect a hyper::Error wrapping an io::Error from MockConnection.
- let hyper_err = err
- .source()
- .and_then(|e| e.downcast_ref::<hyper::Error>())
- .expect("expected hyper::Error");
- // Downcast the hyper::Error’s source to an io::Error.
- // Verify it matches the simulated error from MockConnection.
- let io_err = hyper_err
- .source()
- .and_then(|e| e.downcast_ref::<std::io::Error>())
- .expect(&format!("expected io::Error but got {:?}", hyper_err));
- // Verify the io::Error has the expected kind (Other).
- assert_eq!(io_err.kind(), std::io::ErrorKind::Other);
- // Verify the io::Error’s message matches err_str.
- assert_eq!(io_err.to_string(), err_str);
-}
-
-// Test for consistent IncompleteMessage error with or without PR #184.
-// Simulates a connection that returns EOF immediately, causing hyper’s HTTP/1.1 parser
-// to fail with IncompleteMessage due to no response data.
-// Uses MockConnector with conn_error=None to keep failed=false, ensuring EOF behavior.
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_incomplete_message_error_pr184() {
- // Create an empty IoBuilder to simulate a connection with no data.
- // No write or read expectations, so poll_read returns EOF (Poll::Ready(Ok(0))).
- // This triggers IncompleteMessage in hyper’s parser.
- let io_builder = tokio_test::io::Builder::new();
- // Create MockConnector with no error (conn_error=None).
- // Keeps failed=false in MockConnection, so poll_read delegates to the mock’s EOF.
- let connector = crate::MockConnector::new(io_builder, None);
- // Build the hyper client with TokioExecutor and our connector.
- // pool_max_idle_per_host(0) disables pooling for a fresh connection.
- let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())
- .pool_max_idle_per_host(0)
- .build::<_, http_body_util::Empty<hyper::body::Bytes>>(connector);
- // Build a GET request to a mock URI with headers.
- // Uses mixed-case headers to match test_connection_error_propagation_pr184.
- // Empty body ensures focus on response parsing failure.
- let request = hyper::Request::builder()
- .uri("http://mocked")
- .header("hoSt", "mocked")
- .header("conNection", "close")
- .body(http_body_util::Empty::<hyper::body::Bytes>::new())
- .expect("failed to build request");
- // Send the request and capture the result.
- // Expect failure due to EOF causing IncompleteMessage.
- let result = client.request(request).await;
- // Extract the error, as the request should fail.
- // Without PR #184, expect ChannelClosed; with PR #184, expect IncompleteMessage.
- let err = result.expect_err("expected request to fail");
- // Log the full error for debugging, matching your style.
- eprintln!("Actually gotten error is: {:?}", err);
- // Downcast to hyper::Error to verify the error type.
- // Expect IncompleteMessage (with PR #184) or ChannelClosed (without).
- let hyper_err = err
- .source()
- .and_then(|e| e.downcast_ref::<hyper::Error>())
- .expect("expected hyper::Error");
- // Verify the error is IncompleteMessage when PR #184 is applied.
- // This checks the parser’s failure due to EOF.
- assert!(
- hyper_err.is_incomplete_message(),
- "expected IncompleteMessage, got {:?}",
- hyper_err
- );
- // Confirm no io::Error is present, as this is a parsing failure, not I/O.
- // Ensures we’re testing the correct error type.
- assert!(
- hyper_err
- .source()
- .and_then(|e| e.downcast_ref::<std::io::Error>())
- .is_none(),
- "expected no io::Error, got {:?}",
- hyper_err
- );
-}
-
-// Test for a successful HTTP/1.1 connection using a mock connector.
-// Simulates a server that accepts a request and responds with a 200 OK.
-// Verifies the client correctly sends the request and receives the response.
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_successful_connection() {
- // Define the expected server response: a valid HTTP/1.1 200 OK with no body.
- let response = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";
- // Define the expected client request, including headers and CRLF termination.
- // This ensures the client sends the correct request format.
- let expected_request = b"GET / HTTP/1.1\r\nhost: mocked\r\nconnection: close\r\n\r\n";
- // Create an IoBuilder to simulate the server’s I/O behavior.
- // Expect the client to write the request and read the response.
- let mut io_builder = tokio_test::io::Builder::new();
- // Configure the IoBuilder to expect the request and provide the response.
- io_builder.write(expected_request).read(response);
- // Finalize the IoBuilder for use in the connector.
- let io_builder = io_builder;
- // Create a MockConnector with no error (conn_error=None).
- // Ensures failed=false, allowing normal I/O operations.
- let connector = crate::MockConnector::new(io_builder, None);
- // Build the hyper client with TokioExecutor and our connector.
- // pool_max_idle_per_host(0) ensures a fresh connection.
- let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())
- .pool_max_idle_per_host(0)
- .build::<_, http_body_util::Empty<hyper::body::Bytes>>(connector);
- // Build a GET request to a mock URI with headers.
- // Uses mixed-case headers to match your style and verify case-insensitive handling.
- let request = hyper::Request::builder()
- .uri("http://mocked")
- .header("hOst", "mocked")
- .header("coNnection", "close")
- .body(http_body_util::Empty::<hyper::body::Bytes>::new())
- .expect("failed to build request");
- // Send the request and capture the response.
- // Expect a successful response due to the configured IoBuilder.
- let response = client
- .request(request)
- .await
- .expect("request should succeed");
- // Verify the response status is 200 OK.
- assert_eq!(response.status(), 200);
-}
diff --git a/vendor/hyper-util/tests/proxy.rs b/vendor/hyper-util/tests/proxy.rs
deleted file mode 100644
index 95f4bc2f..00000000
--- a/vendor/hyper-util/tests/proxy.rs
+++ /dev/null
@@ -1,478 +0,0 @@
-use tokio::io::{AsyncReadExt, AsyncWriteExt};
-use tokio::net::{TcpListener, TcpStream};
-use tower_service::Service;
-
-use hyper_util::client::legacy::connect::proxy::{SocksV4, SocksV5, Tunnel};
-use hyper_util::client::legacy::connect::HttpConnector;
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_tunnel_works() {
- let tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let addr = tcp.local_addr().expect("local_addr");
-
- let proxy_dst = format!("http://{addr}").parse().expect("uri");
- let mut connector = Tunnel::new(proxy_dst, HttpConnector::new());
- let t1 = tokio::spawn(async move {
- let _conn = connector
- .call("https://hyper.rs".parse().unwrap())
- .await
- .expect("tunnel");
- });
-
- let t2 = tokio::spawn(async move {
- let (mut io, _) = tcp.accept().await.expect("accept");
- let mut buf = [0u8; 64];
- let n = io.read(&mut buf).await.expect("read 1");
- assert_eq!(
- &buf[..n],
- b"CONNECT hyper.rs:443 HTTP/1.1\r\nHost: hyper.rs:443\r\n\r\n"
- );
- io.write_all(b"HTTP/1.1 200 OK\r\n\r\n")
- .await
- .expect("write 1");
- });
-
- t1.await.expect("task 1");
- t2.await.expect("task 2");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v5_without_auth_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
-
- let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let target_addr = target_tcp.local_addr().expect("local_addr");
- let target_dst = format!("http://{target_addr}").parse().expect("uri");
-
- let mut connector = SocksV5::new(proxy_dst, HttpConnector::new());
-
- // Client
- //
- // Will use `SocksV5` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let conn = connector.call(target_dst).await.expect("tunnel");
- let mut tcp = conn.into_inner();
-
- tcp.write_all(b"Hello World!").await.expect("write 1");
-
- let mut buf = [0u8; 64];
- let n = tcp.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Goodbye!");
- });
-
- // Proxy
- //
- // Will receive CONNECT command from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 513];
-
- // negotiation req/res
- let n = to_client.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], [0x05, 0x01, 0x00]);
-
- to_client.write_all(&[0x05, 0x00]).await.expect("write 1");
-
- // command req/rs
- let [p1, p2] = target_addr.port().to_be_bytes();
- let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
- let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
- let n = to_client.read(&mut buf).await.expect("read 2");
- assert_eq!(&buf[..n], message);
-
- let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
-
- let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
- to_client.write_all(&message).await.expect("write 2");
-
- let (from_client, from_target) =
- tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
- .await
- .expect("proxy");
-
- assert_eq!(from_client, 12);
- assert_eq!(from_target, 8)
- });
-
- // Target server
- //
- // Will accept connection from proxy server
- // Will receive "Hello World!" from the client and return "Goodbye!"
- let t3 = tokio::spawn(async move {
- let (mut io, _) = target_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 64];
-
- let n = io.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Hello World!");
-
- io.write_all(b"Goodbye!").await.expect("write 1");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
- t3.await.expect("task - target");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v5_with_auth_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
-
- let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let target_addr = target_tcp.local_addr().expect("local_addr");
- let target_dst = format!("http://{target_addr}").parse().expect("uri");
-
- let mut connector =
- SocksV5::new(proxy_dst, HttpConnector::new()).with_auth("user".into(), "pass".into());
-
- // Client
- //
- // Will use `SocksV5` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let conn = connector.call(target_dst).await.expect("tunnel");
- let mut tcp = conn.into_inner();
-
- tcp.write_all(b"Hello World!").await.expect("write 1");
-
- let mut buf = [0u8; 64];
- let n = tcp.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Goodbye!");
- });
-
- // Proxy
- //
- // Will receive CONNECT command from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 513];
-
- // negotiation req/res
- let n = to_client.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
-
- to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
-
- // auth req/res
- let n = to_client.read(&mut buf).await.expect("read 2");
- let [u1, u2, u3, u4] = b"user";
- let [p1, p2, p3, p4] = b"pass";
- let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
- assert_eq!(&buf[..n], message);
-
- to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
-
- // command req/res
- let n = to_client.read(&mut buf).await.expect("read 3");
- let [p1, p2] = target_addr.port().to_be_bytes();
- let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
- let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
- assert_eq!(&buf[..n], message);
-
- let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
-
- let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
- to_client.write_all(&message).await.expect("write 3");
-
- let (from_client, from_target) =
- tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
- .await
- .expect("proxy");
-
- assert_eq!(from_client, 12);
- assert_eq!(from_target, 8)
- });
-
- // Target server
- //
- // Will accept connection from proxy server
- // Will receive "Hello World!" from the client and return "Goodbye!"
- let t3 = tokio::spawn(async move {
- let (mut io, _) = target_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 64];
-
- let n = io.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Hello World!");
-
- io.write_all(b"Goodbye!").await.expect("write 1");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
- t3.await.expect("task - target");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v5_with_server_resolved_domain_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");
-
- let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
- .with_auth("user".into(), "pass".into())
- .local_dns(false);
-
- // Client
- //
- // Will use `SocksV5` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let _conn = connector
- .call("https://hyper.rs:443".try_into().unwrap())
- .await
- .expect("tunnel");
- });
-
- // Proxy
- //
- // Will receive CONNECT command from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 513];
-
- // negotiation req/res
- let n = to_client.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
-
- to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
-
- // auth req/res
- let n = to_client.read(&mut buf).await.expect("read 2");
- let [u1, u2, u3, u4] = b"user";
- let [p1, p2, p3, p4] = b"pass";
- let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
- assert_eq!(&buf[..n], message);
-
- to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
-
- // command req/res
- let n = to_client.read(&mut buf).await.expect("read 3");
-
- let host = "hyper.rs";
- let port: u16 = 443;
- let mut message = vec![0x05, 0x01, 0x00, 0x03, host.len() as u8];
- message.extend(host.bytes());
- message.extend(port.to_be_bytes());
- assert_eq!(&buf[..n], message);
-
- let mut message = vec![0x05, 0x00, 0x00, 0x03, host.len() as u8];
- message.extend(host.bytes());
- message.extend(port.to_be_bytes());
- to_client.write_all(&message).await.expect("write 3");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v5_with_locally_resolved_domain_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");
-
- let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
- .with_auth("user".into(), "pass".into())
- .local_dns(true);
-
- // Client
- //
- // Will use `SocksV5` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let _conn = connector
- .call("https://hyper.rs:443".try_into().unwrap())
- .await
- .expect("tunnel");
- });
-
- // Proxy
- //
- // Will receive CONNECT command from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 513];
-
- // negotiation req/res
- let n = to_client.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
-
- to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
-
- // auth req/res
- let n = to_client.read(&mut buf).await.expect("read 2");
- let [u1, u2, u3, u4] = b"user";
- let [p1, p2, p3, p4] = b"pass";
- let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
- assert_eq!(&buf[..n], message);
-
- to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
-
- // command req/res
- let n = to_client.read(&mut buf).await.expect("read 3");
- let message = [0x05, 0x01, 0x00];
- assert_eq!(&buf[..3], message);
- assert!(buf[3] == 0x01 || buf[3] == 0x04); // IPv4 or IPv6
- assert_eq!(n, 4 + 4 * (buf[3] as usize) + 2);
-
- let message = vec![0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0];
- to_client.write_all(&message).await.expect("write 3");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v4_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
-
- let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let target_addr = target_tcp.local_addr().expect("local_addr");
- let target_dst = format!("http://{target_addr}").parse().expect("uri");
-
- let mut connector = SocksV4::new(proxy_dst, HttpConnector::new());
-
- // Client
- //
- // Will use `SocksV4` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let conn = connector.call(target_dst).await.expect("tunnel");
- let mut tcp = conn.into_inner();
-
- tcp.write_all(b"Hello World!").await.expect("write 1");
-
- let mut buf = [0u8; 64];
- let n = tcp.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Goodbye!");
- });
-
- // Proxy
- //
- // Will receive CONNECT command from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 512];
-
- let [p1, p2] = target_addr.port().to_be_bytes();
- let [ip1, ip2, ip3, ip4] = [127, 0, 0, 1];
- let message = [4, 0x01, p1, p2, ip1, ip2, ip3, ip4, 0, 0];
- let n = to_client.read(&mut buf).await.expect("read");
- assert_eq!(&buf[..n], message);
-
- let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
-
- let message = [0, 90, p1, p2, ip1, ip2, ip3, ip4];
- to_client.write_all(&message).await.expect("write");
-
- let (from_client, from_target) =
- tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
- .await
- .expect("proxy");
-
- assert_eq!(from_client, 12);
- assert_eq!(from_target, 8)
- });
-
- // Target server
- //
- // Will accept connection from proxy server
- // Will receive "Hello World!" from the client and return "Goodbye!"
- let t3 = tokio::spawn(async move {
- let (mut io, _) = target_tcp.accept().await.expect("accept");
- let mut buf = [0u8; 64];
-
- let n = io.read(&mut buf).await.expect("read 1");
- assert_eq!(&buf[..n], b"Hello World!");
-
- io.write_all(b"Goodbye!").await.expect("write 1");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
- t3.await.expect("task - target");
-}
-
-#[cfg(not(miri))]
-#[tokio::test]
-async fn test_socks_v5_optimistic_works() {
- let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
- let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
- let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
-
- let target_addr = std::net::SocketAddr::new([127, 0, 0, 1].into(), 1234);
- let target_dst = format!("http://{target_addr}").parse().expect("uri");
-
- let mut connector = SocksV5::new(proxy_dst, HttpConnector::new())
- .with_auth("ABC".into(), "XYZ".into())
- .send_optimistically(true);
-
- // Client
- //
- // Will use `SocksV5` to establish proxy tunnel.
- // Will send "Hello World!" to the target and receive "Goodbye!" back.
- let t1 = tokio::spawn(async move {
- let _ = connector.call(target_dst).await.expect("tunnel");
- });
-
- // Proxy
- //
- // Will receive SOCKS handshake from client.
- // Will connect to target and success code back to client.
- // Will blindly tunnel between client and target.
- let t2 = tokio::spawn(async move {
- let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
- let [p1, p2] = target_addr.port().to_be_bytes();
-
- let mut buf = [0; 22];
- let request = vec![
- 5, 1, 2, // Negotiation
- 1, 3, 65, 66, 67, 3, 88, 89, 90, // Auth ("ABC"/"XYZ")
- 5, 1, 0, 1, 127, 0, 0, 1, p1, p2, // Reply
- ];
-
- let response = vec![
- 5, 2, // Negotiation,
- 1, 0, // Auth,
- 5, 0, 0, 1, 127, 0, 0, 1, p1, p2, // Reply
- ];
-
- // Accept all handshake messages
- to_client.read_exact(&mut buf).await.expect("read");
- assert_eq!(request.as_slice(), buf);
-
- // Send all handshake messages back
- to_client
- .write_all(response.as_slice())
- .await
- .expect("write");
-
- to_client.flush().await.expect("flush");
- });
-
- t1.await.expect("task - client");
- t2.await.expect("task - proxy");
-}
diff --git a/vendor/hyper-util/tests/test_utils/mod.rs b/vendor/hyper-util/tests/test_utils/mod.rs
deleted file mode 100644
index df3a65d4..00000000
--- a/vendor/hyper-util/tests/test_utils/mod.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-use std::pin::Pin;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::Arc;
-
-use futures_channel::mpsc;
-use futures_util::task::{Context, Poll};
-use futures_util::Future;
-use futures_util::TryFutureExt;
-use hyper::Uri;
-use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf};
-use tokio::net::TcpStream;
-
-use hyper::rt::ReadBufCursor;
-
-use hyper_util::client::legacy::connect::HttpConnector;
-use hyper_util::client::legacy::connect::{Connected, Connection};
-use hyper_util::rt::TokioIo;
-
-#[derive(Clone)]
-pub struct DebugConnector {
- pub http: HttpConnector,
- pub closes: mpsc::Sender<()>,
- pub connects: Arc<AtomicUsize>,
- pub is_proxy: bool,
- pub alpn_h2: bool,
-}
-
-impl DebugConnector {
- pub fn new() -> DebugConnector {
- let http = HttpConnector::new();
- let (tx, _) = mpsc::channel(10);
- DebugConnector::with_http_and_closes(http, tx)
- }
-
- pub fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector {
- DebugConnector {
- http,
- closes,
- connects: Arc::new(AtomicUsize::new(0)),
- is_proxy: false,
- alpn_h2: false,
- }
- }
-
- pub fn proxy(mut self) -> Self {
- self.is_proxy = true;
- self
- }
-}
-
-impl tower_service::Service<Uri> for DebugConnector {
- type Response = DebugStream;
- type Error = <HttpConnector as tower_service::Service<Uri>>::Error;
- type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
-
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // don't forget to check inner service is ready :)
- tower_service::Service::<Uri>::poll_ready(&mut self.http, cx)
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- self.connects.fetch_add(1, Ordering::SeqCst);
- let closes = self.closes.clone();
- let is_proxy = self.is_proxy;
- let is_alpn_h2 = self.alpn_h2;
- Box::pin(self.http.call(dst).map_ok(move |tcp| DebugStream {
- tcp,
- on_drop: closes,
- is_alpn_h2,
- is_proxy,
- }))
- }
-}
-
-pub struct DebugStream {
- tcp: TokioIo<TcpStream>,
- on_drop: mpsc::Sender<()>,
- is_alpn_h2: bool,
- is_proxy: bool,
-}
-
-impl Drop for DebugStream {
- fn drop(&mut self) {
- let _ = self.on_drop.try_send(());
- }
-}
-
-impl Connection for DebugStream {
- fn connected(&self) -> Connected {
- let connected = self.tcp.connected().proxy(self.is_proxy);
-
- if self.is_alpn_h2 {
- connected.negotiated_h2()
- } else {
- connected
- }
- }
-}
-
-impl hyper::rt::Read for DebugStream {
- fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: ReadBufCursor<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Read::poll_read(Pin::new(&mut self.tcp), cx, buf)
- }
-}
-
-impl hyper::rt::Write for DebugStream {
- fn poll_write(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write(Pin::new(&mut self.tcp), cx, buf)
- }
-
- fn poll_flush(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_flush(Pin::new(&mut self.tcp), cx)
- }
-
- fn poll_shutdown(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), std::io::Error>> {
- hyper::rt::Write::poll_shutdown(Pin::new(&mut self.tcp), cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- hyper::rt::Write::is_write_vectored(&self.tcp)
- }
-
- fn poll_write_vectored(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- bufs: &[std::io::IoSlice<'_>],
- ) -> Poll<Result<usize, std::io::Error>> {
- hyper::rt::Write::poll_write_vectored(Pin::new(&mut self.tcp), cx, bufs)
- }
-}
-
-impl AsyncWrite for DebugStream {
- fn poll_shutdown(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), io::Error>> {
- Pin::new(self.tcp.inner_mut()).poll_shutdown(cx)
- }
-
- fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
- Pin::new(self.tcp.inner_mut()).poll_flush(cx)
- }
-
- fn poll_write(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, io::Error>> {
- Pin::new(self.tcp.inner_mut()).poll_write(cx, buf)
- }
-}
-
-impl AsyncRead for DebugStream {
- fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &mut ReadBuf<'_>,
- ) -> Poll<io::Result<()>> {
- Pin::new(self.tcp.inner_mut()).poll_read(cx, buf)
- }
-}